9 changes: 7 additions & 2 deletions ansible/roles/haproxy/defaults/main.yml
@@ -6,6 +6,11 @@ haproxy_install_mode: 'manifest'
haproxy_config_dir: '/etc/haproxy'

haproxy_image_repo: 'haproxy'
haproxy_image_tag: '2.2.6-apline'
haproxy_image_tag: '2.3.5-alpine'

apiserver_frontend_port: 6443
# apiserver loadbalancer
haproxy_memory_requests: 32M
haproxy_cpu_requests: 25m

haproxy_keepalive_timeout: 5m
haproxy_healthcheck_port: 8401
12 changes: 6 additions & 6 deletions ansible/roles/haproxy/tasks/main.yml
@@ -1,23 +1,23 @@
---
# This role implements the Software Load Balancing approach described at this location
# https://github.com/kubernetes/kubeadm/blob/master/docs/ha-considerations.md#options-for-software-load-balancing
---
- name: 'haproxy | preflight checks'
- name: 'preflight checks'
ansible.builtin.include_tasks: 'pre_checks.yml'

- name: configure necessary keepalived sysctl configuration
- name: 'configure necessary haproxy sysctl configuration'
ansible.builtin.sysctl:
name: 'net.ipv4.ip_nonlocal_bind'
value: 1
value: '1'
state: 'present'
sysctl_file: '/etc/sysctl.d/55-haproxy.conf'
sysctl_set: true

- name: 'haproxy | configure haproxy on control plane nodes'
- name: 'packages | configure haproxy on control plane nodes'
ansible.builtin.include_tasks: 'packages.yml'
when:
- haproxy_install_mode == 'package'

- name: 'haproxy | configure haproxy as a static pod within the cluster'
- name: 'manifests | configure haproxy as a static pod within the cluster'
ansible.builtin.include_tasks: 'manifests.yml'
when:
- haproxy_install_mode == 'manifest'
11 changes: 5 additions & 6 deletions ansible/roles/haproxy/tasks/manifests.yml
@@ -1,21 +1,21 @@
---
- name: 'haproxy | ensure kubernetes manifests directory "{{ cluster_kubernetes_manifests_path }}" exists'
- name: 'manifests | ensure kubernetes manifests directory "{{ cluster_kubernetes_manifests_path }}" exists'
ansible.builtin.file:
path: '{{ cluster_kubernetes_manifests_path }}'
Review comment (Member): cluster_kubernetes_manifests_path isn't being initialized in defaults or the group vars.

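A minimal way to address this, sketched only as a suggestion (the variable name comes from the task above; the value is just the conventional kubeadm static pod path, assumed rather than taken from this PR):

    # ansible/roles/haproxy/defaults/main.yml, suggested addition with an assumed value
    cluster_kubernetes_manifests_path: '/etc/kubernetes/manifests'
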
state: 'directory'
owner: 'root'
group: 'root'
mode: 0700

- name: 'haproxy | ensure haproxy config directory "{{ haproxy_config_dir }}" exists'
- name: 'manifests | ensure haproxy config directory "{{ haproxy_config_dir }}" exists'
ansible.builtin.file:
path: '{{ haproxy_config_dir }}'
state: 'directory'
owner: 'root'
group: 'root'
mode: 0700

- name: 'haproxy | ensure haproxy configuration file is up to date'
- name: 'manifests | ensure haproxy configuration file is up to date'
ansible.builtin.template:
src: 'haproxy.cfg.j2'
dest: '{{ haproxy_config_dir }}/haproxy.cfg'
@@ -24,19 +24,18 @@
mode: 0644
backup: true

- name: 'haproxy | get current checksum of haproxy config file'
- name: 'manifests | get current checksum of haproxy config file'
ansible.builtin.stat:
path: '{{ haproxy_config_dir }}/haproxy.cfg'
get_attributes: false
get_checksum: true
get_mime: false
register: haproxy_config_stat

- name: 'haproxy | write haproxy static pod manifest'
- name: 'manifests | write haproxy static pod manifest'
ansible.builtin.template:
src: 'haproxy.yaml.j2'
dest: '{{ cluster_kubernetes_manifests_path }}/haproxy.yaml'
owner: 'root'
group: 'root'
mode: 0644
backup: true
6 changes: 3 additions & 3 deletions ansible/roles/haproxy/tasks/pre_checks.yml
@@ -1,9 +1,9 @@
---
- name: 'validate variable : keepalived_vip'
assert:
ansible.builtin.assert:
that:
- keepalived_vip | ipaddr
fail_msg:
- "Variable 'keepalived_vip' should be a valid ip address"
- 'Variable "keepalived_vip" should be a valid ip address'
- 'Type is: {{ keepalived_vip | type_debug }}'
- "Value is: {{ keepalived_vip | default('undefined') }}"
- 'Value is: {{ keepalived_vip | default("undefined") }}'
72 changes: 54 additions & 18 deletions ansible/roles/haproxy/templates/haproxy.cfg.j2
@@ -1,23 +1,59 @@
####################
# api-proxy config #
####################
{{ ansible_managed | comment }}

frontend front-api-proxy
bind {{ keepalived_vip }}:8443
bind 127.0.0.1:8443
# This configuration provides some sensible defaults - modeled on the default configuration found here
# https://github.com/haproxytech/haproxy-docker-alpine/blob/master/2.3/haproxy.cfg
global
log stdout format raw local0
stats socket /var/run/haproxy.stat
user haproxy
group haproxy
chroot /var/empty
daemon

defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option redispatch
retries 5
timeout http-request 5m
timeout queue 5m
timeout connect 30s
timeout client {{ haproxy_keepalive_timeout }}
timeout server 15m
timeout http-keep-alive 30s
timeout check 30s
maxconn 4000

frontend stats
bind *:8404
mode http
stats enable
stats uri /stats
stats refresh 10s
stats admin if TRUE

{% if haproxy_healthcheck_port is defined -%}
frontend healthz
bind *:{{ haproxy_healthcheck_port }}
mode http
monitor-uri /healthz
{% endif -%}

frontend apiserver
bind {{ cluster_apiserver_frontend_ip }}:{{ cluster_apiserver_frontend_port }}
Review comment (@xunholy, Member, Mar 4, 2021): cluster_apiserver_frontend_ip & cluster_apiserver_frontend_port aren't being initialized in defaults or the group vars.

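One possible fix, again sketched only as a suggestion (the variable names come from the bind line above; the values are assumptions that reuse vars this role already knows about, namely the keepalived VIP and the frontend port from defaults/main.yml):

    # ansible/roles/haproxy/defaults/main.yml or group_vars, suggested addition with assumed values
    cluster_apiserver_frontend_ip: '{{ keepalived_vip }}'
    cluster_apiserver_frontend_port: '{{ apiserver_frontend_port }}'
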
mode tcp
option tcplog
timeout client 4h
default_backend kube-api
default_backend apiserver

backend kube-api
backend apiserver
mode tcp
option tcplog
option tcp-check
timeout connect 10s
timeout server 4h
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
{% for host in groups['masters'] %}
server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_host'] }}:6443 check
{% endfor %}
balance leastconn
default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100
option httpchk GET /healthz
http-check expect status 200
{% for host in groups['controlplane'] -%}
server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_default_ipv4'].address }}:{{ cluster_apiserver_port }} check check-ssl verify none
Review comment (Member): cluster_apiserver_port is a new var to replace cluster_apiserver_bind_port?

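If that is the intent, a backward-compatible default could bridge the rename. This is a hypothetical sketch, not part of the PR, and 6443 is only an assumed fallback matching apiserver_frontend_port in defaults/main.yml:

    # ansible/roles/haproxy/defaults/main.yml, hypothetical bridge during the rename
    cluster_apiserver_port: "{{ cluster_apiserver_bind_port | default(6443) }}"
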
{% endfor -%}
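For illustration only, with a hypothetical two-host controlplane group (cp-1 at 10.0.0.11 and cp-2 at 10.0.0.12) and cluster_apiserver_port set to 6443, the loop above would render roughly:

    # rendered haproxy.cfg backend entries, hypothetical hostnames and addresses
    server cp-1 10.0.0.11:6443 check check-ssl verify none
    server cp-2 10.0.0.12:6443 check check-ssl verify none
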
15 changes: 9 additions & 6 deletions ansible/roles/haproxy/templates/haproxy.yaml.j2
@@ -9,23 +9,26 @@ metadata:
component: haproxy
tier: control-plane
annotations:
haproxy-cfg-checksum: "{{ haproxy_config_stat.stat.checksum }}"
haproxy-config-checksum: "{{ haproxy_config_stat.stat.checksum }}"
spec:
containers:
- image: {{ haproxy_image_repo }}:{{ haproxy_image_tag }}
imagePullPolicy: IfNotPresent
name: haproxy
resources: {}
resources:
requests:
cpu: {{ haproxy_cpu_requests }}
memory: {{ haproxy_memory_requests }}
{% if haproxy_healthcheck_port is defined -%}
livenessProbe:
httpGet:
path: /healthz
port: {{ apiserver_frontend_port }}
scheme: HTTPS
port: {{ haproxy_healthcheck_port }}
readinessProbe:
httpGet:
path: /healthz
port: {{ apiserver_frontend_port }}
scheme: HTTPS
port: {{ haproxy_healthcheck_port }}
{% endif -%}
volumeMounts:
- mountPath: /usr/local/etc/haproxy/
name: haproxyconf