## ansible 스크립트 만들 때 참고 명령어
## 변수 보기
$ ansible -i hosts -m debug -a "var=hostvars['kube-master01']" localhost
$ ansible -i hosts -m debug -a "var=groups['kube-masters']" localhost
## 하나의 task 만 수행 (restart flannel)
$ ansible-playbook -i hosts --start-at-task='restart flannel' 03-flannel.yml
## task 조회
$ ansible-playbook -i hosts 03-flannel.yml --list-tasks
## 모든 소스는 github 에 ...
[ 모든 node 에서 수행 ]
$ sudo su -
# vi /etc/ssh/sshd_config
28 PermitRootLogin yes
52 PasswordAuthentication yes
# systemctl restart sshd
# passwd
# passwd stack
$ sudo apt-get install python2.7 python-minimal
$ sudo apt-get update
$ sudo apt-get install -y ansible
## ntp ntpdate 패키지를 설치해서 ntp 를 세팅해야 함
[ git init 설정 후 github 에 올리기 ]
$ cd ~/kubernetes-ansible
$ git init
$ git remote add origin https://github.com/seungkyua/kubernetes-ansible.git
$ git pull origin master
$ git config user.name "Seungkyu Ahn"
$ git config user.email "seungkyua@gmail.com"
$ git add -A
$ git commit -a -m "Initial commit"
$ git push --set-upstream origin master
[ Prerequisite ]
$ vi README.md
# Prerequisite #
- This ansible-playbook is tested in Ubuntu 16.04 LTS
- Need one Kubernetes deploy node (a.k.a kube-deploy)
- Login from kube-deploy to all Kubernetes nodes by `root` user without password using hostname
- kube-deploy should have swap memory over 2G byte
- Every work should be executed by `stack` user at kube-deploy
- Every nodes should be installed ansible and python packages
```
$ sudo apt-get update
$ sudo apt-get install -y ansible python2.7 python-minimal
```
- Group names and node names can not be changed in `hosts` file
```
[kube-deploy]
kube-deploy
[kube-masters]
kube-master01
kube-master02
[kube-nodes]
kube-node01
kube-node02
[kube-masters:vars]
kube-master01-iface=eno49
kube-master01-ip=192.168.30.13
kube-master02-iface=ens2f0
kube-master02-ip=192.168.30.14
[kube-nodes:vars]
kube-node01-iface=eno49
kube-node01-ip=192.168.30.15
kube-node02-iface=ens2f0
kube-node02-ip=192.168.30.16
```
- You have to change your own password at the `add_user_password` field in the `group_vars/all` file
## Tips ##
An encrypted password can be generated with the following command
```
$ sudo apt-get install -y whois
$ mkpasswd --method=SHA-512
[ input password and enter]
```
## Execute order ##
```
$ sudo ansible-playbook -i hosts 00-create-user.yml
$ sudo ansible-playbook -i hosts 00-install-package.yml
$ sudo ansible-playbook -i hosts 01-install-docker.yml
$ sudo chown -R stack.stack ~/.ansible
$ ansible-playbook -i hosts 02-etcd.yml
$ ansible-playbook -i hosts 03-flannel.yml
$ ansible-playbook -i hosts 04-kubernetes.yml
```
## restart service ##
- restart docker
```
$ sudo ansible-playbook -i hosts --tags="restart docker" 01-install-docker.yml
```
- restart etcd
```
$ ansible-playbook -i hosts --tags="restart etcd" 02-etcd.yml
```
- restart flannel
```
$ ansible-playbook -i hosts --tags="restart flannel" 03-flannel.yml
```
- restart kubernetes
```
$ ansible-playbook -i hosts --tags="restart kube-apiserver,restart kube-controller-manager,restart kube-scheduler,restart kube-proxy,restart kubelet" 04-kubernetes.yml
```
[ kube-deploy node 에서 수행 ]
## kube-deploy node 접속
$ ssh -i ~/magnum-key.pem stack@192.168.30.138
$ ssh-keygen -t rsa
$ sudo su -
# ssh-keygen -t rsa
# ssh-copy-id kube-deploy
# ssh-copy-id kube-master01
# ssh-copy-id kube-master02
# ssh-copy-id kube-node01
# ssh-copy-id kube-node02
$ mkdir -p ~/kubernetes-ansible && cd ~/kubernetes-ansible
$ vi hosts
[kube-deploy]
kube-deploy
[kube-masters]
kube-master01
kube-master02
[kube-nodes]
kube-node01
kube-node02
[kube-masters:vars]
kube-master01-iface=eno49
kube-master01-ip=192.168.30.13
kube-master02-iface=ens2f0
kube-master02-ip=192.168.30.14
[kube-nodes:vars]
kube-node01-iface=eno49
kube-node01-ip=192.168.30.15
kube-node02-iface=ens2f0
kube-node02-ip=192.168.30.16
$ vi ansible.cfg
[defaults]
host_key_checking = False
## password encrypt 값을 알아냄
$ sudo apt-get install -y whois
$ mkpasswd --method=SHA-512
## 환경 변수 세팅
$ mkdir -p group_vars && vi group_vars/all
ansible_dir: "kubernetes-ansible"
gopath_dir: "go_workspace"
add_user: "stack"
add_user_password: "generated password here !"
ubuntu_release: "xenial"
kube_deploy_uname_r: "4.4.0-22-generic"
uname_r: "4.4.0-21-generic"
etcd_data_dir: "/ext/data/etcd"
flannel_version: "v0.6.1"
flannel_net: "172.16.0.0/16"
mtu_size: "1500"
kube_version: "release-1.4"
kube_api_ip: "192.168.30.13"
service_cluster_ip_range: "192.168.30.192/26"
service_node_port_range: "30000-32767"
cluster_dns: "192.168.30.200"
cluster_domain: "cluster.local"
$ mkdir -p files && vi files/hosts
127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts
# 10.0.0.18 kube-depoly   (typo/leftover entry — kube-deploy is defined below; kept commented out so it cannot shadow 192.168.30.138)
192.168.30.138 kube-deploy
192.168.30.13 kube-master01
192.168.30.14 kube-master02
192.168.30.15 kube-node01
192.168.30.16 kube-node02
## user 생성 (stack), key 자동 복사, sudo user 등록, 환경변수 세팅, host 파일 복사
$ vi 00-create-user.yml
---
# 00-create-user.yml
# Creates the '{{ add_user }}' account on all nodes, grants passwordless
# sudo, seeds the Go/Kubernetes environment variables in .bashrc and
# distributes the shared /etc/hosts file.  Runs as root over SSH.
# (Indentation restored: the transcript had flattened this file into
# invalid YAML.)
- name: create the user
  hosts: all
  remote_user: root
  tasks:
    - include_vars: group_vars/all

    - name: Add the {{ add_user }} user
      user: name={{ add_user }} groups=sudo createhome=yes shell=/bin/bash
            password={{ add_user_password }} append=yes
            generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa

    # NOTE(review): lookup('file', ...) reads on the control node, so this
    # installs kube-deploy's public key on every target host.
    - name: Set up authorized_keys for the {{ add_user }}
      authorized_key: user={{ add_user }} key="{{ lookup('file', '/home/{{ add_user }}/.ssh/id_rsa.pub') }}"

    # validate with visudo so a bad line can never brick sudo
    - name: sudo
      lineinfile:
        "dest=/etc/sudoers state=present regexp='^{{ add_user }} ALL='
        line='{{ add_user }} ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'"

    - name: export GOPATH
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export GOPATH' line='export GOPATH=$HOME/{{ gopath_dir }}:$HOME/{{ gopath_dir }}/src/k8s.io/kubernetes/Godeps/_workspace'"

    - name: export PATH
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export PATH'
        line='export PATH=$HOME/{{ gopath_dir }}/bin:$PATH'"

    - name: export KUBE_ROOT
      lineinfile:
        "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export KUBE_ROOT'
        line='export KUBE_ROOT=$HOME/{{ gopath_dir }}/src/k8s.io/kubernetes'"

    # dest is a directory, so the file lands at /etc/hosts
    - name: Copy hosts file
      copy:
        src: "files/hosts"
        dest: "/etc"
        owner: root
$ sudo ansible-playbook -i hosts 00-create-user.yml
## apt-get package 설치
$ vi 00-install-package.yml
---
# 00-install-package.yml
# Installs the base build/runtime packages on every node.
# (Indentation restored: the transcript had flattened this file into
# invalid YAML.)
- name: install package
  hosts: kube-deploy kube-masters kube-nodes
  remote_user: root
  tasks:
    - include_vars: group_vars/all

    - name: Install apt packages
      apt: name={{ item }} update_cache=yes
      with_items:
        - bridge-utils
        - linux-libc-dev
        - golang
        - gcc
        - curl
        - git
$ sudo ansible-playbook -i hosts 00-install-package.yml
## docker install
$ vi 01-install-docker.yml
---
# This playbook setup docker package
- hosts: kube-deploy kube-masters kube-nodes
remote_user: root
roles:
- docker
$ mkdir -p roles/docker/files && vi roles/docker/files/docker.xenial.list
deb https://apt.dockerproject.org/repo ubuntu-xenial main
$ vi roles/docker/files/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket
Requires=docker.socket
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/etc/default/docker
ExecStart=/usr/bin/docker daemon $DOCKER_OPTS
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
$ vi roles/docker/files/docker
DOCKER_OPTS=" -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock"
$ mkdir -p roles/docker/tasks && vi roles/docker/tasks/main.yml
---
# roles/docker/tasks/main.yml
# Adds the Docker apt repository, installs docker-engine plus the matching
# linux-image-extra kernel package, installs our service/config files and
# restarts the daemon.  (Indentation restored from the flattened transcript.)
- name: install apt-transport-https ca-certificates
  apt: name={{ item }}
  with_items:
    - apt-transport-https
    - ca-certificates

# k=v form takes no shell-style '\' continuation; fold the line instead
- name: add GPG key
  apt_key: keyserver=hkp://p80.pool.sks-keyservers.net:80
           id=58118E89F3A912897C070ADBF76221572C52609D

- name: add docker.list
  copy:
    src: "docker.{{ ubuntu_release }}.list"
    dest: "/etc/apt/sources.list.d"
    owner: root

- name: apt-get update
  apt: update_cache=yes

# BUG FIX: variable was misspelled 'kube_depoloy_uname_r';
# group_vars/all defines 'kube_deploy_uname_r'.
- name: install linux-image-extra kube-deploy
  apt: name=linux-image-extra-{{ kube_deploy_uname_r }}
  when: "'kube-deploy' in group_names"

- name: install linux-image-extra kube-masters kube-nodes
  apt: name=linux-image-extra-{{ uname_r }}
  when: "('kube-masters' in group_names) or ('kube-nodes' in group_names)"

#- name: restart servers
#  shell: sleep 2 && shutdown -r now
#  async: 0
#  poll: 0
#  ignore_errors: true
#- name: Waiting for server to come back
#  local_action: wait_for host={{ inventory_hostname }}
#                state=started port=22 delay=10 timeout=300
#- name: Update apt
#  apt: update_cache=yes

- name: install docker
  apt: name=docker-engine

# BUG FIX: 'group=' sets the user's *primary* group, silently dropping the
# intent; use 'groups=' with append=yes to add the docker group on top of
# the existing memberships (sudo etc.).
- name: add docker group
  user: name={{ add_user }} groups=docker append=yes

- name: copy docker config
  copy:
    src: "docker"
    dest: "/etc/default"
    mode: 0755
    owner: root

- name: copy docker.service
  copy:
    src: "docker.service"
    dest: "/lib/systemd/system"
    mode: 0644
    owner: root

- name: reload systemd
  shell: systemctl daemon-reload

- name: restart docker
  service: name=docker state=restarted enabled=yes
  tags:
    - restart docker

- name: export DOCKER_HOST
  lineinfile:
    "dest=/home/{{ add_user }}/.bashrc state=present regexp='^export DOCKER_HOST'
    line='export DOCKER_HOST=127.0.0.1:4243'"
$ sudo ansible-playbook -i hosts 01-install-docker.yml
[ etcd 설치 ]
$ sudo chown -R stack.stack ~/.ansible
$ vi 02-etcd.yml
---
# This playbook installs etcd cluster.
- name: Setup etcd
hosts: kube-masters
remote_user: "{{add_user}}"
become: true
become_user: root
roles:
- etcd
## --listen-peer-urls 과 --listen-client-urls 은 0.0.0.0 주소로 줄 수 있음
$ mkdir -p roles/etcd/templates && vi roles/etcd/templates/etcd.conf.j2
DAEMON_ARGS="--name {{ inventory_hostname }} \
--data-dir={{ etcd_data_dir }} \
--initial-advertise-peer-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2380 \
--listen-peer-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2380 \
--listen-client-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2379,http://127.0.0.1:2379,\
http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:4001,http://127.0.0.1:4001 \
--advertise-client-urls http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:2379 \
--initial-cluster-token etcd-cluster-1 \
{% for host in groups['kube-masters'] %}
{% if host == groups['kube-masters']|first %}
--initial-cluster {{ host }}=http://{{ hostvars[host][host + '-ip'] }}:2380{% else %},{{ host }}=http://{{ hostvars[host][host + '-ip'] }}:2380{% endif %}{% endfor %} \
--initial-cluster-state new"
$ mkdir -p roles/etcd/tasks && vi roles/etcd/tasks/main.yml
---
# roles/etcd/tasks/main.yml
# Install etcd from apt, render the cluster config and restart the service.
# (Indentation restored from the flattened transcript.)
- name: install etcd
  apt: name=etcd update_cache=yes

- name: copy etcd config
  template: src=etcd.conf.j2 dest=/etc/default/etcd

- name: enable etcd systemd
  service: name=etcd enabled=yes

- name: restart etcd
  service: name=etcd state=restarted
  tags:
    - restart etcd
$ ansible-playbook -i hosts 02-etcd.yml
## 특정 task 만 돌릴 경우
$ ansible-playbook -i hosts --start-at-task="Restart etcd" 02-etcd.yml
## etcd 멤버 조회 (2개가 나와야 함)
$ etcdctl member list
## etcd 테스트
$ etcdctl --endpoint http://192.168.30.13:2379 set /test "hello"
$ etcdctl --endpoint http://192.168.30.14:2379 get /test
$ etcdctl --endpoint http://192.168.30.13:2379 rm /test
$ etcdctl --no-sync --endpoint http://kube-master01:2379 --debug ls / -recursive
[ flannel 설치 ]
## flannel 데이터를 etcd 입력 및 설치
$ vi 03-flannel.yml
---
- name: Setup flannel
hosts: kube-deploy kube-masters kube-nodes
remote_user: "{{add_user}}"
become: true
become_user: root
roles:
- flannel
## flannel 을 위한 네트워크 값을 etcd 에 등록하는 스크립트
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/config-flannel.sh.j2
#!/bin/bash
# Register the flannel network configuration in etcd.
# NOTE: 'exec' replaces the shell, so nothing after this line can run; the
# original template carried a second, jinja-conditional duplicate of the
# same PUT below the exec — unreachable dead code, removed here.  The task
# in roles/flannel/tasks/main.yml already restricts this script to
# kube-master01, so no jinja guard is needed.
exec curl -s -L http://{{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }}:4001/v2/keys/coreos.com/network/config \
    -XPUT -d value='{"Network": "{{ flannel_net }}", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
## flannel 을 다운 받아서 build 하는 스크립트
$ vi roles/flannel/templates/download-flannel.sh.j2
#!/bin/bash
# Clone flannel at the pinned tag, build flanneld with the repo's own build
# script, then copy the binary into this ansible role's files directory.
# Jinja variables ({{ ... }}) are rendered by Ansible before execution.
set -e

FLANNEL_DIR="${HOME}/github/flannel"
FLANNEL_VERSION="{{ flannel_version }}"
ANSIBLE_HOME="${HOME}/{{ ansible_dir }}"

# cd that aborts the script instead of silently running in the wrong dir
function chdir() {
    cd "$1" || exit 1
}

if [ ! -d "${FLANNEL_DIR}" ]; then
    mkdir -p "${HOME}/github"
    chdir "${HOME}/github"
    git clone https://github.com/coreos/flannel.git
    chdir "${FLANNEL_DIR}"
    git checkout -b "${FLANNEL_VERSION}" "tags/${FLANNEL_VERSION}"
fi

chdir "${FLANNEL_DIR}"

# Recreate flannel's upstream build script if it is missing; the heredoc
# body is the upstream script verbatim ('\$' keeps expansion for runtime).
if [ ! -f build ]; then
cat <<EOF >build
#!/bin/bash -e
ORG_PATH="github.com/coreos"
REPO_PATH="\${ORG_PATH}/flannel"
if [ ! -h gopath/src/\${REPO_PATH} ]; then
mkdir -p gopath/src/\${ORG_PATH}
ln -s ../../../.. gopath/src/\${REPO_PATH} || exit 255
fi
export GOBIN=\${PWD}/bin
export GOPATH=\${PWD}/gopath
eval \$(go env)
if [ \${GOOS} = "linux" ]; then
echo "Building flanneld..."
go build -o \${GOBIN}/flanneld \${REPO_PATH}
else
echo "Not on Linux - skipping flanneld build"
fi
EOF
fi

chmod 755 build
./build

mkdir -p "${ANSIBLE_HOME}/roles/flannel/files"
cp bin/flanneld "${ANSIBLE_HOME}/roles/flannel/files"
## flannel.service 파일
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/flanneld.service.j2
[Unit]
Description=flanneld Service
#After=etcd.service
#Requires=etcd.service
[Service]
EnvironmentFile=/etc/default/flanneld
PermissionsStartOnly=true
User=root
ExecStart=/usr/bin/flanneld -etcd-endpoints=${FLANNEL_ETCD} $FLANNEL_OPTIONS
Restart=always
RestartSec=10s
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
Alias=flanneld.service
## flannel config 파일
$ vi roles/flannel/templates/flanneld.j2
FLANNEL_ETCD="http://{{ groups['kube-masters'][0] }}:4001"
FLANNEL_OPTIONS="-v 0"
## flannel 과 연계된 docker 설정 파일
$ mkdir -p roles/flannel/templates && vi roles/flannel/templates/docker-config.sh.j2
#! /bin/bash
# Rewrite /etc/default/docker so the docker daemon uses the subnet and MTU
# that flanneld published, then the handler chain restarts docker.
# NOTE(review): ${DOCKER_OPTS} is not defined by subnet.env and is likely
# empty here — confirm whether it was meant to carry over existing options.
ip link set dev docker0 down
brctl delbr docker0
# provides FLANNEL_SUBNET and FLANNEL_MTU
source /run/flannel/subnet.env
echo DOCKER_OPTS=\"${DOCKER_OPTS} -H tcp://127.0.0.1:4243 \
-H unix:///var/run/docker.sock \
--bip=${FLANNEL_SUBNET} \
--mtu=${FLANNEL_MTU}\" > /etc/default/docker
## flannel ansible 설치 로직
$ mkdir -p roles/flannel/tasks && vi roles/flannel/tasks/main.yml
---
# roles/flannel/tasks/main.yml
# 1) push the network config into etcd (master01 only)
# 2) build flanneld on kube-deploy
# 3) install/restart flanneld on the worker nodes
# (Indentation restored from the flattened transcript.)

# set flannel data into etcd
- name: copy config-flannel
  template: src=config-flannel.sh.j2 dest=~/config-flannel.sh mode=755
  when: inventory_hostname == 'kube-master01'

- name: run config-flannel
  command: ~/config-flannel.sh
  when: inventory_hostname == 'kube-master01'

- name: remove config-flannel
  file: name=~/config-flannel.sh state=absent
  when: inventory_hostname == 'kube-master01'

# flannel download, build, install (on kube-deploy, as the normal user)
- name: copy download-flannel
  template: src=download-flannel.sh.j2 dest=~/download-flannel.sh
            owner={{ add_user }} mode=755
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

# FIX: dropped the stray 'owner={{ add_user }}' that was being passed as a
# (ignored) positional argument to the script.
- name: run download-flannel
  command: ~/download-flannel.sh
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: remove download-flannel
  file: name=~/download-flannel.sh state=absent
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: copy flanneld
  copy: src=flanneld dest=/usr/bin/flanneld owner=root mode=0755
  when: "'kube-nodes' in group_names"

- name: copy flanneld.service
  template: src=flanneld.service.j2 dest=/lib/systemd/system/flanneld.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

# BUG FIX: the original looped over every host in groups['kube-nodes'] and
# ran 'ip link set' with the *other* nodes' interface names on this host,
# which fails whenever interface names differ (eno49 vs ens2f0 in the
# sample inventory).  Each node only needs its own interface resized.
- name: resize MTU
  command: ip link set dev {{ hostvars[inventory_hostname][inventory_hostname + '-iface'] }} mtu {{ mtu_size }}
  when: "'kube-nodes' in group_names"

- name: copy flanneld config
  template: src=flanneld.j2 dest=/etc/default/flanneld
  when: "'kube-nodes' in group_names"

- name: reload systemd
  shell: systemctl daemon-reload
  when: "'kube-nodes' in group_names"

# Restarting flanneld also triggers the handler chain that reconfigures
# docker with the flannel subnet (roles/flannel/handlers/main.yml).
- name: restart flannel
  service: name=flanneld state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart flannel
  notify:
    - restart flannel
## handler 는 task 의 notify 로 호출됨
$ mkdir -p roles/flannel/handlers && vi roles/flannel/handlers/main.yml
---
# roles/flannel/handlers/main.yml
# Handler chain: restarting flanneld re-creates /run/flannel/subnet.env,
# then docker is stopped, docker0 dropped, the docker defaults rewritten
# from the flannel subnet, and docker started again.
# (Indentation restored from the flattened transcript.)
- name: restart flannel
  service: name=flanneld state=restarted
  notify:
    - stop docker
    - delete docker0
    - copy docker-config
    - run docker-config
    - remove docker-config
    - start docker
  when: "'kube-nodes' in group_names"

- name: stop docker
  service: name=docker state=stopped
  when: "'kube-nodes' in group_names"

# docker0 keeps its old address; drop it so docker recreates it with the
# flannel-provided --bip.  May not exist on first run, hence ignore_errors.
- name: delete docker0
  command: ip link delete docker0
  ignore_errors: yes
  when: "'kube-nodes' in group_names"

- name: copy docker-config
  template: src=docker-config.sh.j2 dest=~/docker-config.sh mode=755
  when: "'kube-nodes' in group_names"

- name: run docker-config
  command: ~/docker-config.sh
  ignore_errors: true
  when: "'kube-nodes' in group_names"

- name: remove docker-config
  file: name=~/docker-config.sh state=absent
  when: "'kube-nodes' in group_names"

- name: start docker
  service: name=docker state=started
  when: "'kube-nodes' in group_names"
$ ansible-playbook -i hosts 03-flannel.yml
###################################
## k8s 소스 다운로드 및 make (ansible)
###################################
## cert 파일 만들기
## https://github.com/kubernetes/kubernetes/blob/master/cluster/saltbase/salt/generate-cert/make-cert.sh
## https://github.com/kubernetes/kubernetes/blob/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
## kubernetes 다운로드 및 설치
$ vi 04-kubernetes.yml
---
- name: Setup kubernetes
hosts: kube-deploy kube-masters kube-nodes
remote_user: "{{add_user}}"
become: true
become_user: root
roles:
- kubernetes
## k8s 을 다운 받아서 build 하는 스크립트 (GOPATH 와 PATH 가 중요)
$ mkdir -p roles/kubernetes/templates && vi roles/kubernetes/templates/download-kubernetes.sh.j2
#!/bin/bash
# Clone kubernetes at the pinned release branch, build it once and copy the
# resulting binaries into this ansible role's files directory.
# Jinja variables ({{ ... }}) are rendered by Ansible before execution.
set -e

GO_HOME="${HOME}/{{ gopath_dir }}"
KUBE_HOME="${GO_HOME}/src/k8s.io/kubernetes"
KUBE_VERSION="{{ kube_version }}"
ANSIBLE_HOME="${HOME}/{{ ansible_dir }}"

# GOPATH must include the Godeps workspace for the release-1.4 build
export GOPATH="${GO_HOME}:${KUBE_HOME}/Godeps/_workspace"
export PATH="${GO_HOME}/bin:${PATH}"

# cd that aborts the script instead of silently running in the wrong dir
function chdir() {
    cd "$1" || exit 1
}

if [ ! -d "${KUBE_HOME}" ]; then
    mkdir -p "${GO_HOME}/src/k8s.io"
    chdir "${GO_HOME}/src/k8s.io"
    go get -u github.com/jteeuwen/go-bindata/go-bindata
    git clone https://github.com/kubernetes/kubernetes.git
    chdir "${KUBE_HOME}"
    git checkout -b "${KUBE_VERSION}" "origin/${KUBE_VERSION}"
fi

chdir "${KUBE_HOME}"

# Build only once; _output exists after a successful make.
if [ ! -d "${KUBE_HOME}/_output" ]; then
    make all
fi

mkdir -p "${ANSIBLE_HOME}/roles/kubernetes/files"
# kube* glob deliberately unquoted: copies all built binaries
cp _output/local/bin/linux/amd64/kube* "${ANSIBLE_HOME}/roles/kubernetes/files"
## Kubernetes config 파일
$ vi roles/kubernetes/templates/kube-apiserver.conf.j2
KUBE_APISERVER_OPTS="--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--etcd-servers=http://127.0.0.1:4001 \
--logtostderr=true \
--service-cluster-ip-range={{ service_cluster_ip_range }} \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,\
ResourceQuota,DenyEscalatingExec,SecurityContextDeny \
--service-node-port-range={{ service_node_port_range }} \
--client-ca-file=/srv/kubernetes/ca.crt \
--tls-cert-file=/srv/kubernetes/server.cert \
--tls-private-key-file=/srv/kubernetes/server.key"
$ vi roles/kubernetes/templates/kube-apiserver.service.j2
[Unit]
Description=Kubernetes API Server
After=syslog.target network.target
[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-apiserver
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
$ vi roles/kubernetes/templates/kube-controller-manager.conf.j2
KUBE_CONTROLLER_MANAGER_OPTS="--master=127.0.0.1:8080 \
--root-ca-file=/srv/kubernetes/ca.crt \
--service-account-private-key-file=/srv/kubernetes/server.key \
--logtostderr=true"
$ vi roles/kubernetes/templates/kube-controller-manager.service.j2
[Unit]
Description=Kubernetes Controller Manager
After=syslog.target network.target
[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-controller-manager
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
$ vi roles/kubernetes/templates/kube-scheduler.conf.j2
KUBE_SCHEDULER_OPTS="--logtostderr=true \
--master=127.0.0.1:8080"
$ vi roles/kubernetes/templates/kube-scheduler.service.j2
[Unit]
Description=Kubernetes Scheduler
After=syslog.target network.target
[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-scheduler
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
$ vi roles/kubernetes/templates/kubelet.conf.j2
KUBELET_OPTS="--address=0.0.0.0 \
--port=10250 \
--hostname-override={{ hostvars[inventory_hostname][inventory_hostname + '-ip'] }} \
--api-servers=http://{{ kube_api_ip }}:8080 \
--logtostderr=true \
--cluster-dns={{ cluster_dns }} \
--cluster-domain={{ cluster_domain }}"
$ vi roles/kubernetes/templates/kubelet.service.j2
[Unit]
Description=Kubernetes Kubelet
After=syslog.target network.target
[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kubelet
ExecStart=/usr/local/bin/kubelet $KUBELET_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
$ vi roles/kubernetes/templates/kube-proxy.conf.j2
KUBE_PROXY_OPTS="--master=http://{{ kube_api_ip }}:8080 --logtostderr=true"
$ vi roles/kubernetes/templates/kube-proxy.service.j2
[Unit]
Description=Kubernetes Proxy Server
After=syslog.target network.target
[Service]
Type=simple
User=root
EnvironmentFile=-/etc/default/kube-proxy
ExecStart=/usr/local/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
## kubernetes ansible 설치 로직
$ mkdir -p roles/kubernetes/tasks && vi roles/kubernetes/tasks/main.yml
---
# roles/kubernetes/tasks/main.yml
# Build kubernetes on kube-deploy, generate certificates there, distribute
# binaries/configs/unit files to masters and nodes, then (re)start every
# component.  (Indentation restored from the flattened transcript.)
- name: copy download-kubernetes.sh
  template: src=download-kubernetes.sh.j2 dest=~/download-kubernetes.sh
            owner={{ add_user }} mode=755
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

# FIX: dropped the stray 'owner={{ add_user }}' that was being passed as a
# (ignored) positional argument to the script.
- name: run download-kubernetes.sh
  command: ~/download-kubernetes.sh
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: remove download-kubernetes.sh
  file: name=~/download-kubernetes.sh state=absent
  become: yes
  become_user: "{{ add_user }}"
  when: "'kube-deploy' in group_names"

- name: add kube-cert group
  group: name=kube-cert state=present

# Certificates are generated once on kube-deploy; the 'copy' tasks below
# read src from the control node, so every host receives the same certs.
- name: make cert
  command: /home/{{ add_user }}/{{ gopath_dir }}/src/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/make-cert.sh
  become: yes
  become_user: root
  when: "'kube-deploy' in group_names"

- name: make ca-cert
  command: /home/{{ add_user }}/{{ gopath_dir }}/src/k8s.io/kubernetes/cluster/saltbase/salt/generate-cert/make-ca-cert.sh {{ kube_api_ip }}
  become: yes
  become_user: root
  when: "'kube-deploy' in group_names"

- name: change mod cert
  file:
    path: /srv/kubernetes
    mode: 0755
    recurse: yes
  when: "'kube-deploy' in group_names"

- name: create cert directory
  file: path=/srv/kubernetes state=directory owner=root mode=0755

- name: copy server.cert
  copy: src=/srv/kubernetes/server.cert dest=/srv/kubernetes/server.cert
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy server.key
  copy: src=/srv/kubernetes/server.key dest=/srv/kubernetes/server.key
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy ca.crt
  copy: src=/srv/kubernetes/ca.crt dest=/srv/kubernetes/ca.crt
        owner=root mode=0600
  become: yes
  become_user: root

- name: copy kubectl
  copy: src=kubectl dest=/usr/local/bin/kubectl
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-apiserver
  copy: src=kube-apiserver dest=/usr/local/bin/kube-apiserver
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager
  copy: src=kube-controller-manager dest=/usr/local/bin/kube-controller-manager
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler
  copy: src=kube-scheduler dest=/usr/local/bin/kube-scheduler
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kubelet
  copy: src=kubelet dest=/usr/local/bin/kubelet
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy
  copy: src=kube-proxy dest=/usr/local/bin/kube-proxy
        owner=root mode=0755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-apiserver config
  template: src=kube-apiserver.conf.j2 dest=/etc/default/kube-apiserver
            owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-apiserver.service
  template: src=kube-apiserver.service.j2 dest=/lib/systemd/system/kube-apiserver.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager config
  template: src=kube-controller-manager.conf.j2 dest=/etc/default/kube-controller-manager
            owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-controller-manager.service
  template: src=kube-controller-manager.service.j2 dest=/lib/systemd/system/kube-controller-manager.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler config
  template: src=kube-scheduler.conf.j2 dest=/etc/default/kube-scheduler
            owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-masters' in group_names"

- name: copy kube-scheduler.service
  template: src=kube-scheduler.service.j2 dest=/lib/systemd/system/kube-scheduler.service
            owner=root mode=0644
  when: "'kube-masters' in group_names"

- name: copy kubelet config
  template: src=kubelet.conf.j2 dest=/etc/default/kubelet
            owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kubelet.service
  template: src=kubelet.service.j2 dest=/lib/systemd/system/kubelet.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy config
  template: src=kube-proxy.conf.j2 dest=/etc/default/kube-proxy
            owner={{ add_user }} mode=755
  become: yes
  become_user: root
  when: "'kube-nodes' in group_names"

- name: copy kube-proxy.service
  template: src=kube-proxy.service.j2 dest=/lib/systemd/system/kube-proxy.service
            owner=root mode=0644
  when: "'kube-nodes' in group_names"

# BUG FIX: this was gated to kube-masters only, but kube-nodes also receive
# new unit files (kubelet.service, kube-proxy.service) and must reload
# systemd before the restarts below.
- name: reload systemd
  shell: systemctl daemon-reload
  when: "('kube-masters' in group_names) or ('kube-nodes' in group_names)"

- name: restart kube-apiserver
  service: name=kube-apiserver state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-apiserver

- name: restart kube-controller-manager
  service: name=kube-controller-manager state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-controller-manager

- name: restart kube-scheduler
  service: name=kube-scheduler state=restarted enabled=yes
  when: "'kube-masters' in group_names"
  tags:
    - restart kube-scheduler

- name: restart kubelet
  service: name=kubelet state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart kubelet

- name: restart kube-proxy
  service: name=kube-proxy state=restarted enabled=yes
  when: "'kube-nodes' in group_names"
  tags:
    - restart kube-proxy
$ ansible-playbook -i hosts 04-kubernetes.yml