Mesos 설치하기

Container 2016.06.19 22:38
[ 서버 리스트 ]
mesos-master1   10.0.0.14      192.168.30.134
mesos-agent1     10.0.0.15      192.168.30.135
mesos-bootstrap 10.0.0.16      192.168.30.136
mesos-agent2     10.0.0.17      192.168.30.137



[ OpenStack 에 CentOS 7 다운 및 image 설치 ]
$ . ~/admin/admin-openrc
$ wget http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2

$ openstack image create \
          --file CentOS-7-x86_64-GenericCloud.qcow2 \
          --disk-format qcow2 --container-format bare \
          --public centos-7
$ openstack image list


## Flavor 생성
$ openstack flavor create --id mm1 --ram 32768 --disk 160 --vcpus 4 mesos.master
$ openstack flavor create --id ma1 --ram 16384 --disk 100 --vcpus 2 mesos.agent


## quota 조회
$ tenant=$(openstack project list | awk '/demo/ {print $2}')
$ nova quota-show --tenant $tenant
$ cinder quota-show $tenant
$ openstack quota show $tenant


## quota 업데이트
$ nova quota-update --ram -1 $tenant
$ nova quota-update --cores -1 $tenant
$ nova quota-update --instances -1 $tenant
$ nova quota-update --floating-ips -1 $tenant
$ nova quota-show --tenant $tenant


$ cinder quota-update --volumes -1 $tenant
$ cinder quota-update --snapshots -1 $tenant
$ cinder quota-update --backups -1 $tenant
$ cinder quota-update --gigabytes -1 $tenant
$ cinder quota-update --backup-gigabytes -1 $tenant
$ cinder quota-show $tenant


## nova boot
$ . ~/demo/demo-openrc
$ openstack server create --image 615962cd-4905-4be8-a442-b8ca9b75e720 \
       --flavor mm1 --nic net-id=03a6de58-9693-4c41-9577-9307c8750141 \
       --key-name magnum-key --security-group default mesos-master1

$ openstack server create --image 615962cd-4905-4be8-a442-b8ca9b75e720 \
       --flavor ma1 --nic net-id=03a6de58-9693-4c41-9577-9307c8750141 \
       --key-name magnum-key --security-group default mesos-agent1

$ openstack server create --image 615962cd-4905-4be8-a442-b8ca9b75e720 \
       --flavor ma1 --nic net-id=03a6de58-9693-4c41-9577-9307c8750141 \
       --key-name magnum-key --security-group default mesos-bootstrap


## 접속 테스트
$ sudo ip netns exec qrouter-68cfc511-7e75-4b85-a1ca-d8a09c489ccc \
ssh -i ~/magnum-key centos@10.0.0.14


## floating ip 생성
$ openstack ip floating create public
$ openstack ip floating list
$ openstack ip floating add 192.168.30.134 mesos-master1
$ openstack ip floating add 192.168.30.135 mesos-agent1
$ openstack ip floating add 192.168.30.136 mesos-bootstrap
$ openstack ip floating add 192.168.30.137 mesos-agent2



[ Bootstrap node 에서 ssh 복사 ]
$ ssh-keygen -t rsa
$ ssh-copy-id 10.0.0.14
$ ssh-copy-id 10.0.0.15
$ ssh-copy-id 10.0.0.17

$ sudo su -
# ssh-keygen -t rsa
# ssh-copy-id 10.0.0.14
# ssh-copy-id 10.0.0.15
# ssh-copy-id 10.0.0.17


[ Bootstrap, Master Node, Agent Node 공통 ]
$ sudo yum update
$ sudo yum upgrade -y
$ sudo systemctl stop firewalld && sudo systemctl disable firewalld


## CentOS 7.2 Upgrade
$ sudo yum upgrade --assumeyes --tolerant
$ sudo yum update --assumeyes
$ uname -r
3.10.0-327.18.2.el7.x86_64


## Docker 에서 OverlayFS 사용
$ sudo tee /etc/modules-load.d/overlay.conf <<-'EOF'
overlay
EOF


## reboot
$ reboot


## OverlayFS 확인 
$ lsmod | grep overlay


## Docker yum Repo
$ sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'
[dockerrepo]
name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/$releasever/
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
EOF


## Docker Daemon with OverlayFS
$ sudo mkdir -p /etc/systemd/system/docker.service.d && sudo tee /etc/systemd/system/docker.service.d/override.conf <<- EOF
[Service]
ExecStart=
ExecStart=/usr/bin/docker daemon --storage-driver=overlay -H fd://
EOF


## Install Docker engine, daemon and service
$ sudo yum install --assumeyes --tolerant docker-engine
$ sudo systemctl start docker
$ sudo systemctl enable docker

## Test docker
$ sudo docker ps



[ DC/OS 설치 ]
[ Bootstrap Node ]
## DC/OS installer download
$ mkdir -p installer
$ cd installer


[ WEB 으로 설치 ]
## start web installer (9000 port)
$ sudo bash dcos_generate_config.sh --web -v


## web 으로 접속


## ip detect script
$ vi ip_detect.sh
#!/bin/bash
# DC/OS ip-detect script: print this node's private IPv4 address on eth0.
# BUGFIX: the original used awk substr($2,0,9), which cuts the CIDR string
# ("10.0.0.14/24") at a fixed width and returns a wrong/partial address for
# most IPs. Split on "/" to strip the prefix length instead, and stop after
# the first "inet " line so secondary addresses are ignored.

IP=$(ip addr show eth0 | awk '/inet /{split($2, a, "/"); print a[1]; exit}')
echo "$IP"


[ GUI 로 설치하고 나서 다시 설치하려면 아래 삭제 필요 ]
## 모든 클러스터 노드
$ sudo rm -rf /opt/mesosphere

## bootstrap 노드
$ sudo rm -rf /var/lib/zookeeper



[ CLI 로 설치 ]
$ cd installer
$ mkdir -p genconf && cd genconf
$ sudo vi config.yaml
---
agent_list:
- 10.0.0.15
- 10.0.0.17
#bootstrap_url: file:///opt/dcos_install_tmp
bootstrap_url: http://192.168.30.136:80
cluster_name: DCOS
exhibitor_storage_backend: static
ip_detect_filename: /genconf/ip-detect
master_discovery: static
master_list:
- 10.0.0.14
process_timeout: 10000
resolvers:
- 8.8.8.8
- 8.8.4.4
ssh_port: 22
ssh_user: centos


$ vi ip-detect
#!/bin/bash
# DC/OS ip-detect script (genconf copy): print this node's private IPv4
# address on eth0, as referenced by ip_detect_filename in config.yaml.
# BUGFIX: the original awk substr($2,0,9) truncated the CIDR string
# ("10.0.0.14/24") at a fixed width, yielding a wrong/partial address for
# most IPs. Split on "/" to drop the prefix length, and exit after the
# first "inet " match so only the primary address is printed.

IP=$(ip addr show eth0 | awk '/inet /{split($2, a, "/"); print a[1]; exit}')
echo "$IP"


$ cd ..
$ sudo bash dcos_generate_config.sh


$ sudo docker run -d -p 80:80 -v $PWD/genconf/serve:/usr/share/nginx/html:ro nginx



[ 모든 Master Node 에서 실행 ]
$ mkdir /tmp/dcos && cd /tmp/dcos
$ curl -O http://10.0.0.16:80/dcos_install.sh
$ sudo bash dcos_install.sh master



[ 모든 Agent Node 에서 실행 ]
$ sudo yum install -y ipset unzip
$ sudo groupadd nogroup
$ mkdir /tmp/dcos && cd /tmp/dcos
$ curl -O http://10.0.0.16:80/dcos_install.sh
$ sudo bash dcos_install.sh slave


## Master Node 설치 상황 보기
$ http://192.168.30.134:8181/exhibitor/v1/ui/index.html





[ CLI 설치 ]
## pip 설치
$ curl -O https://bootstrap.pypa.io/get-pip.py
$ sudo python get-pip.py

$ sudo pip install virtualenv

$ mkdir ~/dcos && cd ~/dcos
$ curl -O https://downloads.dcos.io/dcos-cli/install.sh

$ bash install.sh . http://192.168.30.134

## dcos 명령어를 사용하려면 아래와 같이 실행
source /home/centos/dcos/bin/env-setup


## 다음의 파일 값을 보여줌   :   ~/.dcos/dcos.toml
$ dcos config show


[ package 조회 및 서비스 설치 ]
$ dcos package search
arangodb          0.3.0
cassandra         1.0.5-2.2.5
chronos           2.4.0
jenkins           0.2.3
kafka             1.0.9-0.10.0.0
marathon          1.1.1
spark             1.0.0-1.6.1-2
avi               16.2
calico            0.1.0
concord           0.3.16.4
confluent         1.0.3-3.0.0
crate             0.1.0
datadog           5.4.3
elasticsearch     0.7.0
etcd              0.0.3
exhibitor         1.0.0
hdfs              2.5.2-0.1.9
hue               0.0.1
kafka-manager     1.3.0.8
linkerd           0.6.0-0.1
marathon-lb       1.2.2
memsql            0.0.1
mr-redis          0.0.1
mysql             5.7.12
namerd            0.6.0-0.1
nginx             1.8.1
openvpn           0.0.0-0.1
openvpn-admin     0.0.0-0.1
quobyte           1.2.1
riak              0.1.1
ruxit             0.1
spark-notebook    0.1.0
storm             0.1.0
vamp              0.8.5
weavescope        0.15.0                                                 
weavescope-probe  0.15.0
zeppelin          0.6.0


$ dcos package install zeppelin
$ dcos package install spark










Posted by Kubernetes Korea co-leader seungkyua@gmail.com

[ VMWare Player Download ]

https://my.vmware.com/en/web/vmware/free#desktop_end_user_computing/

vmware_workstation_player/12_0


[ CentOS 7 다운로드 ]

http://isoredirect.centos.org/centos/7/isos/x86_64/CentOS-7-x86_64-Minimal-1511.iso



[ Network 설정 ]

VMnet8 192.168.75.1


https://www.lesstif.com/pages/viewpage.action?pageId=13631535

https://www.centos.org/docs/5/html/Deployment_Guide-en-US/s1-dhcp-configuring-client.html


vi /etc/sysconfig/network-scripts/ifcfg-eno16777728

TYPE=Ethernet

BOOTPROTO=none

DEFROUTE=yes

PEERDNS=yes

PEERROUTES=yes

IPV4_FAILURE_FATAL=no

IPV6INIT=yes

IPV6_AUTOCONF=yes

IPV6_DEFROUTE=yes

IPV6_PEERDNS=yes

IPV6_PEERROUTES=yes

IPV6_FAILURE_FATAL=no

NAME=eno16777728

UUID=aa6807ce-8df6-428d-8af3-f21915570efb

DEVICE=eno16777728

ONBOOT=yes

PREFIX=24

GATEWAY=192.168.75.2

DNS1=192.168.75.2

IPADDR=192.168.75.133



service network restart



[ sudo 세팅 ]

stack   ALL=(ALL:ALL) NOPASSWD:ALL



[ 기술적 컴포넌트 ]

컨테이너 포맷 : libcontainer (네이티브 리눅스 컨테이너 포맷),  lxc (일반적인 컨테이너 포맷)

리눅스 커널 Namespace : 파일시스템, 프로세스, 네트워크간 독립

리눅스 커널 cgroups : CPU, 메모리 고립 및 그룹핑

copy-on-write(CoW) : 파일시스템이 복사-쓰기로 생성. 파일시스템이 레이어로 되어 있음

로그 : STDOUT, STDERR, STDIN 이 로그로 쓰여짐

Interactive shell : pseudo-tty 를 생성하여 STDIN 을 연결, 컨테이너와 Interactive shell로 통신



[ Disk Type ]

AUFS, zfs, btrfs, vfs, Device-mapper, overlayfs



[ 구성 요소 ]

Docker client             : DOCKER_HOST=tcp://192.168.75.133:2375

Docker server            : /etc/sysconfig/docker, /etc/sysconfig/docker-network

Docker images

Docker container



[ Container boot 순서 ]

bootfs -> 컨테이너 메모리로 이동 -> bootfs umount -> initrd 가 사용하는 RAM 해제 ->
rootfs mount (읽기 전용 모드, os 이미지) -> 읽기,쓰기 파일 시스템 mount



[ Device Mapper ]

sudo yum install -y device-mapper


ls -l /sys/class/misc/device-mapper

sudo grep device-mapper /proc/devices

sudo modprobe dm_mod




[ 사전 필요 패키지 설치 ]

sudo yum -y update

sudo yum -y install git tree


yum whatprovides netstat

yum -y install net-tools


 

[ CentOS6 docker 설치 ]

$ sudo rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm

$ sudo yum -y install lxc-docker



## docker 설치

$ sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'

[dockerrepo]

name=Docker Repository

baseurl=https://yum.dockerproject.org/repo/main/centos/7/

enabled=1

gpgcheck=1

gpgkey=https://yum.dockerproject.org/gpg

EOF


$ sudo yum install docker-engine


## docker 옵션 수정

$ sudo vi /usr/lib/systemd/system/docker.service

ExecStart=/usr/bin/docker daemon -H unix:///var/run/docker.sock \

-H tcp://0.0.0.0:2375


$ sudo systemctl daemon-reload

$ sudo systemctl restart docker

$ sudo systemctl status docker


## 부팅 때 자동으로 실행

$ sudo systemctl enable docker


## stack 유저에 docker 그룹을 추가

$ sudo usermod -aG docker stack



## docker overlayFS 적용

$ sudo tee /etc/modules-load.d/overlay.conf <<-'EOF'

overlay

EOF



## reboot

$ sudo reboot



## OverlayFS 확인 

$ lsmod | grep overlay


$ sudo mkdir -p /etc/systemd/system/docker.service.d && sudo tee /etc/systemd/system/docker.service.d/override.conf <<- EOF

[Service]

ExecStart=

ExecStart=/usr/bin/docker daemon --storage-driver=overlay -H fd:// \

-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375

EOF


$ sudo systemctl daemon-reload

$ sudo systemctl restart docker




## client 접속

## 서버 옵션을 tcp 로 설정했을 때 아래로 접속

export DOCKER_HOST=tcp://192.168.75.133:2375

$ docker ps



## 서버 옵션을 아무 옵션을 안주면 unix socket 으로 접속

docker -H unix:///var/run/docker.sock ps


$ export DOCKER_HOST=unix:///var/run/docker.sock

$ docker ps


## 환경변수로 추가

$ vi ~/.bashrc

export DOCKER_HOST=tcp://192.168.75.133:2375





## docker uninstall

$ yum list installed | grep docker

$ sudo yum -y remove docker-engine.x86_64


## image, 컨테이너까지 삭제

$ sudo rm -rf /var/lib/docker




# yum 으로 docker 설치 후 최신으로 업그레이드

https://get.docker.com 접속해서 참고




## docker run 및 overlayfs 보기, 스토리지 선택

## docker 스토리지에 대한 설명

http://play.joinc.co.kr/w/man/12/docker/storage


## https://docs.docker.com/engine/userguide/storagedriver/selectadriver/


## Docker image 빌드

$ docker build -t example/docker-node-hello:latest .

$ docker build --no-cache -t example/docker-node-hello:latest .



## Docker run

$ docker run -d -p 8090:8080 --name node-hello example/docker-node-hello:latest


$ docker run -d -p 8090:8080 -e WHO="Seungkyu Ahn" --name node-hello \

          example/docker-node-hello:latest



## docker 안으로 들어가서 mounts 정보 확인

$ docker exec -it 3c3ca0ce3470 /bin/bash


root@3c3ca0ce3470:/data/app# touch bbb.txt

root@3c3ca0ce3470:/data/app# cat /proc/mounts | grep overlay

lowerdir=/var/lib/docker/overlay/

cc4f0662e566f0ad9069abfd523ff67c38a41488aaaa06d474cb027ca64cafa2/root

upperdir=/var/lib/docker/overlay/

ede9464970bb229267c8c548f8612e801002cec2d4f524378f5acb58ccde0d98/upper

workdir=/var/lib/docker/overlay/

ede9464970bb229267c8c548f8612e801002cec2d4f524378f5acb58ccde0d98/work




## docker volume 확인

# cd /var/lib/docker/overlay

# cd ede9464970bb229267c8c548f8612e801002cec2d4f524378f5acb58ccde0d98

# ls -al

-rw-r--r--.  1 root root   64 Jul 11 03:22 lower-id

drwx------.  2 root root    6 Jul 11 03:22 merged

drwxr-xr-x.  9 root root 4096 Jul 11 03:26 upper

drwx------.  3 root root   17 Jul 11 03:22 work


# cat lower-id

cc4f0662e566f0ad9069abfd523ff67c38a41488aaaa06d474cb027ca64cafa2


# find . -name bbb.txt

./upper/data/app/bbb.txt



## overlay volume 의 구조

기본 image volume : lower-id 의 root directory

컨테이너 volume : upper directory




$ docker 소스 다운로드

go get github.com/docker/docker


## dependency (cmd/docker/docker.go)

$ go get github.com/Sirupsen/logrus


## dependency (cmd/dockerd/daemon.go)

$ go get github.com/docker/distribution

$ go get github.com/docker/go-connections



## docker Contributor 가 되고 싶으면 아래 URL 참조

## https://github.com/docker/docker/tree/master/project

## github 에서 docker/docker 프로젝트를 fork


git clone https://github.com/seungkyua/docker.git docker-fork

$ cd docker-fork


git config --local user.name "Seungkyu Ahn"

$ git config --local user.email "seungkyua@gmail.com"


$ git remote add upstream https://github.com/docker/docker.git


$ git config --local -l

$ git remote -v


$ git checkout -b dry-run-test


$ git branch

* dry-run-test

  master


$ touch TEST.md


$ git add TEST.md



## -s 옵션은 커밋 메세지에 정보를 자동으로 넣어 줌.

## Signed-off-by: Seungkyu Ahn <seungkyua@gmail.com>

## commit 로그에 들어가야할 내용

## 버그 수정일 때

fixes #xxxx,  closes #xxxx


**- What I did**

**- How I did it**

**- How to verify it**

**- Description for the changelog**

<!--

Write a short (one line) summary that describes the changes in this

pull request for inclusion in the changelog:

-->



$ git commit -s -m "Making a dry run test."


$ git push --set-upstream origin dry-run-test

Username for 'https://github.com': seungkyua

Password for 'https://seungkyua@github.com':




## Mac 에 docker 설치

## https://docs.docker.com/machine/install-machine/

$ docker-machine create --driver virtualbox default-docker

docker-machine ls

$ docker-machine env default-docker

$ eval "$(docker-machine env default-docker)"



## contribute 계속

## build a development environment image and run it in a container.

$ make shell


## In docker container, make docker binary

root@143823c11fba:/go/src/github.com/docker/docker# hack/make.sh binary


## binary 복사

# cp bundles/1.12.0-dev/binary-client/docker* /usr/bin

# cp bundles/1.12.0-dev/binary-daemon/docker* /usr/bin


## docker running background

# docker daemon -D&




## 다시 docker 로 접속하여 파일 수정 후 컴파일

# vi api/client/container/attach.go

42           flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN \

43           (standard in)")


# hack/make.sh binary

# cp bundles/1.12.0-dev/binary-client/docker* /usr/bin

# cp bundles/1.12.0-dev/binary-daemon/docker* /usr/bin

# docker daemon -D&


## 변경된 내용 확인

# docker attach --help



## 테스트 (arguments 에 따라 수행하는 테스트의 종류가 다름)

## test : Run the unit, integration and docker-py tests.

## test-unit : Run just the unit tests.

## test-integration-cli : Run the test for the integration command line interface.

## test-docker-py : Run the tests for Docker API client.

$ make test



## development container 안에서 테스트 하는 방법

$ docker run --privileged --rm -ti -v `pwd`:/go/src/github.com/docker/docker \

docker-dev:dry-run-test /bin/bash


## hack/make.sh 를 활용하되 dynbinary binary cross 는 반드시 target 으로 지정

# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py

or unit test 만 수행

# hack/make.sh dynbinary binary cross test-unit



## Unit Test 수행

$ TESTDIRS='opts' TESTFLAGS='-test.run ^TestValidateIPAddress$' make test-unit



## Integration Test 수행

$ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test-integration-cli

or development container 안에서 테스트

# TESTFLAGS='-check.f TestBuild*' hack/make.sh binary test-integration-cli



## 이슈를 생성하고 local branch, remote repository, docker repository 를 맞추는 방법

## https://docs.docker.com/opensource/workflow/find-an-issue/

## 이슈에 labels 은 자신의 상황에 맞게 두 종류를 붙여야 함

exp/beginner, exp/intermediate, exp/expert

kind/bug, kind/docs, kind/enhancement, kind/feature, kind/question


## issue 에 #dibs 라고 코멘트를 달면 자기가 하겠다는 뜻임.


## master 로 체크아웃

$ git checkout master


## docker repository 로 부터 최신 코드를 local 로 맞춤

git fetch upstream master

$ git rebase upstream/master


## local 최신 코드를 remote repository 에 맞춤

$ git push origin master


## 이슈번호 11038 에 맞는 branch 생성

$ git checkout -b 11038-fix-rhel-link


## 혹시 몰라 docker repository 의 최신코드를 branch 에 맞춤

$ git rebase upstream/master





[ docker 가 안 뜰 때 or 에러 일 때 깨끗하게 지우기 ]

systemctl status docker.service     


# mount 에러 일 때

du -h /var/lib/docker/

/var/lib/docker/container/ 아래의 파일을 삭제

/var/lib/docker/devicemapper/metadata/ 아래의 파일을 삭제

/var/lib/docker/devicemapper/mnt/ 아래의 파일을 삭제

/var/lib/docker/volumes/ 아래 파일을 삭제

/var/lib/docker/graph/ 아래 파일을 삭제

/var/lib/docker/linkgraph.db 파일 삭제


/var/run/docker.pid 삭제

/var/run/docker.sock 삭제


# device mapper 삭제

lsblk

grep docker /proc/*/mounts

systemd-cgls

dmsetup ls

ls -al /dev/mapper/docker-*      # 결과 리스트를 $dm 이라 한다면

umount $dm

dmsetup remove $dm




[ Debug 설정 ]

# /usr/lib/systemd/system/docker.service 을 수정하면

/etc/systemd/system/multi-user.target.wants/docker.service 와

/lib/systemd/system/docker.service 도 자동으로 수정됨


vi /usr/lib/systemd/system/docker.service

...

ExecStart=/bin/sh -c 'DEBUG=1 /usr/bin/docker daemon $OPTIONS \

...


sudo systemctl daemon-reload

sudo systemctl restart docker



[ system service 확인 ]

systemctl list-units --type service

systemctl list-unit-files



[ docker 설치 Test ]

docker run --rm -ti centos:latest /bin/bash



[ Sample Dockerfile ]

FROM node:0.10

MAINTAINER Anna Doe <anna@example.com>

LABEL "rating"="Five Stars" "class"="First Class"


USER root

ENV AP /data/app

ENV SCPATH /etc/supervisor/conf.d

RUN apt-get -y update


# The daemons

RUN apt-get -y install supervisor

RUN mkdir -p /var/log/supervisor

   

# Supervisor Configuration

ADD ./supervisord/conf.d/* $SCPATH/


# Application Code

ADD *.js* $AP/

WORKDIR $AP

RUN npm install

CMD ["supervisord", "-n"]



git clone https://github.com/spkane/docker-node-hello.git

cd docker-node-hello


tree -a -I .git







[ Docker Hub Registry ]

# 사용자 로그인

docker login


Username: seungkyua

Password: 

Email: seungkyua@gmail.com

WARNING: login credentials saved in /root/.docker/config.json

Login Succeeded


# 사용자 로그아웃

docker logout


# hub 에 push

docker tag example/docker-node-hello seungkyua/docker-node-hello



# restart 옵션

docker run -ti --restart=on-failure:3 -m 200m --memory-swap=300m \

     progrium/stress --cpu 2 --io 1 --vm 2 --vm-bytes 128M --timeout 120s



# stop

docker stop -t 25 node-hello       #  stop 은 SIGTERM,   25초 기다리고 t 옵션은 SIGKILL




[ container, image, volume 삭제하기 ] 

# delete all stopped docker

docker rm $(docker ps -a -q)


# delete untagged images

docker rmi $(docker images -q -f "dangling=true")


# delete volumes

docker volume rm $(docker volume ls -qf dangling=true)



[ Docker 정보 ]

docker version

docker info




[ docker inspect ]

docker pull ubuntu:latest

docker run -d -t --name ubuntu ubuntu /bin/bash

docker inspect node-hello

docker inspect --format='{{.State.Running}}' node-hello

docker inspect -f '{{.State.Pid}}' node-hello

docker inspect -f '{{.NetworkSettings.IPAddress}}' node-hello

docker inspect -f '{{.Name}} {{.State.Running}}' ubuntu node-hello


# list all port bindings

docker inspect -f '{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' node-hello


# specific port mapping

docker inspect -f '{{(index (index .NetworkSettings.Ports "8080/tcp") 0).HostPort}}' node-hello


# json print

docker inspect -f '{{json .Config}}' node-hello | python -mjson.tool



[ Docker 안으로 들어가는 두가지 방법 ]

docker exec -it ubuntu /bin/bash


docker inspect ubuntu | grep \"Pid\":

sudo nsenter --target [Pid] --mount --uts --ipc --net --pid



[ docker logs & stats ]

docker logs node-hello

docker stats node-hello


curl -s http://192.168.75.133:2375/v1.21/containers/node-hello/stats | head -1 | python -mjson.tool


# cAdvisor

docker run \

     --volume=/:/rootfs:ro \

     --volume=/var/run:/var/run:rw \

     --volume=/sys:/sys:ro \

     --volume=/var/lib/docker/:/var/lib/docker:ro \

     --publish=8091:8080 \

     --detach=true \

     --name=cadvisor \

     google/cadvisor:latest




[ ssh dockerfile ]

vi Dockerfile


FROM ubuntu:14.04

MAINTAINER Sven Dowideit <SvenDowideit@docker.com>

ENV REFRESHED_AT 2016-04-30


RUN apt-get update && apt-get install -y openssh-server

RUN mkdir /var/run/sshd

RUN echo 'root:screencast' | chpasswd

RUN sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config


# SSH login fix. Otherwise user is kicked off after login

RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd


ENV NOTVISIBLE "in users profile"

RUN echo "export VISIBLE=now" >> /etc/profile


EXPOSE 22

CMD ["/usr/sbin/sshd", "-D"]



docker build -t example/sshd .

docker run -d -P --name sshd example/sshd

docker port sshd



# docker 이미지 생성과정 보기

docker history 3ae93df2b9a5



[ Dockerfile 명령어 설명 ]

CMD : 컨테이너를 런칭할 때 실행하는 명령어, 하나만 설정할 수 있음

          docker run -d --name sshd example/sshd /usr/sbin/sshd -D 와 같이 오버라이드 가능


ENTRYPOINT : docker run 의 마지막 인자를 활용하여 명령어를 오버라이드할 수 없음

                      ENTRYPOINT ["/usr/sbin/sshd"]

                      CMD ["-T"]

                     docker run 의 마지막 인자를 -D 로 주면 -T 가 오버라이드되어 포그라운드로 실행


WORKDIR : working directory 변경

                  WORKDIR /opt/webapp/db

                  docker run -it -w /var/log ubuntu pwd  도커실행 디렉토리가 /var/log 가 된다


ENV : 환경 변수를 설정하는데 사용, 그 다음 RUN 명령어를 위해 사용됨, 컨테이너에서 지속됨

         ENV RVM_PATH /home/rvm/

         RUN gem install unicorn

         RVM_PATH=/home/rvm/ gem install unicorn 와 같음

        docker run -it -e "WEB_PORT=8080" ubuntu env 를 하면 WEB_PORT가 설정되어 있음


USER : 이미지를 실행시키는 사용자

           USER nginx

           docker run -d -u nginx example/nginx 과 같이 -u 로 오버라이드 가능


VOLUME : 컨테이너에 볼륨을 추가한다. host 의 볼륨은 /var/lib/docker/volumes/ 여기에 위치

               여러 볼륨을 배열로 지정할 수 있다. 도커안에서 해당 위치로 볼륨 접근이 가능하다.

               VOLUME ["/opt/project", "/data" ]

               docker run -it -v /opt/project -v /data ubuntu /bin/bash 와 같이 -v 옵션과 동일


ADD : 파일과 디렉토리를 복사한다.

         ADD ../app /opt/

         host 빌드 디렉토리 위의 app 디렉토리를 컨테이너의 /opt/ 디렉토리에 복사한다.

         목적지가 / 로 끝나면 소스가 디렉토리라는 의미하고 목적지가 / 가 없으면 파일을 의미한다.

         ADD latest.tar.gz /var/www/wordpress/ 

         latest.tar.gz 압축파일을 /var/www/wordpress/ 디렉토리에 해제한다.

         목적 디렉토리에 같은 이름을 갖는 파일이나 디렉토리가 존재하면 덮어쓰지는 않는다.

         목적 경로가 존재하지 않으면 모드는 0755, UID와 GID 는 0으로 새롭게 생성된다. 


COPY : ADD와 비슷하나 빌드 디렉토리 밖의 파일을 복사 못하고 추출이나 압축해제 기능은 없다.

           COPY conf.d/ /etc/apache2/

           빌드 디렉토리 내의 파일, 디렉토리만 복사할 수 있으며, 파일시스템의 메타데이터도 복사

           UID 와 GID 는 0 로 된다.


ONBUILD : 이미지에 트리거를 추가한다. 새로운 명령어 빌드 과정에 삽입한다.

                ONBUILD ADD . /app/src

                ONBUILD RUN cd /app/src && make

                해당 이미지를 상속해서 새로운 이미지를 빌드할 때 자동으로 ADD 와 RUN 이 실행

                트리거는 한 번만 상속된다.




[ Dockerfile ]

# github 에서 다운로드

$ git clone https://github.com/jamtur01/dockerbook-code

$ cd dockerbook-code/code/5/website

$ docker build -t example/nginx .

$ docker run -d -p 80 --name website \

   -v $PWD/website:/var/www/html/website:ro example/nginx nginx


or



# nginx

$ vi Dockerfile

FROM ubuntu:14.04

MAINTAINER James Turnbull "james@example.com"

ENV REFRESHED_AT 2014-06-01


RUN apt-get update

RUN apt-get -y -q install nginx


RUN mkdir -p /var/www/html/website

ADD nginx/global.conf /etc/nginx/conf.d/

ADD nginx/nginx.conf /etc/nginx/


EXPOSE 80




$ vi nginx/global.conf

server {

        listen          0.0.0.0:80;

        server_name     _;


        root            /var/www/html/website;

        index           index.html index.htm;


        access_log      /var/log/nginx/default_access.log;

        error_log       /var/log/nginx/default_error.log;

}





$ vi nginx/nginx.conf

user www-data;

worker_processes 4;

pid /run/nginx.pid;

daemon off;


events {  }


http {

  sendfile on;

  tcp_nopush on;

  tcp_nodelay on;

  keepalive_timeout 65;

  types_hash_max_size 2048;

  include /etc/nginx/mime.types;

  default_type application/octet-stream;

  access_log /var/log/nginx/access.log;

  error_log /var/log/nginx/error.log;

  gzip on;

  gzip_disable "msie6";

  include /etc/nginx/conf.d/*.conf;

}



$ docker build -t example/nginx .


$ docker history -H --no-trunc=true 3e1cdbcccf11


$ mkdir website; cd website

$ wget https://raw.githubusercontent.com/jamtur01/dockerbook-\

code/master/code/5/website/website/index.html


$ cd ..

$ docker run -d -p 80 --name website \

   -v $PWD/website:/var/www/html/website:ro example/nginx nginx





Jenkins by 구교준 (@Bliexsoft)

$ cd ~/Documents/Docker

$ git clone https://github.com/jenkinsci/docker.git jenkinsci


## 아래  Maven 설치 추가

$ vi Dockerfile

67 # Install Maven - Start

68 USER root

69 

70 ENV MAVEN_VERSION 3.3.9

71 

72 RUN mkdir -p /usr/share/maven \

   && curl -fsSL http://apache.osuosl.org/maven/maven-3/$MAVEN_VERSION/\
binaries/apache-maven-$MAVEN_VERSION-bin.tar.gz \

       | tar -xzC /usr/share/maven --strip-components=1 \

         && ln -s /usr/share/maven/bin/mvn /usr/bin/mvn

76 

77         ENV MAVEN_HOME /usr/share/maven

78 

79         VOLUME /root/.m2

80 # Install Maven - End

81 

82 # Setting Jenkins

83 USER jenkins

84 

85 COPY config.xml /var/jenkins_home/config.xml

86 COPY hudson.tasks.Maven.xml /var/jenkins_home/hudson.tasks.Maven.xml

87 

88 COPY plugins.txt /usr/share/jenkins/ref/

89 RUN /usr/local/bin/plugins.sh /usr/share/jenkins/ref/plugins.txt




$ vi config.xml

1 <?xml version='1.0' encoding='UTF-8'?>

2 <hudson>

3   <disabledAdministrativeMonitors/>

4   <version>1.651.2</version>

5   <numExecutors>2</numExecutors>

6   <mode>NORMAL</mode>

7   <useSecurity>true</useSecurity>

8   <authorizationStrategy 

                   class="hudson.security.AuthorizationStrategy$Unsecured"/>

9   <securityRealm class="hudson.security.SecurityRealm$None"/>

10   <disableRememberMe>false</disableRememberMe>

11   <projectNamingStrategy 

       class="jenkins.model.ProjectNamingStrategy$DefaultProjectNamingStrategy"/>

12   <workspaceDir>${JENKINS_HOME}/workspace/${ITEM_FULLNAME}

       </workspaceDir>

13   <buildsDir>${ITEM_ROOTDIR}/builds</buildsDir>

14   <jdks>

15     <jdk>

16       <name>jdk8</name>

17       <home>/usr/lib/jvm/java-8-openjdk-amd64</home>

18       <properties/>

19     </jdk>

20   </jdks>

21   <viewsTabBar class="hudson.views.DefaultViewsTabBar"/>

22   <myViewsTabBar class="hudson.views.DefaultMyViewsTabBar"/>

23   <clouds/>

24   <quietPeriod>5</quietPeriod>

25   <scmCheckoutRetryCount>0</scmCheckoutRetryCount>

26   <views>

27     <hudson.model.AllView>

28       <owner class="hudson" reference="../../.."/>

29       <name>All</name>

30       <filterExecutors>false</filterExecutors>

31       <filterQueue>false</filterQueue>

32       <properties class="hudson.model.View$PropertyList"/>

33     </hudson.model.AllView>

34   </views>

35   <primaryView>All</primaryView>

36   <slaveAgentPort>50000</slaveAgentPort>

37   <label></label>

38   <nodeProperties/>

39   <globalNodeProperties/>

40 </hudson>



$ vi hudson.tasks.Maven.xml

1 <?xml version='1.0' encoding='UTF-8'?>

2 <hudson.tasks.Maven_-DescriptorImpl>

3     <installations>

4         <hudson.tasks.Maven_-MavenInstallation>

5             <name>maven3.3.9</name>

6             <home>/usr/share/maven</home>

7             <properties/>

8         </hudson.tasks.Maven_-MavenInstallation>

9     </installations>

10 </hudson.tasks.Maven_-DescriptorImpl>



$ vi plugins.txt

maven-plugin:2.13

credentials:2.0.7

plain-credentials:1.1

token-macro:1.12.1

cloudfoundry:1.5

klocwork:1.18

ssh-credentials:1.11

matrix-project:1.6

mailer:1.16

scm-api:1.0

promoted-builds:2.25

parameterized-trigger:2.4

git-client:1.19.6

git:2.4.4

github-api:1.75

github:1.19.1










[ Docker in docker ]

https://github.com/jpetazzo/dind




[ Jenkins ]

$ cd dockerbook-code/code/5/jenkins

$ vi Dockerfile

FROM ubuntu:14.04

MAINTAINER james@example.com

ENV REFRESHED_AT 2014-06-01


RUN apt-get update -qq && apt-get install -qqy curl apt-transport-https

RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 \

--recv-keys 58118E89F3A912897C070ADBF76221572C52609D

RUN echo deb https://apt.dockerproject.org/repo ubuntu-trusty main > \

/etc/apt/sources.list.d/docker.list

RUN apt-get update -qq && \

apt-get install -qqy iptables ca-certificates openjdk-7-jdk git-core docker-engine


ENV JENKINS_HOME /opt/jenkins/data

ENV JENKINS_MIRROR http://mirrors.jenkins-ci.org


RUN mkdir -p $JENKINS_HOME/plugins

RUN curl -sf -o /opt/jenkins/jenkins.war -L \

$JENKINS_MIRROR/war-stable/latest/jenkins.war


RUN for plugin in chucknorris greenballs scm-api git-client git ws-cleanup ;\

    do curl -sf -o $JENKINS_HOME/plugins/${plugin}.hpi \

       -L $JENKINS_MIRROR/plugins/${plugin}/latest/${plugin}.hpi ; done


ADD ./dockerjenkins.sh /usr/local/bin/dockerjenkins.sh

RUN chmod +x /usr/local/bin/dockerjenkins.sh


VOLUME /var/lib/docker


EXPOSE 8080


ENTRYPOINT [ "/usr/local/bin/dockerjenkins.sh" ]




$ vi dockerjenkins.sh

#!/bin/bash
# Entrypoint for the Docker-in-Docker Jenkins image: prepares the cgroup
# filesystem, closes inherited file descriptors, starts an inner Docker
# daemon in the background, then runs Jenkins in the foreground.
# NOTE(review): the container must be run with --privileged for the
# tmpfs/cgroup mounts below to succeed (see the error message).


# First, make sure that cgroups are mounted correctly.

CGROUP=/sys/fs/cgroup


# Create the cgroup mount point if it does not exist yet.
[ -d $CGROUP ] ||

  mkdir $CGROUP


# Mount a tmpfs at the cgroup root unless something is already mounted
# there; if the mount fails (container not privileged), abort.
mountpoint -q $CGROUP ||

  mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {

    echo "Could not make a tmpfs mount. Did you use -privileged?"

    exit 1

  }


# Mount the cgroup hierarchies exactly as they are in the parent system.

# /proc/1/cgroup lists one "<id>:<subsystems>:<path>" entry per hierarchy;
# field 2 is the subsystem name (cpu, memory, ...), used as the mount dir.
for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)

do

  [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS

  mountpoint -q $CGROUP/$SUBSYS ||

    mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS

done


# Now, close extraneous file descriptors.

# Descriptors leaked in from the parent environment could pin resources
# inside this container; keep only stdin/stdout/stderr (0/1/2).
pushd /proc/self/fd

for FD in *

do

  case "$FD" in

  # Keep stdin/stdout/stderr

  [012])

    ;;

  # Nuke everything else

  *)

    eval exec "$FD>&-"

    ;;

  esac

done

popd


# Run the inner Docker daemon in the background, then exec Jenkins so it
# replaces this shell and receives signals directly.
docker daemon &

exec java -jar /opt/jenkins/jenkins.war







$ docker build -t example/dockerjenkins .

$ docker run -p 8080:8080 --name jenkins --privileged -d example/dockerjenkins




## 다른 Host 의 docker daemon 에 접속하기 (cert 는 안해도 됨)

## centos

export DOCKER_HOST=tcp://192.168.75.133:2375



## boot2docker

$ export DOCKER_HOST=tcp://192.168.59.103:2376

export DOCKER_TLS_VERIFY=1 

export DOCKER_CERT_PATH=/Users/ahnsk/.boot2docker/certs/boot2docker-vm











Posted by Kubernetes Korea co-leader seungkyua@gmail.com

ca-key.pem -> ca.pem

server-key.pem -> server.csr -> server.csr + (ca-key.pem + ca.pem) -> server.cert

client-key.pem -> client.csr -> client.csr + (ca-key.pem + ca.pem) -> client.cert



[ CA 생성 ]


1. ca-key.pem => ca.pem    (ca.crt: client ca 파일)

$ sudo mkdir -p /etc/docker

$ cd /etc/docker

$ echo 01 | sudo tee ca.srl


$ sudo openssl genrsa -des3 -out ca-key.pem

Enter pass phrase for ca-key.pem:

Verifying - Enter pass phrase for ca-key.pem:


$ sudo openssl req -new -days 365 -key ca-key.pem -out ca.pem

Enter pass phrase for ca-key.pem:

...

Common Name (e.g. server FQDN or Your name) []: *         (ex : www.ahnseungkyu.com)



[ Server Cert 생성 ]


1. server-key.pem => server.csr    (Common Name : e.g. server FQDN 이 중요)

$ sudo openssl genrsa -des3 -out server-key.pem

Enter pass phrase for server-key.pem:

Verifying - Enter pass phrase for server-key.pem:


$ sudo openssl req -new -key server-key.pem -out server.csr

Enter pass phrase for server-key.pem:

...

Common Name (e.g. server FQDN or Your name) []: *         (ex : www.ahnseungkyu.com)


2. ca-key.pem + ca.pem + server.csr => server-cert.pem (server.cert: 서버 cert 파일)

$ sudo openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem -out server-cert.pem

Enter pass phrase for ca-key.pem:


3. server-key.pem 의 phrase 를 삭제 (server.key: 서버 private key 파일)

$ sudo openssl rsa -in server-key.pem -out server-key.pem

Enter pass phrase for server-key.pem:

writing RSA key


4. 퍼미션 수정

$ sudo chmod 600 /etc/docker/server-key.pem /etc/docker/server-cert.pem /etc/docker/ca-key.pem /etc/docker/ca.pem




[ Docker 데몬 설정 ]


Ubuntu, Debian : /etc/default/docker

RHEL, Fedora    : /etc/sysconfig/docker

systemd 버전     : /usr/lib/systemd/system/docker.service




[ systemd Docker Server 실행 ]


ExecStart=/usr/bin/docker -d -H tcp://0.0.0.0:2376 --tlsverify --tlscacert=/etc/docker/ca.pem --tlscert=/etc/docker/server-cert.pem --tlskey=/etc/docker/server-key.pem


[ Docker 데몬 reload 및 재시작 필요 ]

$ sudo systemctl --system daemon-reload




[ Client Cert 생성 ]


1. client-key.pem => client.csr

$ sudo openssl genrsa -des3 -out client-key.pem

Enter pass phrase for client-key.pem:

Verifying - Enter pass phrase for client-key.pem:


sudo openssl req -new -key client-key.pem -out client.csr

Enter pass phrase for client-key.pem:

...

Common Name (e.g. server FQDN or Your name) []:



2. Client 인증 속성 추가

$ echo extendedKeyUsage = clientAuth > extfile.cnf



3. ca-key.pem + ca.pem + client.csr => client-cert.pem

$ sudo openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem -out client-cert.pem -extfile extfile.cnf

Enter pass phrase for ca-key.pem:



4. client-key 의 phrase 를 삭제

$ sudo openssl rsa -in client-key.pem -out client-key.pem

Enter pass phrase for client-key.pem:

writing RSA key




[ Docker 클라이언트에 ssl 설정 ]


$ mkdir -p ~/.docker

$ cp ca.pem ~/.docker/ca.pem

$ cp client-key.pem ~/.docker/key.pem

$ cp client-cert.pem ~/.docker/cert.pem

$ chmod 600 ~/.docker/key.pem ~/.docker/cert.pem


# docker 연결 테스트

$ sudo docker -H=docker.example.com:2376 --tlsverify info



# server

# sudo docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem \

--tlskey=server-key.pem -H=0.0.0.0:4243


# client -- note that this uses --tls instead of --tlsverify, which I had trouble with 

# docker --tls --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \

-H=dns-name-of-docker-host:4243









Posted by Kubernetes Korea co-leader seungkyua@gmail.com
TAG ca, CERT, docker, SSL, TLS

https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/connecting-applications.md


사전 Docker Registry 를 만든 다음에

http://www.ahnseungkyu.com/206


1. tomcat RC 생성

$ cd Documents/registry/tomcat


$ vi tomcat8-rc.yaml


# ReplicationController: keeps one replica of the tomcat8 pod running,
# pulling the image from the private registry (privateregistry.com:5000).
apiVersion: v1
kind: ReplicationController
metadata:
  name: tomcat8
  labels:
    name: tomcat8
spec:
  replicas: 1
  # Pods matching this label are owned/managed by the controller.
  selector:
    name: tomcat8
  template:
    metadata:
      labels:
        name: tomcat8
    spec:
      containers:
      - name: tomcat8
        image: privateregistry.com:5000/tomcat-jre8:8.0.30
        ports:
        - containerPort: 8080   # Tomcat HTTP connector



$ kubectl -s 192.168.230.211:8080 create -f tomcat8-rc.yaml


$ kubectl -s 192.168.230.211:8080 get rc tomcat8                    # 조회


2. tomcat service 생성

$ vi tomcat8-svc.yaml


# NodePort Service exposing the tomcat8 pods on a cluster IP (port 8088)
# and on every node at port 30001.
apiVersion: v1
kind: Service
metadata:
  labels:
    name: tomcat8
  name: tomcat8
spec:
  ports:
    # the port that this service should serve on
    - port: 8088                     # the Service's own port
      targetPort: 8080             # container port inside the pod
      nodePort: 30001              # fixed port opened on every node
  # label keys and values that must match in order to receive traffic for this service
  selector:                            # links to the backing pods
    name: tomcat8
  type: NodePort


$ kubectl create -f tomcat8-svc.yaml



[ 서비스 확인 ]

http://192.168.75.212:30001/

http://192.168.75.213:30001/



$ kubectl describe pod tomcat8-5pchl

$ kubectl get rc

$ kubectl describe rc tomcat8

$ kubectl get service

$ kubectl describe service tomcat8


$ kubectl get endpoints


$ kubectl get event



[ label 로 조회하기 ]

$ kubectl get service -a -l name=tomcat8

$ kubectl get pods -l name=tomcat8 -o json | grep podIP


[ 전체 조회하기 ]

$ kubectl get --all-namespaces -a service


[ container 안으로 들어가기 ]

$ kubectl exec [ pod 명 ] -c [ Container 명 ] -i -t -- bash -il

$ kubectl exec tomcat8-5pchl -c tomcat8 -i -t -- bash -il


[ Built-in 서비스 확인 ]

$ kubectl cluster-info



[ 어떻게 접근하는지 ]

$ kubectl describe svc tomcat8


Name: tomcat8

Namespace: default

Labels: name=tomcat8

Selector:         name=tomcat8

Type: NodePort

IP:         192.168.230.17                         # Service ip

Port:         <unnamed> 8088/TCP          # Service port

NodePort:         <unnamed> 30001/TCP

Endpoints:         172.16.84.4:8080                       #  Pod ip, port

Session Affinity: None

No events.


# node01 혹은 node02 에서 서비스 IP 포트로 접속 가능

curl -k http://192.168.230.17:8088


# node01 혹은 node02 에서 pod 에 직접 호출

$ kubectl get pods -o json | grep -i podip

$ curl -k http://172.16.84.4:8080




$ kubectl exec tomcat8-5pchl -- printenv | grep SERVICE

KUBERNETES_SERVICE_HOST=192.168.230.1

KUBERNETES_SERVICE_PORT=443

KUBERNETES_SERVICE_PORT_HTTPS=443


$ kubectl scale rc tomcat8 --replicas=0; kubectl scale rc tomcat8 --replicas=2


$ kubectl get pods -l name=tomcat8 -o wide

NAME            READY     STATUS    RESTARTS   AGE       NODE

tomcat8-dqvcu   1/1       Running   0          35s       192.168.75.212

tomcat8-sppk6   1/1       Running   0          35s       192.168.75.212


$ kubectl exec tomcat8-dqvcu -- printenv | grep SERVICE

KUBERNETES_SERVICE_PORT=443

TOMCAT8_SERVICE_PORT=8088

KUBERNETES_SERVICE_HOST=192.168.230.1

KUBERNETES_SERVICE_PORT_HTTPS=443

TOMCAT8_SERVICE_HOST=192.168.230.17


3. DNS 확인

$ vi curlpod.yaml


# Throwaway pod with curl/nslookup available, used to verify that the
# tomcat8 service is resolvable and reachable from inside the cluster.
apiVersion: v1
kind: Pod
metadata:
  labels:
    name: curlpod
  name: curlpod
spec:
  containers:
  - image: radial/busyboxplus:curl
    # Sleep keeps the pod Running long enough to exec into it.
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
    name: curlcontainer
  restartPolicy: Always



$ kubectl create -f curlpod.yaml

kubectl describe pod curlpod



[ DNS 확인 ]

$ kubectl exec curlpod -- nslookup tomcat8

kubectl exec curlpod -- curl http://tomcat8:8088



$ kubectl exec curlpod -c curlcontainer -it -- /bin/sh -il



4. 각 인스턴스 pod 에 접속

https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/accessing-the-cluster.md#accessing-services-running-on-the-cluster


$ kubectl get pods


http://192.168.75.211:8080/api/v1/proxy/namespaces/default/pods/tomcat8-dqvcu/


# docker id 조회

docker ps -l -q












Posted by Kubernetes Korea co-leader seungkyua@gmail.com

먼저 kubernetes cluster 를 설치 해야 함

http://www.ahnseungkyu.com/200


1. Create self-signed certificate

$ cd Documents

$ mkdir registry

$ cd registry


$ mkdir -p certs && openssl req \

-newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \

-x509 -days 36500 -out certs/domain.crt


Country Name (2 letter code) [AU]:  

State or Province Name (full name) [Some-State]:

Locality Name (eg, city) []:

Organization Name (eg, company) [Internet Widgits Pty Ltd]:

Organizational Unit Name (eg, section) []:

Common Name (e.g. server FQDN or YOUR name) []:privateregistry.com

Email Address []:



2. 패스워드 파일 생성 (이건 나중에)

$ mkdir -p auth

$ docker run --entrypoint htpasswd registry:2 -Bbn test test > auth/htpasswd



3. cert 파일 복사

$ vi deployCert.sh

#!/bin/bash
# Install the self-signed registry certificate where the local Docker
# daemon will trust it (/etc/docker/certs.d/<host>/ca.crt) and stage the
# cert/data directories that are later bind-mounted into the registry
# container.  Expects ./certs/domain.crt to exist (see step 1).
set -euo pipefail

readonly FQDN=privateregistry.com

echo "$FQDN"

# Docker trusts <dir>/ca.crt for pushes/pulls against this registry host.
sudo mkdir -p "/etc/docker/certs.d/$FQDN"
sudo cp certs/domain.crt "/etc/docker/certs.d/$FQDN/ca.crt"

# Volume roots for the registry container (-p creates parents, so the
# data dir implies the registry dir as well).
sudo mkdir -p "/opt/docker_volumes/registry/$FQDN/data"
sudo cp -r certs "/opt/docker_volumes/registry/$FQDN"


$ ./deployCert.sh



# Ubuntu 에 Cert 설치

$ sudo cp /home/stack/Documents/registry/certs/domain.crt /usr/local/share/ca-certificates/.

$ sudo update-ca-certificates


# docker restart

$ sudo service docker restart



# node01, node02 에도 cert 복사

$ sudo mkdir -p /etc/docker/privateregistry.com

$ sudo cp /home/stack/Documents/registry/certs/domain.crt /etc/docker/privateregistry.com/ca.crt


$ sudo cp /home/stack/Documents/registry/certs/domain.crt /usr/local/share/ca-certificates/.

$ sudo update-ca-certificates


# docker restart

$ sudo service docker restart


$ sudo vi /etc/hosts

192.168.75.211  privateregistry.com


4. Registry 생성

# docker-compose 로 실행

# root 권한으로 변환해서 docker-compose 설치

$ sudo su -

# curl -L https://github.com/docker/compose/releases/download/1.5.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose


$ vi kube-registry.yml


# docker-compose (v1 format) service definition for a TLS-enabled Docker
# registry v2, persisting image data under /opt/docker_volumes.
kube-registry:
  container_name: kube-registry
  restart: always
  image: registry:2
  ports:
    - 5000:5000
  environment:
    # Cert/key paths as seen *inside* the container (mounted below).
    REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt
    REGISTRY_HTTP_TLS_KEY: /certs/domain.key
    REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY: /var/lib/registry
  volumes:
    # Host dirs prepared by deployCert.sh.
    - /opt/docker_volumes/registry/privateregistry.com/data:/var/lib/registry
    - /opt/docker_volumes/registry/privateregistry.com/certs:/certs


$ docker-compose -f kube-registry.yml up -d



# docker run 으로 실행

docker run -d -p 5000:5000 --restart=always --name kube-registry \

  -v `pwd`/certs:/certs \

  -v /opt/docker_volumes/registry/privateregistry.com/data:/var/lib/registry \

  -e REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/var/lib/registry \

  -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \

  -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \

  registry:2


5. Registry 확인

https://192.168.230.211:5000/v2/_catalog


# node01 확인

$ sudo vi /etc/hosts

192.168.75.211  privateregistry.com


$ docker pull ubuntu

$ docker tag ubuntu privateregistry.com:5000/ubuntu

$ docker push privateregistry.com:5000/ubuntu


# master 에서 확인

$ docker pull privateregistry.com:5000/ubuntu


6. Registry 삭제

docker stop kube-registry && docker rm kube-registry


7. image 조회

docker images privateregistry.com:5000



8. Tomcat8 Docker file 만들기

$ mkdir -p tomcat

$ cd tomcat

$ vi Dockerfile


# Tomcat 8.5 on the official java:8-jre base image, with the Tomcat
# Native (APR) library compiled from the source tarball bundled in the
# Tomcat distribution.
FROM java:8-jre

ENV CATALINA_HOME /usr/local/tomcat
ENV PATH $CATALINA_HOME/bin:$PATH
RUN mkdir -p "$CATALINA_HOME"
WORKDIR $CATALINA_HOME

# runtime dependency for Tomcat Native Libraries
RUN apt-get update && apt-get install -y libapr1 && rm -rf /var/lib/apt/lists/*

# Import the Apache Tomcat release-signing keys so the download below
# can be GPG-verified.
# see https://www.apache.org/dist/tomcat/tomcat-8/KEYS
RUN set -ex \
&& for key in \
05AB33110949707C93A279E3D3EFE6B686867BA6 \
07E48665A34DCAFAE522E5E6266191C37C037D42 \
47309207D818FFD8DCD3F83F1931D684307A10A5 \
541FBE7D8F78B25E055DDEE13C370389288584E7 \
61B832AC2F1C5A90F0F9B00A1C506407564C17A3 \
79F7026C690BAA50B92CD8B66A3AD3F4F22C4FED \
9BA44C2621385CB966EBA586F72C284D731FABEE \
A27677289986DB50844682F8ACB77FC2E86E29AC \
A9C5DF4D22E99998D9875A5110C01C5A2F6059E7 \
DCFD35E0BF8CA7344752DE8B6FB21E8933C60243 \
F3A04C595DB5B6A5F1ECA43E3B7BBB100D811BBE \
F7DA48BB64BCB84ECBA7EE6935CD23C10D498E23 \
; do \
gpg --keyserver ha.pool.sks-keyservers.net --recv-keys "$key"; \
done

# Version pins; the download URL is derived from these two values.
ENV TOMCAT_MAJOR 8
ENV TOMCAT_VERSION 8.5.0
ENV TOMCAT_TGZ_URL https://www.apache.org/dist/tomcat/tomcat-$TOMCAT_MAJOR/v$TOMCAT_VERSION/bin/apache-tomcat-$TOMCAT_VERSION.tar.gz

# Tomcat Native 1.2+ requires a newer version of OpenSSL than debian:jessie has available (1.0.2g+)
# see http://tomcat.10.x6.nabble.com/VOTE-Release-Apache-Tomcat-8-0-32-tp5046007p5046024.html (and following discussion)

# Download and GPG-verify the Tomcat tarball, unpack it into
# CATALINA_HOME, build tomcat-native (with a source patch for the old
# distro OpenSSL), then purge the build-only packages.
RUN set -x \
\
&& curl -fSL "$TOMCAT_TGZ_URL" -o tomcat.tar.gz \
&& curl -fSL "$TOMCAT_TGZ_URL.asc" -o tomcat.tar.gz.asc \
&& gpg --batch --verify tomcat.tar.gz.asc tomcat.tar.gz \
&& tar -xvf tomcat.tar.gz --strip-components=1 \
&& rm bin/*.bat \
&& rm tomcat.tar.gz* \
\
&& nativeBuildDir="$(mktemp -d)" \
&& tar -xvf bin/tomcat-native.tar.gz -C "$nativeBuildDir" --strip-components=1 \
&& nativeBuildDeps=" \
gcc \
libapr1-dev \
libssl-dev \
make \
openjdk-${JAVA_VERSION%%[-~bu]*}-jdk=$JAVA_DEBIAN_VERSION \
" \
&& apt-get update && apt-get install -y --no-install-recommends $nativeBuildDeps && rm -rf /var/lib/apt/lists/* \
&& ( \
export CATALINA_HOME="$PWD" \
&& cd "$nativeBuildDir/native" \
&& [ "$(openssl version | cut -d' ' -f2)" = '1.0.1k' ] \
# http://tomcat.10.x6.nabble.com/VOTE-Release-Apache-Tomcat-8-0-32-tp5046007p5048274.html (ie, HACK HACK HACK)
&& cp src/sslcontext.c src/sslcontext.c.orig \
&& awk ' \
/^    eckey = EC_KEY_new_by_curve_name/ { print "    EC_KEY *eckey = NULL;" } \
{ print } \
' src/sslcontext.c.orig > src/sslcontext.c \
&& ./configure \
--libdir=/usr/lib/jni \
--prefix="$CATALINA_HOME" \
--with-apr=/usr/bin/apr-1-config \
--with-java-home="$(docker-java-home)" \
--with-ssl=yes \
&& make -j$(nproc) \
&& make install \
) \
&& apt-get purge -y --auto-remove $nativeBuildDeps \
&& rm -rf "$nativeBuildDir" \
&& rm bin/tomcat-native.tar.gz

# verify Tomcat Native is working properly
RUN set -e \
&& nativeLines="$(catalina.sh configtest 2>&1)" \
&& nativeLines="$(echo "$nativeLines" | grep 'Apache Tomcat Native')" \
&& nativeLines="$(echo "$nativeLines" | sort -u)" \
&& if ! echo "$nativeLines" | grep 'INFO: Loaded APR based Apache Tomcat Native library' >&2; then \
echo >&2 "$nativeLines"; \
exit 1; \
fi

EXPOSE 8080
# Run Tomcat in the foreground as the container's main process.
CMD ["catalina.sh", "run"]


$ docker build -t tomcat-jre8:8 .                # 처음 이미지는 . 을 추가할 수 없음

docker tag tomcat-jre8:8 tomcat-jre8:8.5.0         # 태그에 . 을 추가

$ docker rmi tomcat-jre8:8                          # 처음 태그를 삭제



# 태그이름을 리모트로 변경

docker tag tomcat-jre8:8.5.0 privateregistry.com:5000/tomcat-jre8:8.5.0


# 태그 이름이 리모트이므로 리모트로 올리게 됨

$ docker push privateregistry.com:5000/tomcat-jre8:8.5.0



# node01 에서 확인

$ docker pull privateregistry.com:5000/tomcat-jre8:8.5.0



$ https://192.168.230.211:5000/v2/tomcat-jre8/tags/list

$ curl https://privateregistry.com:5000/v2/tomcat-jre8/tags/list











Posted by Kubernetes Korea co-leader seungkyua@gmail.com

 0. 서버 설정

Master   : 192.168.75.211  (etcd, kube-apiserver, kube-controller-manager, kube-scheduler)

Node01  : 192.168.75.212  (kube-proxy, kubelet)

Node02  : 192.168.75.213  (kube-proxy, kubelet)


etcd-2.2.1, flannel-0.5.5, k8s-1.1.2



[ Master Node 서버에 모두 설치 ]

1. apt-get 으로 필요 s/w 설치

# docker 설치

$ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D

$ sudo vi /etc/apt/sources.list.d/docker.list


# Debian Jessie

#deb https://apt.dockerproject.org/repo debian-jessie main


# Debian Stretch/Sid

#deb https://apt.dockerproject.org/repo debian-stretch main


# Ubuntu Precise

#deb https://apt.dockerproject.org/repo ubuntu-precise main


# Ubuntu Trusty (14.04 LTS)

deb https://apt.dockerproject.org/repo ubuntu-trusty main


# Ubuntu Utopic (14.10)

#deb https://apt.dockerproject.org/repo ubuntu-utopic main


# Ubuntu Vivid (15.04)

#deb https://apt.dockerproject.org/repo ubuntu-vivid main


# Ubuntu Wily (15.10)

#deb https://apt.dockerproject.org/repo ubuntu-wily main


$ sudo apt-get update

$ sudo apt-get purge lxc-docker*

$ sudo apt-get purge docker.io

$ sudo apt-get autoremove

$ sudo apt-get install docker-engine


$ sudo apt-get install bridge-utils

sudo usermod -a -G docker stack      # stack user에 docker 그룹을 추가

$ sudo service docker restart



2. sudo 세팅

# gpasswd -a stack sudo   (이건 안되는데??)

stack   ALL=(ALL:ALL) NOPASSWD: ALL



3. ntp 설치 & ssh 키 설치

# ssh 로 master <-> Node 사이에 stack 계정으로 바로 접속할 수 있어야 함

# ssh 로 master, Node 각각 자기 서버 내에서 stack 계정에서 root 계정으로 바로 접속할 수 있어야 함



4. host 세팅

192.168.75.211    master

192.168.75.212    node01

192.168.75.213    node02



5. Go 설치

1. 다운로드

$ cd /home/stack/downloads

wget https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz

sudo tar -C /usr/local -xzf go1.5.2.linux-amd64.tar.gz


2. 환경변수 세팅

sudo vi /etc/profile

export GOROOT=/usr/local/go

export PATH=$PATH:/usr/local/go/bin


sudo visudo             # sudo 에서도 go path가 적용될려면 여기에 세팅

Defaults    env_reset

Defaults    env_keep += "GOPATH"

Defaults        secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin"


$ cd

vi .bash_profile

export GOPATH=$HOME/Documents/go_workspace:$HOME/Documents/go_workspace/src/k8s.io/kubernetes/Godeps/_workspace

export PATH=$HOME/Documents/go_workspace/bin:$PATH



6. kubernetes 설치

# go 로 다운로드하기

$ go get k8s.io/kubernetes   # git clone https://github.com/kubernetes/kubernetes.git


$ cd ~/Documents/go_workspace/src/k8s.io/kubernetes

$ git checkout -b v1.1.2 tags/v1.1.2

$ make all                                      # _output 디렉토리에 결과 파일이 생성


# 소스 수정 후 make 로 재빌드 (참고)   _output 디렉토리에 결과 파일이 생성

$ make all WHAT=plugin/cmd/kube-scheduler GOFLAGS=-v      # scheduler

$ make all WHAT=cmd/kubelet GOFLAGS=-v                           # kubelet

$ make all WHAT=cmd/kube-apiserver GOFLAGS=-v                # apiserver


# 소스 수정 후 재빌드 (참고)

$ hack/build-go.sh                  # make를 돌리면 build-go.sh 가 수행됨
$ hack/local-up-cluster.sh        # 로컬 클러스터를 생성할 때


$ sudo su -

# cd ~/Documents/go_workspace/src/k8s.io/kubernetes/cluster/ubuntu


# export KUBE_VERSION=1.1.2

# export FLANNEL_VERSION=0.5.5

# export ETCD_VERSION=2.2.1


# ./build.sh                 # binaries 디렉토리에 다운 받음

# exit



$ cd ~/Documents/go_workspace/src/k8s.io/kubernetes/cluster/ubuntu

$ vi config-default.sh


# Deploy targets: one ssh destination per entry, space separated.
export nodes="stack@192.168.75.211 stack@192.168.75.212"
# Role per node, same order as $nodes -- presumably the cluster/ubuntu
# convention of "a" = master, "i" = node, "ai" = both; verify against
# cluster/ubuntu/util.sh.
export role="a i"
export NUM_MINIONS=${NUM_MINIONS:-1}
# Virtual IP range handed out to Services; must not overlap FLANNEL_NET.
export SERVICE_CLUSTER_IP_RANGE=192.168.230.0/24
# Overlay network flannel allocates to Docker bridges on each node.
export FLANNEL_NET=172.16.0.0/16


# SkyDNS add-on; DNS_SERVER_IP must lie inside SERVICE_CLUSTER_IP_RANGE.
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP=${DNS_SERVER_IP:-"192.168.230.10"}
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1

# kube-ui add-on.
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"


$ cd ~/Documents/go_workspace/src/k8s.io/kubernetes/cluster

$ KUBERNETES_PROVIDER=ubuntu ./kube-up.sh


# 복사한 파일

make-ca-cert.sh    

reconfDocker.sh    

config-default.sh    

util.sh    

kube-scheduler.conf    

kube-apiserver.conf    

etcd.conf    

kube-controller-manager.conf    

flanneld.conf    

kube-controller-manager    

kube-scheduler    

etcd    

kube-apiserver    

flanneld    

kube-controller-manager    

etcdctl    

kube-scheduler    

etcd    

kube-apiserver    

flanneld



# kubectl 복사

$ sudo cp ubuntu/binaries/kubectl /opt/bin/.


# 경로 추가

$ vi ~/.bash_profile

export PATH=/opt/bin:$PATH

export KUBECTL_PATH=/opt/bin/kubectl



# Add-on 설치

$ cd ~/Documents/go_workspace/src/k8s.io/kubernetes/cluster/ubuntu

$ KUBERNETES_PROVIDER=ubuntu ./deployAddons.sh


# 에러 발생하면 아래 실행 (Docker image 를 다운로드 함)

$ cd ~/Documents/go_workspace/src/k8s.io/kubernetes

./build/run.sh hack/build-cross.sh


# Add-on 설치 다시

cd ~/Documents/go_workspace/src/k8s.io/kubernetes/cluster/ubuntu

$ KUBERNETES_PROVIDER=ubuntu ./deployAddons.sh



[ Kubernetes 설치 지우기 ]

$ cd ..

$ KUBERNETES_PROVIDER=ubuntu ./kube-down.sh


# node01 에 떠 있는 docker 삭제하기

docker ps -a | awk '{print $1}' | xargs docker stop

docker ps -a | awk '{print $1}' | xargs docker rm

$ sudo cp ubuntu/binaries/kubectl /opt/bin/.                # kubectl 을 /opt/bin 에 복사해야 함


$ KUBERNETES_PROVIDER=ubuntu ./kube-up.sh



[ Master의 Docker를 flannel 로 연결 ]

sudo service docker stop

$ sudo ip link set dev docker0 down

$ sudo brctl delbr docker0

$ cat /run/flannel/subnet.env      # flannel의 subnet 과 mtu 값을 확인한다.

$ sudo vi /etc/default/docker

DOCKER_OPTS=" -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock --bip=172.16.25.1/24 --mtu=1472"


$ sudo service docker start

$ sudo ip link set dev docker0 up



# node01 에서 docker ps -a 로 가비지가 많이 쌓임. 지워주면 됨

# ssh node01 로 접속하여 가비지 조회

docker ps -a | grep Exited | awk '{print $1}'

docker ps -a | grep Exited | awk '{print $1}' | xargs docker rm


# kubernetes volume 생성되는 곳 : /var/lib/kubelet/pods

# kubernetes garbage-collection https://github.com/kubernetes/kubernetes/blob/master/docs/admin/garbage-collection.md


$ kubectl get nodes

$ kubectl get pods --namespace=kube-system         # add-on pods 확인

$ kubectl cluster-info


# Skydns Pod 정보 보기

kubectl describe pod kube-dns-v9-549av --namespace=kube-system


# DNS 확인

$ kubectl create -f busybox.yaml


$ vi busybox.yaml

# Minimal busybox pod used to verify cluster DNS
# (kubectl exec busybox -- nslookup kubernetes.default).
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox
    # Sleep keeps the container alive so it can be exec'd into.
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
    name: busybox
  restartPolicy: Always


$ kubectl get pods busybox


kubectl exec Pod명 [-c Container명] -i -t -- COMMAND [args..] [flags]

$ kubectl exec busybox -- nslookup kubernetes.default


# busybox 삭제하기

$ kubectl delete -f busybox.yaml



# 웹화면 확인

http://192.168.75.211:8080/


# UI 확인

http://192.168.75.211:8080/ui    >> 아래 화면으로 리다이렉션 됨

http://192.168.75.211:8080/api/v1/proxy/namespaces/kube-system/services/kube-ui



# Mac 에서 소스 개발하고 Master 에 소스 커밋하기  (참고)

# 원격에 tag 에도 v1.1.2 가 있고 branch 에도 v1.1.2 가 있으면 remote branch 를 지정

# git push [저장소] (local branch명:)remote branch명

git push origin refs/heads/v1.1.2


git config --global user.name "Seungkyu Ahn" 

git config --global user.email "seungkyua@gmail.com"


# 로컬 파일을 Master 서버로 복사

$ vi ~/bin/cmaster.sh

#!/bin/bash
# Copy files that 'git status' reports as modified or newly added from
# the local kubernetes source tree to the same relative path on the
# master host.
set -euo pipefail

readonly SRC_DIR=/Users/ahnsk/Documents/go_workspace/src/k8s.io/kubernetes
readonly REMOTE=stack@192.168.230.211
readonly REMOTE_DIR=/home/stack/Documents/go_workspace/src/k8s.io/kubernetes

cd "$SRC_DIR" || { echo "cannot cd to $SRC_DIR" >&2; exit 1; }

# One path per line: awk strips the 'modified:' / 'new file:' label and
# 'read -r' trims the surrounding whitespace, so paths are not split on
# internal spaces (unlike the naive 'for f in $files' loop).
git status | grep -E 'modified|new file' | awk -F':' '{print $2}' |
while read -r file; do
  [ -n "$file" ] || continue
  scp "$file" "$REMOTE:$REMOTE_DIR/$file"
done



# kube-apiserver 소스로 띄우기

$ cd ~/Documents/go_workspace/src/k8s.io/kubernetes/cmd/kube-apiserver


sudo -E go run apiserver.go --insecure-bind-address=0.0.0.0 --insecure-port=8080 --etcd-servers=http://127.0.0.1:4001 --logtostderr=true --service-cluster-ip-range=192.168.230.0/24 --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,DenyEscalatingExec,SecurityContextDeny --service-node-port-range=30000-32767 --client-ca-file=/srv/kubernetes/ca.crt --tls-cert-file=/srv/kubernetes/server.cert --tls-private-key-file=/srv/kubernetes/server.key 



# Document 만들기

$ cd ~/Documents/go_workspace/src/k8s.io/kubernetes/cmd/genkubedocs

$ mkdir -p temp

$  go run gen_kube_docs.go temp kube-apiserver



7. Sample App 올려보기

https://github.com/kubernetes/kubernetes/tree/master/examples/guestbook


# 디렉토리 위치는 kubernetes 설치한 위치

$ sudo kubectl create -f examples/guestbook/redis-master-controller.yaml

$ sudo kubectl get rc

$ sudo kubectl get pods

$ sudo kubectl describe pods/redis-master-xssrd

$ sudo kubectl logs <pod_name>          # container log 확인


$ sudo kubectl create -f examples/guestbook/redis-master-service.yaml

$ sudo kubectl get services


$ sudo kubectl create -f examples/guestbook/redis-slave-controller.yaml

$ sudo kubectl get rc

$ sudo kubectl get pods


$ sudo kubectl create -f examples/guestbook/redis-slave-service.yaml

$ sudo kubectl get services



$ sudo kubectl create -f examples/guestbook/frontend-controller.yaml

$ sudo kubectl get rc

$ sudo kubectl get pods



$ sudo kubectl create -f examples/guestbook/frontend-service.yaml

$ sudo kubectl get services





sudo kubectl describe services frontend

$ sudo kubectl get ep


# dns 보기

$ sudo kubectl get services kube-dns --namespace=kube-system


# 환경변수 보기

$ sudo kubectl get pods -o json

$ sudo kubectl get pods -o wide

$ sudo kubectl exec frontend-cyite -- printenv | grep SERVICE


8. Sample App 삭제

$ sudo kubectl stop rc -l "name in (redis-master, redis-slave, frontend)"

$ sudo kubectl delete service -l "name in (redis-master, redis-slave, frontend)"



# Network

TAP : vm과 eth0 (physical port) 와 연결할 때 사용. tap <-> bridge <-> eth0 로 됨

VETH : docker <-> bridge,  docker <-> OVS, bridge <-> OVS 를 연결할 때 사용


# interconnecting namespaces

http://www.opencloudblog.com/?p=66



# Docker <-> veth 알아내기

$ vi veth.sh


#!/bin/bash
# Map each host-side veth interface to the running Docker container that
# holds its peer end: ethtool reports the peer's ifindex, which is then
# looked up inside every container's network namespace via 'ip link'.

set -o errexit
set -o nounset
#set -o pipefail   # intentionally off: grep finding no match must not abort

# Host veth interfaces and running container IDs ($() instead of backticks).
VETHS=$(ifconfig -a | grep "Link encap" | sed 's/ .*//g' | grep veth)
DOCKERS=$(docker ps -a | grep Up | awk '{print $1}')

# $VETHS/$DOCKERS are deliberately unquoted here: word-splitting turns
# the command output into the iteration list.
for VETH in $VETHS
do
  # ifindex of the peer interface living inside some container netns.
  PEER_IFINDEX=$(ethtool -S "$VETH" 2>/dev/null | grep peer_ifindex | sed 's/ *peer_ifindex: *//g')
  for DOCKER in $DOCKERS
  do
    # Interface name in this container with the matching ifindex, if any.
    PEER_IF=$(docker exec "$DOCKER" ip link list 2>/dev/null | grep "^$PEER_IFINDEX:" | awk '{print $2}' | sed 's/:.*//g')
    if [ -z "$PEER_IF" ]; then
      continue
    else
      printf "%-10s is paired with %-10s on %-20s\n" "$VETH" "$PEER_IF" "$DOCKER"
      break
    fi
  done
done






Posted by Kubernetes Korea co-leader seungkyua@gmail.com

docker ssh + git

Container 2015.08.13 15:20

1. docker 설치하기

# docker 설치

$ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D

$ sudo vi /etc/apt/sources.list.d/docker.list


# Debian Jessie

#deb https://apt.dockerproject.org/repo debian-jessie main


# Debian Stretch/Sid

#deb https://apt.dockerproject.org/repo debian-stretch main


# Ubuntu Precise

#deb https://apt.dockerproject.org/repo ubuntu-precise main


# Ubuntu Trusty (14.04 LTS)

deb https://apt.dockerproject.org/repo ubuntu-trusty main


# Ubuntu Utopic (14.10)

#deb https://apt.dockerproject.org/repo ubuntu-utopic main


# Ubuntu Vivid (15.04)

#deb https://apt.dockerproject.org/repo ubuntu-vivid main


# Ubuntu Wily (15.10)

#deb https://apt.dockerproject.org/repo ubuntu-wily main


$ sudo apt-get update

$ sudo apt-get purge lxc-docker*

$ sudo apt-get purge docker.io

$ sudo apt-get autoremove

$ sudo apt-get install docker-engine


$ sudo apt-get install bridge-utils

$ sudo usermod -a -G docker stack      # stack user에 docker 그룹을 추가

$ sudo service docker restart


# Mac 에서 Docker 설치하기

$ ruby -e \

"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"


$ brew update

$ brew install caskroom/cask/brew-cask


$ brew cask install virtualbox

$ brew install docker

$ brew install boot2docker


$ boot2docker init

$ boot2docker up


To connect the Docker client to the Docker daemon, please set:

    export DOCKER_HOST=tcp://192.168.59.103:2376

    export DOCKER_CERT_PATH=/Users/ahnsk/.boot2docker/certs/boot2docker-vm

    export DOCKER_TLS_VERIFY=1


$ $(boot2docker shellinit)       # 환경변수 세팅


$ docker info

$ boot2docker ssh                 # vm 접속

$ boot2docker ip                   # vm ip


$ docker run --rm -ti ubuntu:latest /bin/bash        # ubuntu 이미지 테스트

$ docker run --rm -ti fedora:latest /bin/bash         # fedora 이미지 테스트

$ docker run --rm -ti centos:latest /bin/bash         # centos 이미지 테스트


# Upgrade the Boot2docker VM image

$ boot2docker stop

$ boot2docker download

$ boot2docker up


$ boot2docker delete


# Docker Hub 로그인

$ docker login


Username: seungkyua

Password: 

Email: seungkyua@gmail.com


$  cat ~/.docker/config.json


$ docker logout


# Docker Registry 를 insecure 로 변경


# boot2docker

sudo touch /var/lib/boot2docker/profile

$ sudo vi /var/lib/boot2docker/profile

EXTRA_ARGS="--insecure-registry 192.168.59.103:5000"

sudo /etc/init.d/docker restart


# Ubuntu

$ sudo vi /etc/default/docker

DOCKER_OPTS="--insecure-registry 192.168.59.103:5000"

$ sudo service docker restart


# Fedora

$ sudo vi /etc/sysconfig/docker

OPTIONS="--insecure-registry 192.168.59.103:5000"

$ sudo systemctl daemon-reload

$ sudo systemctl restart docker


# CoreOS

$ sudo cp /usr/lib/systemd/system/docker.service /etc/systemd/system/

$ sudo vi  /etc/systemd/system/docker.service

ExecStart=/usr/lib/coreos/dockerd --daemon --host=fd:// \

$DOCKER_OPTS $DOCKER_OPT_BIP $DOCKER_OPT_MTU $DOCKER_OPT_IPMASQ \

--insecure-registry 192.168.59.103:5000

$ sudo systemctl daemon-reload

$ sudo systemctl restart docker


# Local Registry 띄우기

$ sudo mkdir -p /var/lib/registry

$ docker run -d -p 5000:5000 \

-v /var/lib/registry:/var/lib/registry \

--restart=always --name registry registry:2



# 테스트

$ docker pull ubuntu

$ docker tag ubuntu 192.168.59.103:5000/ubuntu


$ docker push 192.168.59.103:5000/ubuntu

$ docker pull 192.168.59.103:5000/ubuntu


$ docker stop registry

$ docker rm -v registry




2. docker file 만들기

# mkdir docker

# cd docker

# mkdir git-ssh

# cd git-ssh

# vi Dockerfile

FROM ubuntu:14.04


RUN apt-get -y update

RUN apt-get -y install openssh-server

RUN apt-get -y install git


# Setting openssh

RUN mkdir /var/run/sshd

RUN sed -i "s/#PasswordAuthentication yes/PasswordAuthentication no/" /etc/ssh/sshd_config


# Adding git user

RUN adduser --system git

RUN mkdir -p /home/git/.ssh


# Clearing and setting authorized ssh keys

RUN echo '' > /home/git/.ssh/authorized_keys

RUN echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTFEBrNfpSIvgz7mZ+I96/UqKFCxcouoiDDS9/XPNB1Tn7LykgvHHaR5mrPOQIJ/xTFhSVWpwsmEvTLdv3QJYLB5P+UfrjY5fUmiYgGpKKr5ym2Yua2wykHgQYdT4+lLhyq3BKbnG9vgc/FQlaCWntLckJfAYnHIGYWl1yooMAOka0/pOeJ+hPF0TxLQtrjoVJWiaHLVnB8qgPiCgvSyKROvW6cs1AhY9abasUWrQ5eNsLLMY1rDWccantMjVlcUdDZuPzI4g+/MtfE3IAs7JxtmwMvCMFRMuzWTtZkZSVyqpEGDeLnPGgMNTYUwaxQhlJLtcYnNTqdyZr8ZCcz3zP stephen@Stephenui-MacBook-Pro.local' >> /home/git/.ssh/authorized_keys


# Updating shell to bash

RUN sed -i s#/home/git:/bin/false#/home/git:/bin/bash# /etc/passwd


EXPOSE 22

CMD ["/usr/sbin/sshd", "-D"]

docker build -t git-ssh-img .

docker run --name git-ssh -d -p 1234:22 git-ssh-img


3. docker container bash로 접속

docker run -i -t --rm --net='host' ubuntu:14.04 bash


3. docker container 접속

docker exec -it <containerIdOrName> bash


4. docker 모든 컨테이너 보기

# docker ps -a


5. 모든 컨테이너 삭제

docker ps -a | awk '{print $1}' | grep -v CONTAINER | xargs sudo docker rm


6. docker 모든 <none> 이미지 삭제

docker images | grep "<none>" | awk '{print $3}' | xargs sudo docker rmi


7. 이미지 조회 및 실행

$ sudo docker search ubuntu

sudo docker run --name myssh -d -p 4444:22 rastasheep/ubuntu-sshd


8. stack 사용자 docker 그룹 권한 추가

$ sudo usermod -aG docker stack

$ sudo service docker restart

$ 재로그인


9. docker 이미지 가져오기

$ docker pull ubuntu:latest


10. docker bash쉘로 실행 및 빠져나오기

docker run -i -t --name hello ubuntu /bin/bash

root@bb97e5f57596:/#


Ctrl + p, Ctrl + q        => 멈추지 않고 빠져나오기


$ docker attach hello            => 다시 접속하기 (enter를 한번 쳐야 함)


11. nginx 설치하기

# mkdir data


# vi Dockerfile

FROM ubuntu:14.04.3


RUN apt-get update

RUN apt-get install -y nginx

RUN echo "\ndaemon off;" >> /etc/nginx/nginx.conf

RUN chown -R www-data:www-data /var/lib/nginx


VOLUME ["/data", "/etc/nginx/site-enabled", "/var/log/nginx"]


WORKDIR /etc/nginx


CMD ["nginx"]


EXPOSE 80

EXPOSE 443


# docker build -t nginx:0.1 .

docker run --name hello-nginx -d -p 2080:80 -v /root/data:/data nginx:0.1



11. 파일 꺼내서 보기

# docker cp hello-nginx:/etc/nginx/nginx.conf ./


12. 컨테이너를 이미지로 생성

# docker commit -a "aaa <aaa@aaa.com>" -m "Initial commit" hello-nginx nginx:0.2


13. 이미지와 컨테이너 변경사항 보기

# docker diff 컨테이너ID

# docker history 이미지ID


14. 컨테이너 내부 보기

# docker inspect hello-nginx


15. docker 컨테이너의 pid 알아내기

docker inspect -f '{{.State.Pid}}' containerID


16. Docker 끼리 point to point 통신하기 (도커마다 네임스페이스를 만들어서 VETH 로 연결)

https://docs.docker.com/v1.7/articles/networking/#building-your-own-bridge


$ docker run -i -t --rm --net=none base /bin/bash

root@1f1f4c1f931a:/#


$ docker run -i -t --rm --net=none base /bin/bash

root@12e343489d2f:/#


# Learn the container process IDs

# and create their namespace entries


$ docker inspect -f '{{.State.Pid}}' 1f1f4c1f931a

2989

$ docker inspect -f '{{.State.Pid}}' 12e343489d2f

3004

$ sudo mkdir -p /var/run/netns

$ sudo ln -s /proc/2989/ns/net /var/run/netns/2989

$ sudo ln -s /proc/3004/ns/net /var/run/netns/3004


# Create the "peer" interfaces and hand them out


$ sudo ip link add A type veth peer name B


$ sudo ip link set A netns 2989

$ sudo ip netns exec 2989 ip addr add 10.1.1.1/32 dev A

$ sudo ip netns exec 2989 ip link set A up

$ sudo ip netns exec 2989 ip route add 10.1.1.2/32 dev A


$ sudo ip link set B netns 3004

$ sudo ip netns exec 3004 ip addr add 10.1.1.2/32 dev B

$ sudo ip netns exec 3004 ip link set B up

$ sudo ip netns exec 3004 ip route add 10.1.1.1/32 dev B



# ssh 다른 샘플

FROM ubuntu:14.04

RUN echo "deb http://archive.ubuntu.com/ubuntu/ trusty main universe" > /etc/apt/sources.list

RUN apt-get update


RUN apt-get install -y openssh-server

RUN mkdir /var/run/sshd

RUN echo 'root:screencast' | chpasswd


EXPOSE 22

CMD /usr/sbin/sshd -D



# NodeJS 샘플

git clone https://github.com/spkane/docker-node-hello.git

cd docker-node-hello


$ brew install tree

tree -a -I .git             # Directory 를 tree 구조로 봄


docker build --no-cache -t example/docker-node-hello:latest .

$ docker run -d -p 8081:8080 example/docker-node-hello:latest    # host 8081, docker 8080


$ echo $DOCKER_HOST


$ docker stop DOCKER_ID


# -e 옵션으로 env 넘기기

$ docker run -d -p 8081:8080 -e WHO="Seungkyu Ahn" example/docker-node-hello:latest


$ docker inspect DOCKER_ID









Posted by Kubernetes Korea co-leader seungkyua@gmail.com

0. 서버 설정

Master : 192.168.75.129  (etcd, flannel, kube-apiserver, kube-controller-manager, kube-scheduler)

Node   : 192.168.75.130  (flannel, kube-proxy, kubelet)


gpasswd -a stack sudo  (? 안되는데??)


0. Kubernetes 소스 다운로드 및 WebStorm 지정

# 소스 다운로드

Go 설치 및 패스 (http://ahnseungkyu.com/204)

$ cd ~/Documents/go_workspace/src

$ go get k8s.io/kubernetes


$ cd k8s.io/kubernetes

$ git checkout -b v1.1.2 tags/v1.1.2


# WebStorm  New Project 로 Go 프로젝트 생성

경로 : ~/Documents/go_workspace/src/k8s.io/kubernetes


# WebStorm >> Preferences >> Languages & Frameworks >> Go >> Go SDK 에 추가

Path : /usr/local/go


# WebStorm >> Preferences >> Languages & Frameworks >> Go >> Go Libraries >> Project libraries 에 아래 경로 추가

경로 : Documents/go_workspace/src/k8s.io/kubernetes/Godeps/_workspace



[ Master Minion 서버에 모두 설치 ]

1. apt-get 으로 필요 s/w 설치

# docker 설치

$ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D

$ sudo vi /etc/apt/sources.list.d/docker.list


# Debian Jessie

deb https://apt.dockerproject.org/repo debian-jessie main


# Debian Stretch/Sid

deb https://apt.dockerproject.org/repo debian-stretch main


# Ubuntu Precise

deb https://apt.dockerproject.org/repo ubuntu-precise main


# Ubuntu Trusty (14.04 LTS)

deb https://apt.dockerproject.org/repo ubuntu-trusty main


# Ubuntu Utopic (14.10)

deb https://apt.dockerproject.org/repo ubuntu-utopic main


# Ubuntu Vivid (15.04)

deb https://apt.dockerproject.org/repo ubuntu-vivid main


# Ubuntu Wily (15.10)

deb https://apt.dockerproject.org/repo ubuntu-wily main


# Ubuntu Xenial (16.04)

deb https://apt.dockerproject.org/repo ubuntu-xenial main


$ sudo apt-get update

$ sudo apt-get purge lxc-docker*

$ sudo apt-get purge docker.io

$ sudo apt-get autoremove

$ sudo apt-get install docker-engine


$ sudo apt-get install bridge-utils

$ sudo apt-get install curl

$ sudo usermod -a -G docker stack      # stack user에 docker 그룹을 추가

$ sudo systemctl start docker.service



2. go apt-get 설치

$ sudo apt-get install linux-libc-dev golang gcc

$ sudo apt-get install ansible



3. host 파일 등록 (모든 서버에, root 계정으로 수행)

echo "192.168.75.129 kube-master

192.168.75.130 kube-node01" >> /etc/hosts



[ Kubernetes Master 설치 ]


4. etcd 설치

https://github.com/coreos/etcd/releases

curl -L  https://github.com/coreos/etcd/releases/download/v2.2.2/etcd-v2.2.2-linux-amd64.tar.gz -o etcd-v2.2.2-linux-amd64.tar.gz

$ tar xzvf etcd-v2.2.2-linux-amd64.tar.gz

$ sudo cp -f etcd-v2.2.2-linux-amd64/etcd /usr/bin

$ sudo cp -f etcd-v2.2.2-linux-amd64/etcdctl /usr/bin


$ sudo mkdir -p /var/lib/etcd/member

$ sudo chmod -R 777 /var/lib/etcd


$ sudo vi /etc/network-environment

# The master's IPv4 address - reachable by the kubernetes nodes.

NODE_NAME=kube-master

MASTER_NAME=kube-master

NODE_NAME_01=kube-node01


sudo vi /lib/systemd/system/etcd.service

[Unit]

Description=etcd

After=network-online.service


[Service]

EnvironmentFile=/etc/network-environment          # 혹은 /etc/default/etcd.conf

PermissionsStartOnly=true

ExecStart=/usr/bin/etcd \

--name ${NODE_NAME} \

--data-dir /var/lib/etcd \

--initial-advertise-peer-urls http://192.168.75.129:2380 \

--listen-peer-urls http://192.168.75.129:2380 \

--listen-client-urls http://192.168.75.129:2379,http://127.0.0.1:2379 \

--advertise-client-urls http://192.168.75.129:2379 \

--initial-cluster-token etcd-cluster-1 \

--initial-cluster ${MASTER_NAME}=http://kube-master:2380,${NODE_NAME_01}=http://kube-node01:2380 \

--initial-cluster-state new

Restart=always

RestartSec=10s


[Install]

WantedBy=multi-user.target

Alias=etcd.service


$ cd /lib/systemd/system

$ sudo chmod 775 etcd.service


$ sudo systemctl enable etcd.service

sudo systemctl daemon-reload                        # 파일 수정 후에는 reload 필요

$ sudo systemctl start etcd.service



$ etcdctl set /coreos.com/network/config "{\"Network\":\"172.16.0.0/16\"}"

$ etcdctl set /coreos.com/network/subnets/172.16.10.0-24 "{\"PublicIP\":\"192.168.75.129\"}"

$ etcdctl set /coreos.com/network/subnets/172.16.93.0-24 "{\"PublicIP\":\"192.168.75.130\"}"


$ etcdctl ls /                          # etcdctl ls --recursive (전체 다 보임)

/coreos.com/network/config

/coreos.com/network/subnets/172.16.10.0-24

/coreos.com/network/subnets/172.16.93.0-24

/registry


$ etcdctl get /coreos.com/network/config

{"Network":"172.16.0.0/16"}


$ etcdctl get /coreos.com/network/subnets/172.16.10.0-24     # Master의 flannel0 bridge ip

{"PublicIP":"192.168.75.129"}


$ etcdctl get /coreos.com/network/subnets/172.16.93.0-24     # Node01의 flannel0 bridge ip

{"PublicIP":"192.168.75.130"}



5. flannel 설치
$ git clone https://github.com/coreos/flannel.git

$ cd flannel

$ git checkout -b v0.5.4 tags/v0.5.4     # git checkout -b release-0.5.4 origin/release-0.5.4

$ ./build                   # bin 디렉토리가 생기면서 flanneld 실행파일이 빌드됨 

$ sudo cp -f bin/flanneld /usr/bin/.


$ sudo netstat -tulpn | grep etcd          # etcd 떠 있는 포트를 확인

sudo flanneld -etcd-endpoints=http://kube-master:4001 -v=0


$ cd /lib/systemd/system

$ sudo vi flanneld.service


[Unit]

Description=flanneld Service

After=etcd.service

Requires=etcd.service


[Service]

EnvironmentFile=/etc/network-environment

PermissionsStartOnly=true

User=root

ExecStart=/usr/bin/flanneld \

-etcd-endpoints http://localhost:4001,http://localhost:2379 \

-v=0

Restart=always

RestartSec=10s

RemainAfterExit=yes


[Install]

WantedBy=multi-user.target

Alias=flanneld.service



$ sudo systemctl enable flanneld.service

$ sudo systemctl start flanneld.service



6. Kubernetes API Server 설치

$ git clone https://github.com/GoogleCloudPlatform/kubernetes.git

$ cd kubernetes

git checkout -b release-1.1 origin/release-1.1

$ sudo make release


$ cd _output/release-tars

$ sudo tar zxvf kubernetes-server-linux-amd64.tar.gz


$ cd ~

git clone https://github.com/kubernetes/contrib.git

$ sudo cp -R ~/downloads/kubernetes/_output/* ~/downloads/contrib/ansible/roles/

$ cd ~/downloads/contrib/ansible/roles

$ sudo chown stack.stack -R *

$ vi  ~/downloads/contrib/ansible/inventory

[masters]

kube-master


[etcd]

kube-master


[nodes]

kube-node01



$ sudo su -

# ssh-keygen

# for node in kube-master kube-node01; do

ssh-copy-id ${node}

done

# exit


$ vi ~/downloads/contrib/ansible/group_vars/all.yml

source_type: localBuild

cluster_name: cluster.local

ansible_ssh_user: root

kube_service_addresses: 10.254.0.0/16

networking: flannel

flannel_subnet: 172.16.0.0

flannel_prefix: 12

flannel_host_prefix: 24

cluster_logging: true

cluster_monitoring: true

kube-ui: true

dns_setup: true

dns_replicas: 1


$ cd ~/downloads/contrib/ansible

$ ./setup.sh








sudo cp kubernetes/server/bin/kube-apiserver /usr/bin

$ sudo cp kubernetes/server/bin/kube-controller-manager /usr/bin

$ sudo cp kubernetes/server/bin/kube-scheduler /usr/bin

sudo cp kubernetes/server/bin/kubectl /usr/bin

sudo cp kubernetes/server/bin/kubernetes /usr/bin


sudo mkdir -p /var/log/kubernetes

$ sudo chown -R stack.docker /var/log/kubernetes/


$ cd /lib/systemd/system

$ sudo vi kube-apiserver.service


[Unit]

Description=Kubernetes API Server

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

Requires=etcd.service

After=etcd.service


[Service]

EnvironmentFile=/etc/network-environment

ExecStart=/usr/bin/kube-apiserver \

--api-rate=10 \

--bind-address=0.0.0.0 \

--etcd_servers=http://127.0.0.1:4001 \

--portal_net=10.254.0.0/16 \                              # 어디서 쓰는 거지?

--insecure-bind-address=0.0.0.0 \

--log-dir=/var/log/kubernetes \

--logtostderr=true \

--kubelet_port=10250 \

--service_account_key_file=/tmp/kube-serviceaccount.key \

--service_account_lookup=false \

--service-cluster-ip-range=172.16.0.0/16            # flannel 과 연동해야 하나?

Restart=always

RestartSec=10


[Install]

WantedBy=multi-user.target

Alias=kube-apiserver.service


$ sudo systemctl enable kube-apiserver.service

$ sudo systemctl start kube-apiserver.service


sudo systemctl daemon-reload                        # 파일 수정 후에는 reload 필요

$ sudo systemctl restart kube-apiserver


6. Kubernetes Controller Manager 설치

$ cd /lib/systemd/system

sudo vi kube-controller-manager.service


[Unit]

Description=Kubernetes Controller Manager

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

Requires=etcd.service

After=etcd.service


[Service]

ExecStart=/usr/bin/kube-controller-manager \

--address=0.0.0.0 \

--master=127.0.0.1:8080 \

--log-dir=/var/log/kubernetes \

--logtostderr=true 

#--service_account_private_key_file=/tmp/kube-serviceaccount.key

Restart=always

RestartSec=10


[Install]

WantedBy=multi-user.target

Alias=kube-controller-manager.service


$ sudo systemctl enable kube-controller-manager.service

$ sudo systemctl start kube-controller-manager.service


$ sudo systemctl daemon-reload

$ sudo systemctl restart kube-controller-manager


7. Kubernetes Scheduler 설치

$ cd /lib/systemd/system

sudo vi kube-scheduler.service


[Unit]

Description=Kubernetes Scheduler

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

Requires=etcd.service

After=etcd.service


[Service]

ExecStart=/usr/bin/kube-scheduler \

--master=127.0.0.1:8080 \

--log-dir=/var/log/kubernetes \

--logtostderr=true

Restart=always

RestartSec=10


[Install]

WantedBy=multi-user.target

Alias=kube-scheduler.service


sudo systemctl enable kube-scheduler.service

$ sudo systemctl start kube-scheduler.service


8. etcd 에 flannel 에서 사용할 ip range 등록  (flannel 을 node 에서 사용해야 필요함)

$ sudo etcdctl mk /coreos.com/network/config '{"Network":"172.17.0.0/16"}'



[ Service Cluster IP Range ]

10.0.0.0 - 10.255.255.255 (10/8 prefix)

172.16.0.0 - 172.31.255.255 (172.16/12 prefix)

192.168.0.0 - 192.168.255.255 (192.168/16 prefix)




[ Kubernetes Minion 설치 ]


4. etcd 설치

https://github.com/coreos/etcd/releases

curl -L  https://github.com/coreos/etcd/releases/download/v2.2.2/etcd-v2.2.2-linux-amd64.tar.gz -o etcd-v2.2.2-linux-amd64.tar.gz

$ tar xzvf etcd-v2.2.2-linux-amd64.tar.gz

$ sudo cp -f etcd-v2.2.2-linux-amd64/etcd /usr/bin

$ sudo cp -f etcd-v2.2.2-linux-amd64/etcdctl /usr/bin


$ sudo mkdir -p /var/lib/etcd/member

$ sudo chmod -R 777 /var/lib/etcd


$ sudo vi /etc/network-environment

# The master's IPv4 address - reachable by the kubernetes nodes.

NODE_NAME=kube-node01

MASTER_NAME=kube-master

NODE_NAME_01=kube-node01


sudo vi /lib/systemd/system/etcd.service

[Unit]

Description=etcd

After=network-online.service


[Service]

EnvironmentFile=/etc/network-environment          # 혹은 /etc/default/etcd.conf

PermissionsStartOnly=true

ExecStart=/usr/bin/etcd \

--name ${NODE_NAME} \

--data-dir /var/lib/etcd \

--initial-advertise-peer-urls http://192.168.75.130:2380 \

--listen-peer-urls http://192.168.75.130:2380 \

--listen-client-urls http://192.168.75.130:2379,http://127.0.0.1:2379 \

--advertise-client-urls http://192.168.75.130:2379 \

--initial-cluster-token etcd-cluster-1 \

--initial-cluster ${MASTER_NAME}=http://kube-master:2380,${NODE_NAME_01}=http://kube-node01:2380 \

--initial-cluster-state new

Restart=always

RestartSec=10s


[Install]

WantedBy=multi-user.target

Alias=etcd.service


$ cd /lib/systemd/system

$ sudo chmod 775 etcd.service


$ sudo systemctl enable etcd.service

sudo systemctl daemon-reload                        # 파일 수정 후에는 reload 필요

$ sudo systemctl start etcd.service


$ etcdctl member list


5. flannel 설치
$ git clone https://github.com/coreos/flannel.git

$ cd flannel

$ git checkout -b v0.5.5 tags/v0.5.5     # git checkout -b release-0.5.4 origin/release-0.5.4

$ ./build                   # bin 디렉토리가 생기면서 flanneld 실행파일이 빌드됨 

$ sudo cp -f bin/flanneld /usr/bin/.


$ sudo netstat -tulpn | grep etcd          # etcd 떠 있는 포트를 확인

sudo flanneld -etcd-endpoints=http://kube-node01:4001,http://kube-node01:2379 -v=0


$ cd /lib/systemd/system

$ sudo vi flanneld.service


[Unit]

Description=flanneld Service

After=etcd.service

Requires=etcd.service


[Service]

EnvironmentFile=/etc/network-environment

PermissionsStartOnly=true

User=root

ExecStart=/usr/bin/flanneld \

-etcd-endpoints http://kube-node01:4001,http://kube-node01:2379 \

-v=0

Restart=always

RestartSec=10s

RemainAfterExit=yes


[Install]

WantedBy=multi-user.target

Alias=flanneld.service



$ sudo systemctl enable flanneld.service

$ sudo systemctl start flanneld.service




8. Kubernetes Proxy 설치

$ git clone https://github.com/GoogleCloudPlatform/kubernetes.git

$ cd kubernetes

git checkout -b release-1.0 origin/release-1.0

$ sudo make release


$ cd _output/release-tars

$ sudo tar xvf kubernetes-server-linux-amd64.tar.gz


sudo cp kubernetes/server/bin/kube-proxy /usr/bin

$ sudo cp kubernetes/server/bin/kubelet /usr/bin

sudo cp kubernetes/server/bin/kubectl /usr/bin

sudo cp kubernetes/server/bin/kubernetes /usr/bin


sudo mkdir -p /var/log/kubernetes

$ sudo chown -R stack.docker /var/log/kubernetes/


$ cd /lib/systemd/system

sudo vi kube-proxy.service


[Unit]

Description=Kubernetes Proxy

Documentation=https://github.com/GoogleCloudPlatform/kubernetes


[Service]

ExecStart=/usr/bin/kube-proxy \

--master=http://kube-master:8080 \

--log-dir=/var/log/kubernetes \

--logtostderr=true \

--v=0                                                     # debug 모드

Restart=always

RestartSec=10


[Install]

WantedBy=multi-user.target

Alias=kube-proxy.service


$ sudo systemctl enable kube-proxy.service

$ sudo systemctl start kube-proxy.service



9. Kubernetes Kubelet 설치

$ cd /lib/systemd/system

sudo vi kubelet.service


[Unit]

Description=Kubernetes Kubelet

Documentation=https://github.com/GoogleCloudPlatform/kubernetes


[Service]

ExecStart=/usr/bin/kubelet \

--address=0.0.0.0 \

--port=10250 \

--hostname_override=kube-minion \

--api_servers=http://kube-master:8080 \

--log-dir=/var/log/kubernetes \

--logtostderr=true \

--cluster_domain=cluster.local \

--v=0                                                      # debug 모드

Restart=always

RestartSec=10


[Install]

WantedBy=multi-user.target

Alias=kubelet.service


$ sudo systemctl enable kubelet.service

$ sudo systemctl start kubelet.service


# docker 서비스 restart

$ sudo service docker restart

10. flannel 설치 (etcd 의 Network 등 설정 값을 가지고 옴) - 동작 확인 필요
$ git clone https://github.com/coreos/flannel.git

$ cd flannel

$ git checkout -b v0.5.1 tags/v0.5.1     # git checkout -b release-0.5.4 origin/release-0.5.4

$ ./build                   # bin 디렉토리가 생기면서 flanneld 실행파일이 빌드됨 

$ sudo cp -f bin/flanneld /usr/bin/.


sudo flanneld -etcd-endpoints=http://kube-master:4001 -v=0



10. 설치한 node 확인

sudo kubectl get nodes


NAME                 LABELS                                                    STATUS

192.168.75.202   kubernetes.io/hostname=192.168.75.202    NotReady

kube-minion        kubernetes.io/hostname=kube-minion         Ready


11. 서비스 올리기

# Master 서버

$ sudo systemctl start etcd.service

$ sudo systemctl start kube-apiserver.service

$ sudo systemctl start kube-controller-manager.service

$ sudo systemctl start kube-scheduler.service


# Minion 서버

$ sudo systemctl start kube-proxy.service

$ sudo systemctl start kubelet.service



12. mysql 서비스 올리기

mkdir pods

$ cd pods

$ vi mysql.yaml

apiVersion: v1

kind: Pod

metadata:

  name: mysql

  labels:

    name: mysql

spec:

  containers:

    - resources:

        limits :

          cpu: 1

      image: mysql

      name: mysql

      env:

        - name: MYSQL_ROOT_PASSWORD

          # change this

          value: root

      ports:

        - containerPort: 3306

          name: mysql


$ sudo kubectl create -f mysql.yaml

$ sudo kubectl get pods


$ vi mysql-service.yaml

apiVersion: v1

kind: Service

metadata:

  labels:

    name: mysql

  name: mysql

spec:

  publicIPs:

    - 192.168.75.202

  ports:

    # the port that this service should serve on

    - port: 3306

  # label keys and values that must match in order to receive traffic for this service

  selector:

    name: mysql


$ sudo kubectl create -f mysql-service.yaml

$ sudo kubectl get services







**************************************************

*****  juju 로 설치  (실패)                               ***********

**************************************************

1. juju 설치

sudo add-apt-repository ppa:juju/stable

$ sudo apt-get update

$ sudo apt-get install juju-core juju-quickstart

juju quickstart u/kubernetes/kubernetes-cluster












**************************************************

*****  여기는 참고                                          ***********

**************************************************


3. flannel 설치

$ git clone https://github.com/coreos/flannel.git

$ cd flannel

$ git checkout -b v0.5.1 tags/v0.5.1

$ ./build                   # bin 디렉토리가 생기면서 flanneld 실행파일이 빌드됨 

$ cp bin/flanneld /opt/bin




4. etcd 설치

https://github.com/coreos/etcd/releases

$ curl -L  https://github.com/coreos/etcd/releases/download/v2.1.1/etcd-v2.1.1-linux-amd64.tar.gz -o etcd-v2.1.1-linux-amd64.tar.gz

$ tar xzvf etcd-v2.1.1-linux-amd64.tar.gz

$ sudo cp  etcd-v2.1.1-linux-amd64/bin/etcd* /opt/bin

$ cd /var/lib

$ sudo mkdir etcd

$ sudo chown stack.docker etcd

sudo mkdir /var/run/kubernetes

$ sudo chown stack.docker /var/run/kubernetes

sudo vi /etc/default/etcd

ETCD_NAME=default

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:4001"



3. Kubernetes Master 설치

$ git clone https://github.com/GoogleCloudPlatform/kubernetes.git

$ cd kubernetes

git checkout -b release-1.0 origin/release-1.0

$ cd cluster/ubuntu/

$ ./build.sh            # binaries 디렉토리로 다운로드 함


# Add binaries to /usr/bin

$ sudo cp -f binaries/master/* /usr/bin

$ sudo cp -f binaries/kubectl /usr/bin


$ wget https://github.com/Metaswitch/calico-kubernetes-ubuntu-demo/archive/master.tar.gz

$ tar -xvf master.tar.gz

$ sudo cp -f calico-kubernetes-ubuntu-demo-master/master/*.service /etc/systemd


$ cp calico-kubernetes-ubuntu-demo-master/node/network-environment-template network-environment

$ vi network-environment

#! /usr/bin/bash

# This node's IPv4 address

DEFAULT_IPV4=192.168.75.201


# The kubernetes master IP

KUBERNETES_MASTER=192.168.75.201


# Location of etcd cluster used by Calico.  By default, this uses the etcd

# instance running on the Kubernetes Master

ETCD_AUTHORITY=192.168.75.201:4001


# The kubernetes-apiserver location - used by the calico plugin

KUBE_API_ROOT=https://192.168.75.201:443/api/v1/


$ sudo mv -f network-environment /etc



$ sudo systemctl enable /etc/systemd/etcd.service

$ sudo systemctl enable /etc/systemd/kube-apiserver.service

$ sudo systemctl enable /etc/systemd/kube-controller-manager.service

$ sudo systemctl enable /etc/systemd/kube-scheduler.service


$ sudo systemctl start etcd.service

$ sudo systemctl start kube-apiserver.service

$ sudo systemctl start kube-controller-manager.service

$ sudo systemctl start kube-scheduler.service






4. Kubernetes Minion 설치

$ git clone https://github.com/GoogleCloudPlatform/kubernetes.git

$ cd kubernetes

git checkout -b release-1.0 origin/release-1.0

$ cd cluster/ubuntu/

$ ./build.sh            # binaries 디렉토리로 다운로드 함


# Add binaries to /usr/bin

$ sudo cp -f binaries/minion/* /usr/bin


$ wget https://github.com/Metaswitch/calico-kubernetes-ubuntu-demo/archive/master.tar.gz

$ tar -xvf master.tar.gz

$ sudo cp -f calico-kubernetes-ubuntu-demo-master/node/kube-proxy.service /etc/systemd

$ sudo cp -f calico-kubernetes-ubuntu-demo-master/node/kube-kubelet.service /etc/systemd


$ sudo systemctl enable /etc/systemd/kube-proxy.service

$ sudo systemctl enable /etc/systemd/kube-kubelet.service


$ cp calico-kubernetes-ubuntu-demo-master/node/network-environment-template network-environment

$ vi network-environment

#! /usr/bin/bash

# This node's IPv4 address

DEFAULT_IPV4=192.168.75.201


# The kubernetes master IP

KUBERNETES_MASTER=192.168.75.201


# Location of etcd cluster used by Calico.  By default, this uses the etcd

# instance running on the Kubernetes Master

ETCD_AUTHORITY=192.168.75.201:4001


# The kubernetes-apiserver location - used by the calico plugin

KUBE_API_ROOT=https://192.168.75.201:443/api/v1/


$ sudo mv -f network-environment /etc



$ sudo systemctl start kube-proxy.service

$ sudo systemctl start kube-kubelet.service












4. kubernetes 설치

$ git clone https://github.com/GoogleCloudPlatform/kubernetes.git

$ cd kubernetes

$ git checkout -b release-1.0 origin/release-1.0

$ sudo make release


$ cd _output/release-tars

$ sudo chown -R stack.docker *

$ tar xvf kubernetes-server-linux-amd64.tar.gz


$ sudo su -

$ echo "192.168.75.201 kube-master

192.168.75.202 kube-minion" >> /etc/hosts

$ exit





5. kubernetes Master 설치


# kube-master 에 뜨는 서비스

etcd

flanneld

kube-apiserver

kube-controller-manager

kube-scheduler


$ cd ~/kubernetes/_output/release-tars/kubernetes

$ cp server/bin/kube-apiserver /opt/bin/

$ cp server/bin/kube-controller-manager /opt/bin/

$ cp server/bin/kube-scheduler /opt/bin/

$ cp server/bin/kubectl /opt/bin/

$ cp server/bin/kubernetes /opt/bin/


$ sudo cp kubernetes/cluster/ubuntu/master/init_conf/etcd.conf /etc/init/

$ sudo cp kubernetes/cluster/ubuntu/master/init_conf/kube-apiserver.conf /etc/init/

$ sudo cp kubernetes/cluster/ubuntu/master/init_conf/kube-controller-manager.conf /etc/init/

$ sudo cp kubernetes/cluster/ubuntu/master/init_conf/kube-scheduler.conf /etc/init/


$ sudo cp kubernetes/cluster/ubuntu/master/init_scripts/etcd /etc/init.d/

$ sudo cp kubernetes/cluster/ubuntu/master/init_scripts/kube-apiserver /etc/init.d/

$ sudo cp kubernetes/cluster/ubuntu/master/init_scripts/kube-controller-manager /etc/init.d/

$ sudo cp kubernetes/cluster/ubuntu/master/init_scripts/kube-scheduler /etc/init.d/


$ sudo vi /etc/default/kube-apiserver

KUBE_API_ADDRESS="--address=0.0.0.0"

KUBE_API_PORT="--port=8080"

KUBELET_PORT="--kubelet_port=10250"

KUBE_ETCD_SERVERS="--etcd_servers=http://127.0.0.1:4001"

KUBE_SERVICE_ADDRESSES="--portal_net=10.254.0.0/16"

KUBE_ADMISSION_CONTROL="--admission_control=NamespaceAutoProvision,LimitRanger,ResourceQuota"

KUBE_API_ARGS=""



$ sudo vi /etc/default/kube-controller-manager

KUBELET_ADDRESSES="--machines=192.168.75.202"






6. Minion 설치


# kube-minion 에 뜨는 서비스

flanneld

kubelet

kube-proxy


cd ~/kubernetes/_output/release-tars/kubernetes

sudo cp server/bin/kubelet /opt/bin/

$ sudo cp server/bin/kube-proxy /opt/bin/

$ sudo cp server/bin/kubectl /opt/bin/

$ sudo cp server/bin/kubernetes /opt/bin/


$ sudo cp kubernetes/cluster/ubuntu/minion/init_conf/kubelet.conf /etc/init

$ sudo cp kubernetes/cluster/ubuntu/minion/init_conf/kube-proxy.conf /etc/init


$ sudo cp kubernetes/cluster/ubuntu/minion/init_scripts/kubelet /etc/init.d/

$ sudo cp kubernetes/cluster/ubuntu/minion/init_scripts/kube-proxy /etc/init.d/












$ cd ~/kubernetes

$ vi cluster/ubuntu/config-default.sh

export nodes=${nodes:-"stack@192.168.75.201 stack@192.168.75.202"}

roles=${roles:-"ai i"}

export NUM_MINIONS=${NUM_MINIONS:-2}

export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-192.168.3.0/24}

export FLANNEL_NET=${FLANNEL_NET:-172.16.0.0/16}


$ cd cluster

$ KUBERNETES_PROVIDER=ubuntu ./kube-up.sh








3. go 소스 설치

https://golang.org/dl/

$ curl -L https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz -o go1.4.2.linux-amd64.tar.gz

$ tar xvf go1.4.2.linux-amd64.tar.gz

























Posted by Kubernetes Korea co-leader seungkyua@gmail.com