Helm chart

OpenStack 2018.03.23 10:33



# ingress.yaml 


images:
  tags:
    entrypoint: registry.cicd.stg.taco/kubernetes-entrypoint:v0.2.1
    ingress: registry.cicd.stg.taco/nginx-ingress-controller:0.9.0
    error_pages: registry.cicd.stg.taco/defaultbackend:1.0
    dep_check: registry.cicd.stg.taco/kubernetes-entrypoint:v0.2.1
  pull_policy: Always
config:
  worker-processes: "8"
pod:
  replicas:
    ingress: 1
    error_page: 1




# openstack-ceph-config.yaml


images:

      tags:

        ks_user: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_service: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_endpoints: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ceph_bootstrap: registry.cicd.stg.taco/ceph-daemon:tag-build-master-jewel-ubuntu-16.04

        dep_check: registry.cicd.stg.taco/kubernetes-entrypoint:v0.2.1

        ceph_daemon: registry.cicd.stg.taco/ceph-daemon:tag-build-master-jewel-ubuntu-16.04

        ceph_config_helper: registry.cicd.stg.taco/ceph-config-helper:v1.7.5

        ceph_rbd_provisioner: registry.cicd.stg.taco/rbd-provisioner:v0.1.1

        ceph_cephfs_provisioner: registry.cicd.stg.taco/cephfs-provisioner:v0.1.1

      pull_policy: IfNotPresent 

    deployment:

      storage_secrets: true

      client_secrets: true

      rbd_provisioner: false

      cephfs_provisioner: false

      rgw_keystone_user_and_endpoints: false

    conf:

      ceph:

        global:

          mon_host: 192.168.51.20

    storageclass:

      rbd:

        provision_storage_class: false

        user_id: cinder

        admin_secret_namespace: openstack

      cephfs:

        provision_storage_class: false

        admin_secret_namespace: openstack

    manifests:

      configmap_bin_clients: true

      configmap_bin_ks: true

      configmap_bin: true

      configmap_etc: true

      configmap_templates: true

      daemonset_mon: false

      daemonset_osd: false

      deployment_mds: false

      deployment_moncheck: false

      deployment_rbd_provisioner: false

      deployment_cephfs_provisioner: false

      deployment_rgw: false

      deployment_mgr: false

      job_bootstrap: false

      job_cephfs_client_key: false

      job_keyring: false

      job_ks_endpoints: false

      job_ks_service: false

      job_ks_user: false

      job_namespace_client_key_cleaner: true

      job_namespace_client_key: true

      job_rbd_pool: false

      job_storage_admin_keys: true

      secret_keystone_rgw: false

      secret_keystone: false

      service_mgr: false

      service_mon: false

      service_rgw: false

      service_mon_discovery: false

      storageclass: false

    dependencies:

      rbd_provisioner:

        jobs:

        services:




# mariadb.yaml


images:
  tags:
    mariadb: registry.cicd.stg.taco/mariadb:10.1.23
    test: registry.cicd.stg.taco/ocata/ubuntu-source-kolla-toolbox:develop
  pull_policy: Always
pod:
  replicas:
    server: 3
volume:
  enabled: true
  class_name: ceph



# etcd.yaml


images:
  tags:
    etcd: registry.cicd.stg.taco/etcd:v3.2.5
    test: registry.cicd.stg.taco/ocata/ubuntu-source-kolla-toolbox:develop
  pull_policy: IfNotPresent
pod:
  replicas:
    etcd: 1



# rabbitmq.yaml


images:
  tags:
    rabbitmq: registry.cicd.stg.taco/rabbitmq:3.7
    dep_check: registry.cicd.stg.taco/kubernetes-entrypoint:v0.2.1
    test: registry.cicd.stg.taco/ocata/ubuntu-source-keystone:2.2.0
  pull_policy: IfNotPresent
pod:
  replicas:
    server: 3
volume:
  class_name: ceph




# memcached.yaml


images:
  tags:
    dep_check: registry.cicd.stg.taco/kubernetes-entrypoint:v0.2.1
    memcached: registry.cicd.stg.taco/memcached:1.5.5
  pull_policy: IfNotPresent
pod:
  replicas:
    server: 1





# libvirt.yaml


images:

      tags:

        libvirt: registry.cicd.stg.taco/ocata/ubuntu-source-nova-libvirt:2.2.0

      pull_policy: Always

    ceph:

      enabled: true

      cinder_user: "cinder"

      cinder_keyring: "xxxxx=="

    libvirt:

      listen_addr: 0.0.0.0

      log_level: 3

    manifests:

      configmap_bin: true

      configmap_etc: true

      daemonset_libvirt: true




# openvswitch.yaml


images:

      tags:

        openvswitch_db_server: registry.cicd.stg.taco/ocata/ubuntu-source-openvswitch-db-server:2.2.0

        openvswitch_vswitchd: registry.cicd.stg.taco/ocata/ubuntu-source-openvswitch-vswitchd:2.2.0

      pull_policy: Always

    network:

      external_bridge: br-ex

      interface:

        external: bond1.52

      auto_bridge_add: {}




# keystone.yaml


images:

      tags:

        bootstrap: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        test: registry.cicd.stg.taco/ocata/ubuntu-source-rally:2.2.0

        db_init: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        keystone_db_sync: registry.cicd.stg.taco/ocata/ubuntu-source-keystone:2.2.0

        db_drop: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        keystone_fernet_setup: registry.cicd.stg.taco/ocata/ubuntu-source-keystone:2.2.0

        keystone_fernet_rotate: registry.cicd.stg.taco/ocata/ubuntu-source-keystone:2.2.0

        keystone_credential_setup: registry.cicd.stg.taco/ocata/ubuntu-source-keystone:2.2.0

        keystone_credential_rotate: registry.cicd.stg.taco/ocata/ubuntu-source-keystone:2.2.0

        keystone_api: registry.cicd.stg.taco/ocata/ubuntu-source-keystone:2.2.0

        dep_check: registry.cicd.stg.taco/kubernetes-entrypoint:v0.2.1

        rabbit_init: registry.cicd.stg.taco/rabbitmq:3.7-management

      pull_policy: Always

    conf:

      keystone:

        DEFAULT:

          debug: true

    pod:

      replicas:

        api: 3




# glance.yaml


storage: rbd

    images:

      tags:

        test: registry.cicd.stg.taco/ocata/ubuntu-source-rally:2.2.0

        glance_storage_init: registry.cicd.stg.taco/ceph-daemon:tag-build-master-jewel-ubuntu-16.04

        db_init: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        glance_db_sync: registry.cicd.stg.taco/ocata/ubuntu-source-glance-api:2.2.0

        db_drop: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_user: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_service: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_endpoints: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        glance_api: registry.cicd.stg.taco/ocata/ubuntu-source-glance-api:2.2.0

        glance_registry: registry.cicd.stg.taco/ocata/ubuntu-source-glance-registry:2.2.0

        bootstrap: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        dep_check: registry.cicd.stg.taco/kubernetes-entrypoint:v0.2.1

        rabbit_init: registry.cicd.stg.taco/rabbitmq:3.7-management

      pull_policy: Always

    pod:

      replicas:

        api: 3

        registry: 3

      user:

        glance:

          uid: 42415

    network:

      api:

        ingress:

          proxy_body_size: 102400M

    conf:

      ceph:

        monitors: ["192.168.51.20"]

        admin_keyring: "xxxx=="

      glance:

        glance_store:

          rbd_store_user: glance

          rbd_store_pool: images

        DEFAULT:

          show_image_direct_url: true

    bootstrap:

      enabled: true

      images:

        cirros:

          id: 201084fc-c276-4744-8504-cb974dbb3610

          private: false




# nova.yaml


images:

      tags:

        bootstrap: registry.cicd.stg.taco/ocata/ubuntu-source-nova-api:2.2.0

        db_init: registry.cicd.stg.taco/ocata/ubuntu-source-nova-api:2.2.0

        db_drop: registry.cicd.stg.taco/ocata/ubuntu-source-nova-api:2.2.0

        dep_check: registry.cicd.stg.taco/kubernetes-entrypoint:v0.2.1

        rabbit_init: registry.cicd.stg.taco/rabbitmq:3.7-management

        ks_user: registry.cicd.stg.taco/ocata/ubuntu-source-kolla-toolbox:2.2.0

        ks_service: registry.cicd.stg.taco/ocata/ubuntu-source-kolla-toolbox:2.2.0

        ks_endpoints: registry.cicd.stg.taco/ocata/ubuntu-source-kolla-toolbox:2.2.0

        nova_api: registry.cicd.stg.taco/ocata/ubuntu-source-nova-api:2.2.0

        nova_cell_setup: registry.cicd.stg.taco/ocata/ubuntu-source-nova-api:2.2.0

        nova_compute: registry.cicd.stg.taco/ocata/ubuntu-source-nova-compute:2.2.0

        nova_compute_ironic: registry.cicd.stg.taco/ocata/ubuntu-source-nova-compute-ironic:2.2.0

        nova_compute_ssh: registry.cicd.stg.taco/ocata/ubuntu-source-nova-ssh:2.2.0

        nova_conductor: registry.cicd.stg.taco/ocata/ubuntu-source-nova-conductor:2.2.0

        nova_consoleauth: registry.cicd.stg.taco/ocata/ubuntu-source-nova-consoleauth:2.2.0

        nova_db_sync: registry.cicd.stg.taco/ocata/ubuntu-source-nova-api:2.2.0

        nova_novncproxy: registry.cicd.stg.taco/ocata/ubuntu-source-nova-novncproxy:2.2.0

        nova_novncproxy_assets: registry.cicd.stg.taco/ocata/ubuntu-source-nova-novncproxy:2.2.0

        nova_placement: registry.cicd.stg.taco/ocata/ubuntu-source-nova-placement-api:2.2.0

        nova_scheduler: registry.cicd.stg.taco/ocata/ubuntu-source-nova-scheduler:2.2.0

        nova_spiceproxy: registry.cicd.stg.taco/ocata/ubuntu-source-nova-spicehtml5proxy:2.2.0

        nova_spiceproxy_assets: registry.cicd.stg.taco/ocata/ubuntu-source-nova-spicehtml5proxy:2.2.0

        test: registry.cicd.stg.taco/ocata/ubuntu-source-rally:2.2.0

      pull_policy: Always

    bootstrap:

      enabled: true

      flavors:

        m1_tiny:

          id: 0c84e220-a258-439f-a6ff-f8e9fd980025

    network:

      novncproxy:

        name: "nova-novncproxy"

        node_port:

          enabled: true

          port: 30608

        port: 6080

        targetPort: 6080

    ceph:

      enabled: true

      cinder_user: "cinder"

      cinder_keyring: "xxxx=="

      secret_uuid: "582393ff-9a5c-4a2e-ae0d-86ec18c36afc"

    conf:

      nova:

        DEFAULT:

          force_config_drive: true

          scheduler_default_filters: "RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter"

          debug: true

        vnc:

          novncproxy_base_url: http://ctrl01-stg:30608/vnc_auto.html

        libvirt:

          rbd_user: "cinder"

          rbd_secret_uuid: "582393ff-9a5c-4a2e-ae0d-86ec18c36afc"

        scheduler:

          discover_hosts_in_cells_interval: 60

    endpoints:

      oslo_db_cell0:

        path: /nova_cell0

    pod:

      user:

        nova:

          uid: 42436

      replicas:

        api_metadata: 3

        osapi: 3

        conductor: 3

        consoleauth: 3

        scheduler: 3

        novncproxy: 3




# neutron.yaml


images:

      tags:

        bootstrap: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        neutron_test: registry.cicd.stg.taco/ocata/ubuntu-source-rally:2.2.0

        db_init: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        neutron_db_sync: registry.cicd.stg.taco/ocata/ubuntu-source-neutron-server:2.2.0

        db_drop: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_user: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_service: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_endpoints: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        neutron_server: registry.cicd.stg.taco/ocata/ubuntu-source-neutron-server:2.2.0

        neutron_dhcp: registry.cicd.stg.taco/ocata/ubuntu-source-neutron-dhcp-agent:2.2.0

        neutron_metadata: registry.cicd.stg.taco/ocata/ubuntu-source-neutron-metadata-agent:2.2.0

        neutron_l3: registry.cicd.stg.taco/ocata/ubuntu-source-neutron-l3-agent:2.2.0

        neutron_openvswitch_agent: registry.cicd.stg.taco/ocata/ubuntu-source-neutron-openvswitch-agent:2.2.0

        neutron_linuxbridge_agent: registry.cicd.stg.taco/ocata/ubuntu-source-neutron-linuxbridge-agent:2.2.0

        dep_check: registry.cicd.stg.taco/kubernetes-entrypoint:v0.2.1

        rabbit_init: registry.cicd.stg.taco/rabbitmq:3.7-management

      pull_policy: Always

    pod:

      replicas:

        server: 3

      user:

        neutron:

          uid: 42435

    labels:

      agent:

        dhcp:

          node_selector_key: openstack-network-node

        l3:

          node_selector_key: openstack-network-node

    manifests:

      daemonset_metadata_agent: false

      daemonset_ovs_agent: true

      daemonset_lb_agent: false

    network:

      backend: ovs

      external_bridge: br-ex

      interface:

        tunnel: bond1

    conf:

      neutron_sudoers:

        override: |

          # This sudoers file supports rootwrap-daemon for both Kolla and LOCI Images.

          Defaults !requiretty

          Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"

          neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/kolla/venv/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf

      neutron:

        DEFAULT:

          debug: True

          core_plugin: ml2

          l3_ha: True

          global_physnet_mtu: 9000

          service_plugins: router

          interface_driver: openvswitch

        agent:

          root_helper_daemon: sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf

      plugins:

        ml2_conf:

          ml2:

            mechanism_drivers: openvswitch,l2population

            type_drivers: flat,vlan,vxlan

            tenant_network_types: vxlan

        openvswitch_agent:

          agent:

            tunnel_types: vxlan

            l2_population: True

            arp_responder: True

          ovs:

            bridge_mappings: "external:br-ex"

          securitygroup:

            firewall_driver: openvswitch






# cinder.yaml


images:

      tags:

        test: registry.cicd.stg.taco/ocata/ubuntu-source-rally:2.2.0

        db_init: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        cinder_db_sync: registry.cicd.stg.taco/ocata/ubuntu-source-cinder-api:2.2.0

        db_drop: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_user: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_service: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_endpoints: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        cinder_api: registry.cicd.stg.taco/ocata/ubuntu-source-cinder-api:2.2.0

        bootstrap: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        cinder_scheduler: registry.cicd.stg.taco/ocata/ubuntu-source-cinder-scheduler:2.2.0

        cinder_volume: registry.cicd.stg.taco/ocata/ubuntu-source-cinder-volume:2.2.0

        cinder_volume_usage_audit: registry.cicd.stg.taco/ocata/ubuntu-source-cinder-volume:2.2.0

        cinder_storage_init: registry.cicd.stg.taco/ceph-daemon:tag-build-master-jewel-ubuntu-16.04

        cinder_backup: registry.cicd.stg.taco/ocata/ubuntu-source-cinder-backup:2.2.0

        cinder_backup_storage_init: registry.cicd.stg.taco/ceph-daemon:tag-build-master-jewel-ubuntu-16.04

        dep_check: registry.cicd.stg.taco/kubernetes-entrypoint:v0.2.1

        rabbit_init: registry.cicd.stg.taco/rabbitmq:3.7-management

      pull_policy: Always

    pod:

      user:

        cinder:

          uid: 42407

      replicas:

        api: 3

        backup: 1

        scheduler: 3

        volume: 1

    conf:

      ceph:

        admin_keyring: "xxxxx=="

        monitors: ["192.168.51.20"]

      cinder:

        DEFAULT:

          debug: true

          backup_ceph_user: "cinder"

          backup_ceph_pool: "backups"

      backends:

        rbd1:

          volume_driver: cinder.volume.drivers.rbd.RBDDriver

          volume_backend_name: rbd1

          rbd_ceph_conf: "/etc/ceph/ceph.conf"

          rbd_flatten_volume_from_snapshot: false

          rbd_max_clone_depth: 5

          rbd_store_chunk_size: 4

          rados_connect_timeout: -1

          rbd_user: "cinder"

          rbd_pool: "volumes"





# heat.yaml


images:

      tags:

        bootstrap: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        db_init: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        heat_db_sync: registry.cicd.stg.taco/ocata/ubuntu-source-heat-api:2.2.0

        db_drop: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_user: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_service: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        ks_endpoints: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        heat_api: registry.cicd.stg.taco/ocata/ubuntu-source-heat-api:2.2.0

        heat_cfn: registry.cicd.stg.taco/ocata/ubuntu-source-heat-api:2.2.0

        heat_cloudwatch: registry.cicd.stg.taco/ocata/ubuntu-source-heat-api:2.2.0

        heat_engine: registry.cicd.stg.taco/ocata/ubuntu-source-heat-engine:2.2.0

        dep_check: registry.cicd.stg.taco/kubernetes-entrypoint:v0.2.1

        rabbit_init: registry.cicd.stg.taco/rabbitmq:3.7-management

      pull_policy: Always

    pod:

      user:

        heat:

          uid: 42418

      replicas:

        api: 3

        cfn: 3

        cloudwatch: 3

        engine: 3



# horizon.yaml


images:

      tags:

        db_init: registry.cicd.stg.taco/ocata/ubuntu-source-horizon:2.2.0

        horizon_db_sync: registry.cicd.stg.taco/ocata/ubuntu-source-horizon:2.2.0

        horizon: registry.cicd.stg.taco/ocata/ubuntu-source-horizon:2.2.0

        dep_check: registry.cicd.stg.taco/kubernetes-entrypoint:v0.2.1

        test: registry.cicd.stg.taco/ocata/ubuntu-source-horizon:develop

      pull_policy: Always

    pod:

      replicas:

        server: 3

    network:

      external_policy_local: false

      node_port:

        enabled: true

        port: 32000

    local_settings:

      openstack_neutron_network:

        enable_router: "True"

        enable_quotas: "True"

        enable_ipv6: "False"

        enable_distributed_router: "False"

        enable_ha_router: "True"

        enable_lb: "True"

        enable_firewall: "False"

        enable_vpn: "False"

        enable_fip_topology_check: "True"




Posted by Kubernetes Korea co-leader seungkyua@gmail.com

OpenStack 발표자료 (From Kubernetes to OpenStack)





Posted by Kubernetes Korea co-leader seungkyua@gmail.com

OpenStack Day Korea 2017 에서 발표한 자료




Posted by Kubernetes Korea co-leader seungkyua@gmail.com

## OpenStack Foundation 사용자 등록



## launchpad 에 사용자 등록 (OpenStack Foundation email 과 동일해야 함)

## launchpad.net 사용자 id 확인 (자신의 id 로 조회되는지 확인)
https://launchpad.net/~seungkyua


## review 사이트에 사용자 등록


## review 사이트에서 필요한 정보 등록
1. Profile 메뉴에서 Username 등록
2. Contact Information 에서 아래 처럼 날짜 업데이트 되었는지 확인 (안되어 있으면 정보 입력)
   Contact information last updated on May 25, 2015 at 12:51 PM.
3. SSH Public Keys 등록
   $ cat ~/.ssh/id_rsa.pub
4. Agreements 서명





[ stackalytics 에 추가 ]
$ mkdir -p ~/Documents/git && cd ~/Documents/git
$ git clone ssh://seungkyu@review.openstack.org:29418/openstack/stackalytics
$ cd stackalytics


## git 및 git-review 설치
$ brew install git git-review


## 환경 설정 (gitreview.username 은 review 사이트의 Profile Username 임)
$ git config --add gitreview.username "seungkyu"
git config --add user.name "Seungkyu Ahn"
git config --add user.email "seungkyua@gmail.com"




## 접속 테스트 및 commit-msg hook 다운로드
$ git review -s




## 개인 추가 (launchpad_id 의 abc 순), end_date: null 은 하나 밖에 못씀
## launchpad_id 만 필수, 나머지 id 는 옵션
$ git checkout -b seungkyua
$ vi etc/default_data.json
        {
            "launchpad_id": "seungkyua",
            "gerrit_id": "seungkyu",
            "github_id": "seungkyua",
            "companies": [
                {
                    "company_name": "Samsung SDS",
                    "end_date": "2015-Feb-28"
                },
                {
                    "company_name": "OpenStack Korea User Group",
                    "end_date": "2016-Dec-31"
                },
                {
                    "company_name": "SK telecom",
                    "end_date": null
                }
            ],
            "user_name": "Seungkyu Ahn",
            "emails": ["ahnsk@sk.com", "seungkyua@gmail.com"]
        },




## companies 항목에 회사명이 없을 때는 추가해야 함
25785         {
25786             "domains": ["sktelecom.com"],
25787             "company_name": "SK telecom",
25788             "aliases": ["SKT", "SKTelecom"]
25789         },



$ git commit -a

## commit message 는 아래와 같이
modify personal info about seungkyua


## commit message 작성법
첫번째 라인은 50자 이내로 간단히 요약을 쓴다.
[공백라인]
설명을 적되 라인은 72자가 넘어가면 다음 라인에 쓴다.



## review 올리기
$ git review



## git review 시 Change-Id 세팅 에러가 나면 화면 에러 대로 수행
$ gitdir=$(git rev-parse --git-dir); scp -p -P 29418 seungkyu@review.openstack.org:hooks/commit-msg ${gitdir}/hooks/

$ git commit --amend
$ git review


## 확인










Posted by Kubernetes Korea co-leader seungkyua@gmail.com

## OpenStack CLI 를 사용할 때 현재 어떤 프로젝트와 사용자인지를 알려주는 Prompt 만들기


## 오픈스택 사용자를 위한 프롬프트 설정  (project:user) 로 표시됨

$ vi ~/.bashrc


# Emit "(project:user)" built from the OS_PROJECT_NAME / OS_USERNAME
# environment variables, for embedding in the shell prompt via PS1.
# Prints nothing when neither variable is set.
openstack_user() {

  # Pull the two OS_* variables out of the environment, rewrite
  # OS_PROJECT_NAME=x to "(x" and OS_USERNAME=y to "y)", then join the two
  # resulting lines with ":".
  # NOTE(review): assumes env prints OS_PROJECT_NAME before OS_USERNAME;
  # env output order is not guaranteed by POSIX — verify on target shells.
  env | grep -E 'OS_USERNAME|OS_PROJECT_NAME' 2> /dev/null | sed -e 's/OS_PROJECT_NAME=\(.*\)/(\1/' -e 's/OS_USERNAME=\(.*\)/\1)/' | paste -sd ":"

}


PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]$(openstack_user)\$ '





$ . demo/demo-openrc

(demo:demo)$ openstack server list






Posted by Kubernetes Korea co-leader seungkyua@gmail.com
## root 계정으로 수행해야 함


# apt-get update
# apt-get install -y gcc make
# apt-get install -y python-pip python-dev python3-dev libevent-dev \
                            vlan libvirt-bin bridge-utils lvm2 openvswitch-switch \
                            python-libvirt nbd-client ipset ntp python-lzma \
                            p7zip-full arping qemu-kvm

# apt-get install -y python-tox libmysqlclient-dev libpq-dev \
                           libxml2-dev libxslt1-dev libvirt-dev libffi-dev

# apt-get install -y virtinst libsemanage1-dev python-semanage \
                            attr policycoreutils


## avocado 설치
# cd ~
# mkdir avocado && cd avocado
# git clone git://github.com/avocado-framework/avocado.git
# cd avocado
# make requirements
# python setup.py install


##  avocado plugin 설치 (avocado-vt)
# cd ~/avocado
# cd avocado
# make requirements-plugins
# make link


# vi ~/.config/avocado/avocado.conf
[datadir.paths]
base_dir = /root/avocado/avocado
test_dir = /root/avocado/avocado/examples/tests
data_dir = /usr/share/avocado/data
logs_dir = /root/avocado/avocado/job-results



## Bootstrapping Avocado-VT (vt-type : qemu, libvirt .....)
# ./scripts/avocado vt-bootstrap --vt-type libvirt



## Avocado plugins list 보기
# ./scripts/avocado plugins


## vt-type 별 test list 보기 (vt-type : qemu, libvirt .....)
# ./scripts/avocado list --vt-type libvirt --verbose


## libvirt test case 한개 돌리기
# ./scripts/avocado run type_specific.io-github-autotest-qemu.driver_load.with_balloon


## 결과 보기
# cd /root/avocado/avocado/job-results/job-2016-08-31T09.17-1daa785/\
html/results.html


## 전체 테스트 돌리기
# ./scripts/avocado run type_specific










Posted by Kubernetes Korea co-leader seungkyua@gmail.com

localconf

OpenStack 2016.05.27 10:59

[[local|localrc]]

ADMIN_PASSWORD=secret

DATABASE_PASSWORD=$ADMIN_PASSWORD

RABBIT_PASSWORD=$ADMIN_PASSWORD

SERVICE_PASSWORD=$ADMIN_PASSWORD

HOST_IP=10.40.102.84  # change to your VM's IP

# Do not use Nova-Network

disable_service n-net

# Enable Neutron

ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3

## Neutron options

Q_USE_SECGROUP=True

FLOATING_RANGE="10.40.102.0/24"

FIXED_RANGE="10.0.0.0/24"

Q_FLOATING_ALLOCATION_POOL=start=10.40.102.250,end=10.40.102.254

PUBLIC_NETWORK_GATEWAY="10.40.102.1"

PUBLIC_INTERFACE=eth0

# Open vSwitch provider networking configuration

Q_USE_PROVIDERNET_FOR_PUBLIC=True

OVS_PHYSICAL_BRIDGE=br-ex

PUBLIC_BRIDGE=br-ex

OVS_BRIDGE_MAPPINGS=public:br-ex

# Disable Identity v2

ENABLE_IDENTITY_V2=False

Posted by Kubernetes Korea co-leader seungkyua@gmail.com

1. 로그 로테이트 설정

    - 로그 파일이 쌓이는 것을 막아줌


2. Availability Zone 과 Aggregate Host 설정

    - VM 을 효율적으로 스케줄링 할 수 있음.


3. cpu, memory, disk ratio 설정

    - overcommit 을 고려


4. Nova Compute 에서 inject password 나 inject file 을 false 로 설정 

    - VM 부팅 속도를 빠르게 함


5. Cinder QoS, Network QoS 설정

    - 스토리지와 Network 의 QoS 설정으로 간섭을 최소화 함


6. Neutron Network 설정 정보

    - Provider Network 를 사용해야 tunneling 이 없어 속도가 빠름


7. live migration 설정

    - maxdowntime 을 적절히 설정해야 함


8. 캐시가 안되어 있는 새로운 이미지로 여러 VM 동시 생성 속도 측정

    - 이미지를 가져오는 이슈로 네트워크 대역폭을 다 소모할 수 있음

    - 사전에 이미지가 캐시되게 모든 host 에 해당 vm 을 미리 생성


9. VM 인스턴스 데이터가 저장되는 /var/lib/nova 의 디스크 사이즈가 충분한지 검증



To be continue ...











Posted by Kubernetes Korea co-leader seungkyua@gmail.com

[ Server IP 정보 ]

eth0 : NAT type         (vmnet2)  192.168.75.138        Public Network

eth1 : Host-only type (vmnet3)  192.168.230.138      Private Network

[ Multi Node 의 경우 두번째 추가 Compute Node ]
eth0 : NAT type         (vmnet2)  192.168.75.139       Public Network
eth1 : Host-only type (vmnet3)  192.168.230.139      Private Network

[ User 선택 ]
stack 유저로 생성

[ visudo 세팅 ]
stack   ALL=(ALL:ALL) NOPASSWD:ALL

[ vi /etc/network/interfaces ]
auto lo
iface lo inet loopback

auto ens33
iface ens33 inet static
        address 192.168.75.138
        netmask 255.255.255.0
        gateway 192.168.75.2
        dns-nameservers 8.8.8.8 8.8.4.4

auto ens34
iface ens34 inet static
        address 192.168.230.138
        netmask 255.255.255.0


[ Host 변경 ]
mkdir -p ~/Documents/scripts
cd ~/Documents/scripts

vi servers.txt
192.168.230.138 devstack01
192.168.230.139 devstack02

vi 01-hosts-setup.sh
#!/bin/bash

# 01-hosts-setup.sh
# Bootstrap passwordless SSH, hostnames and /etc/hosts entries for every
# node listed in servers.txt ("<ip> <hostname>" per line, '#' starts a
# comment line).  Run from the master node as a user that has passwordless
# sudo on all nodes.

SERVERLIST=$HOME/Documents/scripts/servers.txt
MASTER_IP="192.168.230.138"
MASTER_HOSTNAME="devstack01"
SSH_USER="stack"

# Generate an RSA key on the master (only once) and append the master's
# public key to ~/.ssh/authorized_keys on the given server, unless an
# entry mentioning the master hostname is already present there.
function set_sshkey() {
    local server=$1
    if [[ $server == "$MASTER_IP" ]]; then
        if [[ ! -f "${HOME}/.ssh/id_rsa" ]]; then
            # Non-interactive keygen: empty passphrase, accept all defaults.
            yes "" | ssh-keygen -t rsa -N ""
        else
            echo "skip to create ssh-keygen"
        fi
    fi
    # The public key travels over ssh's stdin; the remote side appends it
    # only when authorized_keys is missing or lacks the master hostname.
    cat ~/.ssh/id_rsa.pub | ssh $SSH_USER@$server -oStrictHostKeyChecking=no \
        "if [ ! -f ~/.ssh/authorized_keys ] || ! grep -q ${MASTER_HOSTNAME} ~/.ssh/authorized_keys; then \
             umask 077; test -d .ssh || mkdir -p .ssh; cat >> ~/.ssh/authorized_keys; \
         fi"
    echo "$server ssh-key ..... done"
}

# Set the remote hostname: pipe the desired name into /etc/hostname and
# apply it with `hostname -F`, skipping hosts that already carry the name.
function change_hostname() {
    local server=$1
    local hostname=$2
    echo ${hostname} | ssh $SSH_USER@$server \
    "if ! grep -q ${hostname} /etc/hostname; then \
         sudo su -c 'cat > /etc/hostname'; \
         sudo hostname -F /etc/hostname;
     fi"
    echo "$server $hostname ..... done"
}

# Append the whole servers.txt to the remote /etc/hosts unless the master
# hostname is already present there.
# NOTE(review): reads ./servers.txt from the current directory rather than
# $SERVERLIST — run the script from ~/Documents/scripts.
function change_hostfile() {
    local server=$1
    cat servers.txt | ssh $SSH_USER@$server \
    "if ! grep -q ${MASTER_HOSTNAME} /etc/hosts; then \
         sudo su -c 'cat >> /etc/hosts';
     fi"
    echo "$server hostfile .... done"
}

# Pass 1: distribute the master's SSH key to every non-comment entry.
echo "setting sshkey ........."
while read line; do
    if [[ $(echo $line | cut -c1) != "#" ]]; then
        server=$(echo $line | awk '{print $1}')
        set_sshkey $server
    fi
done < $SERVERLIST

# Pass 2: set each node's hostname (second column of servers.txt).
echo "changing hostname ........."
while read line; do
    if [[ $(echo $line | cut -c1) != "#" ]]; then
        server=$(echo $line | awk '{print $1}')
        hostname=$(echo $line | awk '{print $2}')
        change_hostname $server $hostname
    fi
done < $SERVERLIST

# Pass 3: populate /etc/hosts on every node.
echo "changing hosts file ........."
while read line; do
    if [[ $(echo $line | cut -c1) != "#" ]]; then
        server=$(echo $line | awk '{print $1}')
        change_hostfile $server
    fi
done < $SERVERLIST



[ NTP 세팅 ]
vi 02-ntp-setup.sh
#!/bin/bash

# 02-ntp-setup.sh
# Configure NTP for the cluster listed in servers.txt: the master node
# syncs to an external source (time.bora.net) and serves time to the local
# 192.168.0.0/16 network; every other node syncs to the master.  Each node
# also gets the default libvirt network removed.  Requires passwordless
# sudo on all nodes.

SERVERLIST=$HOME/Documents/scripts/servers.txt
MASTER_IP="192.168.230.138"
SSH_USER="stack"

# Install ntp/libvirt packages on the master, rewrite /etc/ntp.conf to use
# time.bora.net plus the local clock (127.127.1.0) and to answer queries
# from 192.168.0.0/16, restart ntp, then force an immediate sync.
# Idempotent: the sed block is skipped once 'server 127.127.1.0' exists.
function ntp_master_setup() {
    local server=$1
    echo $server | ssh ${SSH_USER}@$server \
    "sudo apt-get update; \
     sudo apt-get install -y bridge-utils libvirt-bin ntp ntpdate; \
     if ! grep -q 'server 127.127.1.0' /etc/ntp.conf; then \
         sudo sed -i 's/server 0.ubuntu.pool.ntp.org/#server 0.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server 1.ubuntu.pool.ntp.org/#server 1.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server 2.ubuntu.pool.ntp.org/#server 2.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server 3.ubuntu.pool.ntp.org/server time.bora.net/g' /etc/ntp.conf; \
         sudo sed -i 's/server ntp.ubuntu.com/server 127.127.1.0/g' /etc/ntp.conf; \
         sudo sed -i 's/restrict 127.0.0.1/restrict 192.168.0.0 mask 255.255.0.0 nomodify notrap/g' /etc/ntp.conf; \
         sudo service ntp restart; \
     fi; \
     sudo ntpdate -u time.bora.net; \
     sudo virsh net-destroy default; \
     sudo virsh net-undefine default"
}

# Install ntp/libvirt packages on a slave and point its /etc/ntp.conf at
# the master.  $MASTER_IP sits inside the local double quotes, so it
# expands on the master before the command is sent.
function ntp_slave_setup() {
    local server=$1
    echo $server | ssh ${SSH_USER}@$server \
    "sudo apt-get update; \
     sudo apt-get install -y bridge-utils libvirt-bin ntp ntpdate; \
     if ! grep -q ${MASTER_IP} /etc/ntp.conf; then \
         sudo sed -i 's/server 0.ubuntu.pool.ntp.org/#server 0.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server 1.ubuntu.pool.ntp.org/#server 1.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server 2.ubuntu.pool.ntp.org/#server 2.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server 3.ubuntu.pool.ntp.org/#server 3.ubuntu.pool.ntp.org/g' /etc/ntp.conf; \
         sudo sed -i 's/server ntp.ubuntu.com/server $MASTER_IP/g' /etc/ntp.conf; \
         sudo service ntp restart; \
     fi; \
     sudo ntpdate -u $MASTER_IP; \
     sudo virsh net-destroy default; \
     sudo virsh net-undefine default"
}
# FIX: the idempotency check above used `grep -c`, which prints the match
# count to stdout; `grep -q` has the same exit status, stays silent, and
# matches the master-side check.

# Configure the master first ...
echo "ntp master setting ........."
while read line; do
    if [[ $(echo $line | cut -c1) != "#" ]]; then
        server=$(echo $line | awk '{print $1}')
        if [[ $server == "$MASTER_IP" ]]; then
            ntp_master_setup $server
        fi
    fi
done < $SERVERLIST

# ... then every slave, so they have a reachable time source.
echo "ntp slave setting ........."
while read line; do
    if [[ $(echo $line | cut -c1) != "#" ]]; then
        server=$(echo $line | awk '{print $1}')
        if [[ $server != "$MASTER_IP" ]]; then
            ntp_slave_setup $server
        fi
    fi
done < $SERVERLIST



[ local.conf 파일 ]
mkdir -p ~/Documents/github
cd ~/Documents/github
git clone https://github.com/openstack-dev/devstack.git
cd devstack

vi local.conf
[[local|localrc]]
HOST_IP=192.168.75.138
SERVICE_HOST=192.168.75.138
MYSQL_HOST=192.168.75.138
RABBIT_HOST=192.168.75.138
GLANCE_HOSTPORT=192.168.75.138:9292
ADMIN_PASSWORD=secret
DATABASE_PASSWORD=secret
RABBIT_PASSWORD=secret
SERVICE_PASSWORD=secret

# Do not use Nova-Network
disable_service n-net

# Neutron service
enable_service neutron
enable_service q-svc
enable_service q-agt
enable_service q-dhcp
enable_service q-l3
enable_service q-meta

# Neutron options
Q_USE_SECGROUP=True
FLOATING_RANGE="192.168.75.0/24"
FIXED_RANGE="10.0.0.0/24"
Q_FLOATING_ALLOCATION_POOL=start=192.168.75.193,end=192.168.75.254
PUBLIC_NETWORK_GATEWAY="192.168.75.2"
Q_L3_ENABLED=True
PUBLIC_INTERFACE=ens33

# Open vSwitch provider networking configuration
Q_USE_PROVIDERNET_FOR_PUBLIC=True
OVS_PHYSICAL_BRIDGE=br-ex
PUBLIC_BRIDGE=br-ex
OVS_BRIDGE_MAPPINGS=public:br-ex

# Nova service
enable_service n-api
enable_service n-cpu
enable_service n-cond
enable_service n-sch
enable_service n-novnc
enable_service n-cauth

# Cinder service
enable_service cinder
enable_service c-api
enable_service c-vol
enable_service c-sch
enable_service c-bak

# Tempest service
enable_service tempest

# Swift service
enable_service s-proxy
enable_service s-object
enable_service s-container
enable_service s-account

# Heat service
enable_service heat
enable_service h-api
enable_service h-api-cfn
enable_service h-api-cw
enable_service h-eng

# Enable plugin neutron-lbaas, octavia
enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas master
enable_plugin octavia https://git.openstack.org/openstack/octavia

# Enable plugin Magnum
#enable_plugin magnum https://github.com/openstack/magnum master
#enable_plugin magnum-ui https://github.com/openstack/magnum-ui master

# Enable plugin Monasca (Ubuntu 16.04 사용 시 Systemctl 에 맞게 수정 필요)
enable_plugin monasca-api https://github.com/openstack/monasca-api master
enable_plugin monasca-log-api https://github.com/openstack/monasca-log-api master

MONASCA_API_IMPLEMENTATION_LANG=${MONASCA_API_IMPLEMENTATION_LANG:-python}

MONASCA_PERSISTER_IMPLEMENTATION_LANG=${MONASCA_PERSISTER_IMPLEMENTATION_LANG:-python}

MONASCA_METRICS_DB=${MONASCA_METRICS_DB:-influxdb}



# Cinder configuration
VOLUME_GROUP="cinder-volumes"
VOLUME_NAME_PREFIX="volume-"

# Images
# Use this image when creating test instances
IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
# Use this image when working with Orchestration (Heat)
IMAGE_URLS+=",https://download.fedoraproject.org/pub/fedora/linux/releases/23/Cloud/x86_64/Images/Fedora-Cloud-Base-23-20151030.x86_64.qcow2"

KEYSTONE_CATALOG_BACKEND=sql
API_RATE_LIMIT=False
SWIFT_HASH=testing
SWIFT_REPLICAS=1
VOLUME_BACKING_FILE_SIZE=70000M

LOGFILE=$DEST/logs/stack.sh.log

# A clean install every time
RECLONE=yes



[ Compute Node 추가 ]
vi local.conf
[[local|localrc]]
HOST_IP=192.168.75.139
SERVICE_HOST=192.168.75.138
MYSQL_HOST=192.168.75.138
RABBIT_HOST=192.168.75.138
GLANCE_HOSTPORT=192.168.75.138:9292
ADMIN_PASSWORD=secret
DATABASE_PASSWORD=secret
RABBIT_PASSWORD=secret
SERVICE_PASSWORD=secret

# Neutron options
PUBLIC_INTERFACE=ens33
ENABLED_SERVICES=n-cpu,n-novnc,rabbit,q-agt

LOGFILE=$DEST/logs/stack.sh.log



[ 설치 실행 ]
./stack.sh


[ 스토리지 마운트 ]
sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 /opt/stack/data/swift/drives/images/swift.img /opt/stack/data/swift/drives/sdb1

sudo losetup /dev/loop1 /opt/stack/data/cinder-volumes-default-backing-file

sudo losetup /dev/loop2 /opt/stack/data/cinder-volumes-lvmdriver-1-backing-file


[ CPU, Ram, Disk Overcommit 세팅 ]
vi /etc/nova/nova.conf

scheduler_default_filters = ..., CoreFilter          # CoreFilter 추가
cpu_allocation_ratio=50.0
ram_allocation_ratio=16.0
disk_allocation_ratio=50.0


[ 서비스 실행 ]
screen -c stack-screenrc


[ VM 생성 ]
. openrc admin demo


openstack project list
openstack security group list

# default sec group rule 추가
openstack security group rule create --proto icmp --src-ip 0.0.0.0/0 --dst-port -1 --ingress 2d95031b-132b-4d46-aacd-f392cdd8c4fb

openstack security group rule create --proto tcp --src-ip 0.0.0.0/0 --dst-port 1:65535 --ingress 2d95031b-132b-4d46-aacd-f392cdd8c4fb

# keypair 생성 (기존 public key 를 등록)
openstack keypair create --public-key ~/.ssh/id_rsa.pub magnum-key


openstack flavor list
openstack image list
openstack network list

# nova boot
openstack server create --image 7e688989-e59b-4b20-a562-1de946ee91e9 --flavor m1.tiny  --nic net-id=f57b8f2c-cd67-4d49-b38c-393dbb773c9b  --key-name magnum-key --security-group default test-01


# floating ip 생성 및 서버 할당
openstack ip floating create public
openstack ip floating list
openstack ip floating add 192.168.75.194 test-01


# Router 보기
sudo ip netns
qdhcp-f57b8f2c-cd67-4d49-b38c-393dbb773c9b
qrouter-b46e14d5-4ef5-4bfa-8dc3-463a982688ab


[ tcpdump 방법 ]
# Compute Node
[vm] -> tap:[qbrb97b5aa3-f8 Linux Bridge]:qvbb97b5aa3-f8 -> qvob97b5aa3-f8:[OVS br-int Bridge]:patch-tun -> patch-int:[OVS br-tun Bridge]:br-tun ->

# Network Node
br-tun:OVS br-tun Bridge:patch-int -> patch-tun:OVS br-int Bridge:qr-c163af1e-53 -> 
qr-c163af1e-53:qrouter(Namespace) -> qg-d8187261-68:qg(Namespace) -> 
qg-d8187261-68:OVS br-int Bridge:int-br-ex -> phy-br-ex:OVS br-ex Bridge -> NIC 

sudo tcpdump -n -e -i qbrb97b5aa3-f8 | grep 10.0.0.3
sudo tcpdump -n -e -i qvbb97b5aa3-f8 | grep 10.0.0.3
sudo tcpdump -n -e -i qvob97b5aa3-f8 | grep 10.0.0.3
sudo ip netns exec qrouter-b46e14d5-4ef5-4bfa-8dc3-463a982688ab tcpdump -n -e -i qr-c163af1e-53 | grep 10.0.0.3



[ Magnum k8s 생성 ]
cd ~/Documents/github/devstack/files
wget https://fedorapeople.org/groups/magnum/fedora-21-atomic-5.qcow2
glance image-create --name fedora-21-atomic-5 \
                    --visibility public \
                    --disk-format qcow2 \
                    --os-distro fedora-atomic \
                    --container-format bare < fedora-21-atomic-5.qcow2


magnum service-list

magnum baymodel-create --name k8sbaymodel \
                       --image-id fedora-21-atomic-5 \
                       --keypair-id magnum-key \
                       --external-network-id public \
                       --dns-nameserver 8.8.8.8 \
                       --flavor-id m1.small \
                       --docker-volume-size 5 \
                       --network-driver flannel \
                       --coe kubernetes

magnum baymodel-list
magnum bay-create --name k8sbay --baymodel k8sbaymodel --node-count 1

neutron lb-pool-list
neutron lb-vip-list
neutron lb-member-list

magnum bay-list


[ magnum 클러스터 생성 에러 시 수동으로 할 때 삭제해야 할 것 ]
floating ip  삭제 - api-pool-vip,  kube-master, kube-node
openstack ip floating list
sudo ip netns exec qrouter-2f49aeb4-421c-4994-923a-5aafe453fa3d ip a

api.pool.vip 삭제
neutron lb-vip-list
neutron lb-pool-list
neutron lb-member-list

# private network 삭제
openstack network list

# router 삭제, external gateway 삭제
openstack router list
openstack port list
openstack router remove port        (gateway 를 제거)
openstack router remove subnet    (subnet 을 제거)











Posted by Kubernetes Korea co-leader seungkyua@gmail.com

Compute IP          : 172.23.147.187

가상 IP  NAT         :  192.168.75.0

가상 IP  Host-Only :  192.168.230.0


1. HP Helion OpenStack Community Version 다운로드

https://helion.hpwsportal.com/catalog.html#/Home/Show

# mkdir -p /root/work

# tar -xzvf HP_Helion_OpenStack_1.1.1.tgz -C /root/work



2. 설치 문서

http://docs.hpcloud.com/helion/community/install-virtual/


3. sudo 세팅

$ sudo visudo

stack   ALL=(ALL:ALL) NOPASSWD: ALL


4. root 접속 및 rsa 키 생성

$ sudo su -

ssh-keygen -t rsa


# s/w 설치

# apt-get update

# apt-get dist-upgrade

# sudo su -l -c "apt-get install -y qemu-kvm libvirt-bin openvswitch-switch openvswitch-common python-libvirt qemu-system-x86 ntpdate ntp openssh-server"


5. ntp 서버 설정

# ntpdate -u time.bora.net

# vi /etc/ntp.conf

...

#server 0.ubuntu.pool.ntp.org

#server 1.ubuntu.pool.ntp.org

#server 2.ubuntu.pool.ntp.org

#server 3.ubuntu.pool.ntp.org

server time.bora.net

...

restrict 192.0.2.0 mask 255.255.255.0 nomodify notrap



# Use Ubuntu's ntp server as a fallback.

#server ntp.ubuntu.com

server 127.127.1.0

...


sudo /etc/init.d/ntp restart

# ntpq -p                             # ntp 상태 보기

# dpkg-reconfigure ntp         # ntp 에러 날 때




5. unpacking

# mkdir work

# cd work

tar zxvf /{full path to downloaded file from step 2}/Helion_Openstack_Community_V1.4.tar.gz



7. VM 사양 조정

vi /root/vm_plan.csv

,,,,2,4096,512,Undercloud

,,,,2,24576,512,OvercloudControl

,,,,2,8192,512,OvercloudSwiftStorage

,,,,4,16384,512,OvercloudCompute



6. start seed vm

export SEED_NTP_SERVER=192.168.122.1

export NODE_MEM=4096

HP_VM_MODE=y bash -x /root/work/tripleo/tripleo-incubator/scripts/hp_ced_host_manager.sh --create-seed --vm-plan /root/vm_plan.csv 2>&1|tee seedvminstall.log



7. Under Cloud, Over Cloud 생성

# seed vm 접속

ssh 192.0.2.1


# 변수 세팅

# export OVERCLOUD_CONTROLSCALE=1

export OVERCLOUD_SWIFTSTORAGESCALE=1

export OVERCLOUD_SWIFT_REPLICA_COUNT=1

export ENABLE_CENTRALIZED_LOGGING=0

export USE_TRICKLE=0

export OVERCLOUD_STACK_TIMEOUT=240

export UNDERCLOUD_STACK_TIMEOUT=240

export OVERCLOUD_NTP_SERVER=192.168.122.1

export UNDERCLOUD_NTP_SERVER=192.168.122.1

export FLOATING_START=192.0.8.140

export FLOATING_END=192.0.8.240

export FLOATING_CIDR=192.0.8.0/21

export OVERCLOUD_NEUTRON_DVR=False



# 로케일 변경

export LANGUAGE=en_US.UTF-8

export LANG=en_US.UTF-8

export LC_ALL=en_US.UTF-8



# start Under Cloud

bash -x tripleo/tripleo-incubator/scripts/hp_ced_installer.sh 2>&1|tee stackinstall.log



8. 아래 IP 확인

OVERCLOUD_IP_ADDRESS  : 192.0.2.23

UNDERCLOUD_IP_ADDRESS  : 192.0.2.2



9. 설치 확인하기

# demo, admin 유저의 패스워드 확인

cat /root/tripleo/tripleo-undercloud-passwords

cat /root/tripleo/tripleo-overcloud-passwords


10. seed vm에 접속한 후 undercloud ip 보기

# . /root/stackrc

UNDERCLOUD_IP=$(nova list | grep "undercloud" | awk ' { print $12 } ' | sed s/ctlplane=// )

echo $UNDERCLOUD_IP


11. seed vm 에서 overcloud ip 보기

. /root/tripleo/tripleo-overcloud-passwords

TE_DATAFILE=/root/tripleo/ce_env.json

. /root/tripleo/tripleo-incubator/undercloudrc

OVERCLOUD_IP=$(heat output-show overcloud KeystoneURL | cut -d: -f2 | sed s,/,,g )

# echo $OVERCLOUD_IP



[ OverCloud 내 VM 이 인터넷 연결이 될 수 있도록 수정]

0. DNS change (overcloud)

/etc/resolv.conf


1. security rule check (overcloud)



2. ip forward (host, seed, undercloud, overcloud)
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.all.rp_filter = 0
net.ipv4.ip_forward = 1


3. br-tun, br-int, br-ex up (host, seed, overcloud, compute)
ip link set br-tun up
ip link set br-ex up
ip link set br-int up


4. Host iptables NAT add
iptables -t nat -A POSTROUTING -s 192.0.8.0/21 ! -d 192.0.2.0/24 -j SNAT --to-source 172.23.147.187


5. Host iptables filter delete
iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable



6. Host iptables NAT DNAT port change

# overcloud Horizon port forwarding

iptables -t nat -I PREROUTING -i eth0 -p tcp --dport 443 -j DNAT --to-destination 192.0.2.21


# ALS port forwarding

iptables -t nat -I PREROUTING -i eth0 -p tcp --dport 80 -j DNAT --to-destination 192.0.8.143




13. Host 에서 콘솔 접속할 수 있게 열기

# ssh 192.0.2.1 -R 443:<overcloud IP>:443 -L <laptop IP>:443:127.0.0.1:443

# ssh 192.0.2.1 -R 443:192.0.2.24:443 -L 172.23.147.187:443:127.0.0.1:443



14. connecting to the demo vm

# ssh debian@192.0.8.141



15. overcloud scheduler memory ratio 변경

# ssh heat-admin@192.0.2.23                  # overcloud-controllerMgmt

$ sudo su -

# vi /etc/nova/nova.conf

...

ram_allocation_ratio=100

...

# restart nova-scheduler

# exit


# 다른 over cloud 도 수정

# ssh heat-admin@192.0.2.27             # overcloud-controller0

# ssh heat-admin@192.0.2.28             # overcloud-controller1





16. monitoring 접속

http://<under cloud ip>/icinga           # icingaadmin / icingaadmin



17. undercloud logging 에 접속하기위해 Kibana 패스워드 알기

ssh heat-admin@<undercloud IP>

cat  /opt/kibana/htpasswd.cfg

http://<under cloud ip>:81                   # kibana / ?????




# vm 백업

# tripleo/tripleo-incubator/scripts/hp_ced_host_manager.sh --save-vms


# vm Recover

# tripleo/tripleo-incubator/scripts/hp_ced_host_manager.sh --resume-vms





[ HDP install ]


1. HP Development Platform Community Version 다운로드

https://helion.hpwsportal.com/catalog.html#/Home/Show


2. HDP 설치 문서

https://docs.hpcloud.com/helion/devplatform/install/community


* Host(base) 에서 설치하거나 Seed 에서 설치할 수 있음


3. Seed에서 필요 s/w 설치

# pip install cffi enum34 pyasn1 virtualenv

# scp -o StrictHostKeyChecking=no 192.0.2.21:/usr/local/share/ca-certificates/ephemeralca-cacert.crt /root/ephemeralca-cacert.crt


tar -zxvf hp_helion_devplatform_community.tar.gz

cd dev-platform-installer

#   -p : admin-password,  -a : overcloud ip,  -i : tenant-id
# ./DevelopmentPlatform_Enable.sh \
    -u admin \
    -p bd9352ceed184839e2231d2a13062d461928b857 \
    -a 192.0.2.21 \
    -i c1821d8687f14fd4b74c11892f5d7af0 \
    -e /root/ephemeralca-cacert.crt



3. Host(Base)에 필요 s/w 설치

# sudo apt-get install -y python-dev libffi-dev libssl-dev python-virtualenv python-pip

# mkdir -p hdp_work

# cd hdp_work

tar -zxvf /home/stack/Downloads/HDP/hp_helion_devplatform_community.tar.gz

cd dev-platform-installer

./DevelopmentPlatform_Setup.sh -p {admin_user_password} -a {auth_keystone_ip_address}

./DevelopmentPlatform_Setup.sh -p 2c0ee7b859261caf96a3069c60f516de1e3682c9 -a 192.0.2.21


혹은 아래와 같이 -n (username) -t (tenant name) 을 지정

# ./DevelopmentPlatform_Setup.sh -r regionOne -n admin -p 2c0ee7b859261caf96a3069c60f516de1e3682c9 -t admin -a '192.0.2.21'

# admin password 를 모를 경우 다음과 같이 실행

# cat /root/tripleo/tripleo-overcloud-passwords


# Keystone ip 를 모를 경우 다음과 같이 실행

# . /root/tripleo/tripleo-overcloud-passwords

# TE_DATAFILE=/root/tripleo/ce_env.json . /root/tripleo/tripleo-incubator/undercloudrc

# heat output-show overcloud KeystoneURL




5. cluster 설정을 위한 client tool 다운로드

http://docs.hpcloud.com/helion/devplatform/1.2/ALS-developer-trial-quick-start/2

cf-mgmt 와 ALS Client 다운로드

# host 에서 파일을 seed로 복사

$ unzip *.zip

$ scp helion-1.2.0.1-linux-glibc2.3-x86_64/helion root@192.0.2.1:client

$ scp linux-amd64/cf-mgmt root@192.0.2.1:client


# seed에서 수행

6. Create Cluster

$ vi ~/.profile

export PATH=$PATH:/root/client/cf-mgmt:/root/client/helion:.


$ cf-mgmt update









===========================   참고 ======================



5. VM을 위한 DNS 세팅

vi tripleo/hp_passthrough/overcloud_neutron_dhcp_agent.json

{"option":"dhcp_delete_namespaces","value":"True"},

{"option":"dnsmasq_dns_servers","value":"203.236.1.12,203.236.20.11"}


vi tripleo/hp_passthrough/undercloud_neutron_dhcp_agent.json

{"option":"dhcp_delete_namespaces","value":"True"},

{"option":"dnsmasq_dns_servers","value":"203.236.1.12,203.236.20.11"}



6. VM root disk 위치 수정

# mkdir -p /data/libvirt/images           # vm qcow2 이미지를 생성할 디렉토리 미리 생성

# vi /root/tripleo/tripleo-incubator/scripts/hp_ced_host_manager.sh

...

IMAGES_DIR=${IMAGES_DIR:-"/data/libvirt/images"}    # 127 라인 디렉토리 변경

...


# virsh pool-dumpxml default > pool.xml


# vi pool.xml

<pool type='dir'>

  <name>default</name>

  <uuid>9690731d-e0d1-49d1-88a4-b25bccc78418</uuid>

  <capacity unit='bytes'>436400848896</capacity>

  <allocation unit='bytes'>2789785694208</allocation>

  <available unit='bytes'>18446741720324706304</available>

  <source>

  </source>

  <target>

    <path>/data/libvirt/images</path>

    <permissions>

      <mode>0711</mode>

      <owner>-1</owner>

      <group>-1</group>

    </permissions>

  </target>

</pool>


# virsh pool-destroy default

# virsh pool-create pool.xml



8. 아래 파일의 해당 라인의 IP 변경 : 192.0.8.0 -> 192.10.8.0,      192.0.15.0 -> 192.10.15.0

./tripleo/tripleo-incubator/scripts/hp_ced_host_manager.sh:800

./tripleo/tripleo-incubator/scripts/hp_ced_setup_net.sh:70

./tripleo/tripleo-incubator/scripts/hp_ced_setup_net.sh:71

./tripleo/tripleo-incubator/scripts/hp_ced_setup_net.sh:72

./tripleo/tripleo-incubator/scripts/hp_ced_setup_net.sh:181

./tripleo/tripleo-incubator/scripts/hp_ced_setup_net.sh:182

./tripleo/tripleo-incubator/scripts/hp_ced_setup_net.sh:183



# undercloud, overcloud 설치 시 변수 셋팅

# export OVERCLOUD_NEUTRON_DVR=False

# export OVERCLOUD_CINDER_LVMLOOPDEVSIZE=500000      # 필요시 필요한 양만큼


# seed locale 변경

# locale-gen en_US.UTF-8

# sudo dpkg-reconfigure locales          # 필요시



# 변수 세팅  (이건 Comm 버전에서 에러 날 때)

# vi ./tripleo/tripleo-incubator/scripts/hp_ced_setup_cloud_env.sh

...

export OVERCLOUD_CONTROLSCALE=${OVERCLOUD_CONTROLSCALE:-2}    40 라인 변경

...


13. vm dns 를 초반에 설정 못했을 때 변경하기

. /root/tripleo/tripleo-overcloud-passwords

TE_DATAFILE=/root/tripleo/ce_env.json

. /root/tripleo/tripleo-incubator/undercloudrc

# neutron subnet-list

neutron subnet-update --dns-nameserver 203.236.1.12 --dns-nameserver 203.236.20.11 c4316d44-e2ae-43fb-b462-40fa767bd9fb















Posted by Kubernetes Korea co-leader seungkyua@gmail.com