Listing 1: Cloud festlegen (inventory/ix/clouds.yaml)

# OpenStack client configuration (clouds.yaml format): defines the
# "mycloud" entry that is selected later via OS_CLOUD=mycloud (Listing 3).
clouds:
  mycloud:
    profile: betacloud
    auth:
      auth_url: https://api-1.betacloud.io:5000/v3
      username: myusername
      user_domain_name: mydomain
      # fixed typo: "myprocject" -> "myproject"
      project_name: myproject
      project_domain_name: mydomain


Listing 2: Variablen konfigurieren (inventory/ix/cluster.tf)
# Basic cluster settings: name, availability zone, SSH key and base image.
cluster_name = "ix"
az_list = ["south-1"]
public_key_path = "id_rsa_ix.pub"
image = "Ubuntu 18.04"
ssh_user = "ubuntu"

# Control plane: exactly one master without a floating IP
# (reachable only through the bastion host below).
number_of_etcd = 0
number_of_k8s_masters = 0
number_of_k8s_masters_no_etcd = 0
number_of_k8s_masters_no_floating_ip = 1
number_of_k8s_masters_no_floating_ip_no_etcd = 0
flavor_k8s_master = "bd888..."  # flavor ID (truncated in the listing)

# Worker nodes: two nodes, no floating IPs.
number_of_k8s_nodes = 0
number_of_k8s_nodes_no_floating_ip = 2
flavor_k8s_node = "bd888..."  # flavor ID (truncated in the listing)

# Networking: private network/subnet plus the external network
# from which floating IPs are allocated.
network_name = "ix"
external_net = "0647c0..."  # external network ID (truncated in the listing)
subnet_cidr = "192.168.0.0/24"
floatingip_pool = "public"

# Bastion host used as SSH jump host into the cluster.
number_of_bastions = 1
flavor_bastion = "bd888..."  # flavor ID (truncated in the listing)
# NOTE(review): 0.0.0.0/0 allows SSH from anywhere — restrict in production.
bastion_allowed_remote_ips = ["0.0.0.0/0"]


Listing 3: Cloud-Zuweisung und Ausgabe der Ressourcen
$ export OS_CLOUD=mycloud
$ terraform apply \
    -var-file=cluster.tf \
    contrib/terraform/openstack

Apply complete! Resources: 20 added, 0 changed, 0 destroyed.

Outputs:
bastion_fips = [
    185.136.140.40
]
floating_network_id = 0647c0a0-862c-4c7e-9433-4558fcc5573b
k8s_master_fips = []
k8s_node_fips = []
private_subnet_id = a1670441-1312-4e61-abae-c7cf3d5a4f3f
router_id = 5a76861f-db7d-45d9-a93c-59e6813effaf


Listing 4: Clusterzustand ermitteln
$ cd inventory/ix/artifacts
$ ./kubectl.sh get nodes
NAME                  STATUS     ROLES      AGE       VERSION
ix-k8s-master-nf-1    Ready      master     27m       v1.13.3
ix-k8s-node-nf-1      Ready      node       26m       v1.13.3
ix-k8s-node-nf-2      Ready      node       26m       v1.13.3



Listing 5: Überprüfen der gestarteten Pods
$ ./kubectl.sh get pods
NAME                           READY    STATUS    RESTARTS   AGE
hello-world-696b6b59bd-7d8md   1/1      Running    0         3m24s
hello-world-696b6b59bd-bz64c   1/1      Running    0         3m24s


Listing 6: Mit describe den Port ermitteln
$ ./kubectl.sh describe services example-service
Name:                      example-service
Namespace:                 default
Labels:                    run=load-balancer-example
Annotations:               <none>
Selector:                  run=load-balancer-example
Type:                      NodePort
IP:                        10.233.1.98
Port:                      <unset>  8080/TCP
TargetPort:                8080/TCP
NodePort:                  <unset>  32732/TCP
Endpoints:                 <none>
Session Affinity:          None
External Traffic Policy:   Cluster
Events:                    <none>


Listing 7: Playbook aufrufen und Clusterstatus prüfen
$ ansible-playbook \
    --become \
    --inventory=inventory/ix/hosts \
    --extra-vars="ansible_ssh_private_key_file=inventory/ix/id_rsa_ix" \
    scale.yml

$ cd inventory/ix/artifacts
$ ./kubectl.sh get nodes
NAME                  STATUS    ROLES     AGE    VERSION
ix-k8s-master-nf-1    Ready     master    112m   v1.13.3
ix-k8s-node-nf-1      Ready     node      111m   v1.13.3
ix-k8s-node-nf-2      Ready     node      111m   v1.13.3
ix-k8s-node-nf-3      Ready     node      2m3s   v1.13.3
ix-k8s-node-nf-4      Ready     node      2m4s   v1.13.3


Listing 8: Einen Worker entfernen
$ ansible-playbook \
    --become \
    --inventory=inventory/ix/hosts \
    --private-key=inventory/ix/id_rsa_ix \
    -e "node=ix-k8s-node-nf-4" \
    remove-node.yml
    
$ cd inventory/ix/artifacts
$ ./kubectl.sh get nodes
NAME                 STATUS   ROLES    AGE    VERSION
ix-k8s-master-nf-1   Ready    master   120m   v1.13.3
ix-k8s-node-nf-1     Ready    node     119m   v1.13.3
ix-k8s-node-nf-2     Ready    node     119m   v1.13.3
ix-k8s-node-nf-3     Ready    node     10m    v1.13.3


