Skip to content

Provision Sample Outputs

Victor San Kho Lin edited this page May 1, 2017 · 3 revisions

Sample Screen Output of Provisioning a CouchDB Cluster

Note that further configuration through the Fauxton UI is required to complete the CouchDB cluster setup.

victorskl@LOCALHOST:~/cloudcode/project2/goal$ python3 -m automaton.provision
# --------------- CURRENT PROVISION
Reservation r-v0zqbwi6 has 1 instances.
        i-4bf80167 130.56.253.117 NCI running

# --------------- NEW PROVISION
How many instances require? (1-50): 2
Create 2 new instances? (y or n): y
Creating instances....
Creating status: pending
Creating status: pending
Creating status: pending
Creating status: pending
Done.
Reservation r-0eg2d92v has 2 instances.
        i-8444af02 115.146.93.31 melbourne-np running
        i-c0fdded9  melbourne pending

# --------------- AVAILABLE RESOURCES
0:      i-4bf80167      130.56.253.117  running NCI
1:      i-8444af02      115.146.93.31   running melbourne-np
2:      i-c0fdded9      115.146.93.255  running melbourne-np

Pick instances (a or 0 1 3): 1 2
[ check ]  Selected [Instance:i-8444af02, Instance:i-c0fdded9]

# --------------- PREFLIGHT CHECK
Give provision name for server group (couchdbs):
[ check ]  couchdbs
Give name for host file (hosts.ini):
[ check ]  hosts.ini
Writing host file... hosts.ini

# --------------- FIREWALL
Open ports? (y or n): y
0:      sg-0cdb768f     e57f01279e0c4ad8be600ec415cdc432        default Default security group
1:      sg-2b3e8377     e57f01279e0c4ad8be600ec415cdc432        ssh     Port 22

Pick security group (1 or 3): 0
Enter ports to open: 9584 9586
[ check ]  Adding rules to security group default with opening ports [9584, 9586]

# --------------- ANSIBLING
[ check ]  Scanning host :       ssh-keyscan -4 -t rsa 115.146.93.31
# 115.146.93.31 SSH-2.0-OpenSSH_7.2p2 Ubuntu-4ubuntu2.1
[ check ]  Added new known host:  115.146.93.31.
[ check ]  Scanning host :       ssh-keyscan -4 -t rsa 115.146.93.255
# 115.146.93.255 SSH-2.0-OpenSSH_7.2p2 Ubuntu-4ubuntu2.1
[ check ]  Added new known host:  115.146.93.255.
Enter login username (ubuntu):
Enter playbook recipe path (automaton/playbooks/couchdb.yaml):
ansible-playbook -i hosts.ini -u ubuntu -b --become-method=sudo --key-file=/path/to/nectar/key_pair.pem automaton/playbooks/couchdb.yaml --extra-vars "admin=xXx cookie=xXXXxxXXxxX password=XxXxXXxxX"
Execute? (y or n): y
Launching in T minus time
00:00
..................................................

PLAY [couchdbs] ****************************************************************

TASK [setup] *******************************************************************
ok: [115.146.93.31]
ok: [115.146.93.255]

TASK [System Update] ***********************************************************
changed: [115.146.93.31]
changed: [115.146.93.255]

TASK [Check CouchDB Service Exist] *********************************************
ok: [115.146.93.255]
ok: [115.146.93.31]

TASK [Stop CouchDB Service If Exist] *******************************************
skipping: [115.146.93.31]
skipping: [115.146.93.255]

TASK [Setup Build Environment] *************************************************
changed: [115.146.93.255]
changed: [115.146.93.31]

TASK [Download CouchDB] ********************************************************
changed: [115.146.93.31]
changed: [115.146.93.255]

TASK [Unpack CouchDB Source] ***************************************************
changed: [115.146.93.255]
changed: [115.146.93.31]

TASK [Configure Build] *********************************************************
changed: [115.146.93.31]
changed: [115.146.93.255]

TASK [Make Build] **************************************************************
changed: [115.146.93.255]
changed: [115.146.93.31]

TASK [Deploy CouchDB] **********************************************************
changed: [115.146.93.255]
changed: [115.146.93.31]

TASK [Add CouchDB System Account] **********************************************
changed: [115.146.93.255]
changed: [115.146.93.31]

TASK [Change CouchDB Ownership] ************************************************
changed: [115.146.93.255]
changed: [115.146.93.31]

TASK [Change CouchDB Config File Permission] ***********************************
changed: [115.146.93.255]
changed: [115.146.93.31]

TASK [Change CouchDB Directory Permission] *************************************
changed: [115.146.93.255]
changed: [115.146.93.31]

TASK [Change Node Name] ********************************************************
changed: [115.146.93.31]
changed: [115.146.93.255]

TASK [Set Cookie] **************************************************************
changed: [115.146.93.31]
changed: [115.146.93.255]

TASK [Bind Cluster Address to Public] ******************************************
changed: [115.146.93.31] => (item=bind_address = 0.0.0.0)
changed: [115.146.93.255] => (item=bind_address = 0.0.0.0)
changed: [115.146.93.255] => (item=port = 9584)
changed: [115.146.93.31] => (item=port = 9584)

TASK [Bind Node Address to Public] *********************************************
ok: [115.146.93.255] => (item=bind_address = 0.0.0.0)
ok: [115.146.93.31] => (item=bind_address = 0.0.0.0)
changed: [115.146.93.255] => (item=port = 9586)
changed: [115.146.93.31] => (item=port = 9586)

TASK [Add Admin User] **********************************************************
changed: [115.146.93.31]
changed: [115.146.93.255]

TASK [Install CouchDB Service] *************************************************
changed: [115.146.93.255]
changed: [115.146.93.31]

TASK [Enable CouchDB Service] **************************************************
changed: [115.146.93.255]
changed: [115.146.93.31]

TASK [Start CouchDB Service] ***************************************************
changed: [115.146.93.255]
changed: [115.146.93.31]

PLAY RECAP *********************************************************************
115.146.93.255             : ok=21   changed=19   unreachable=0    failed=0
115.146.93.31              : ok=21   changed=19   unreachable=0    failed=0

Sample Screen Output of Volume Provisioning

victorskl@LOCALHOST:~/cloudcode/project2/goal$ python3 -m automaton.provision
# --------------- CURRENT PROVISION
Reservation r-d3i1sr7z has 4 instances.
        i-573b8ab9 115.146.94.41 melbourne-np running
        i-4656705c 115.146.94.254 melbourne-np running
        i-0dea53b7 115.146.94.64 melbourne-np running
        i-0e995905 115.146.94.45 melbourne-np running

# --------------- NEW PROVISION
Create new instances? (y or n):

# --------------- AVAILABLE RESOURCES
0:      i-573b8ab9      115.146.94.41   running melbourne-np
1:      i-4656705c      115.146.94.254  running melbourne-np
2:      i-0dea53b7      115.146.94.64   running melbourne-np
3:      i-0e995905      115.146.94.45   running melbourne-np

Pick instances (a or 0 1 3):
[ check ]  Selected [Instance:i-573b8ab9, Instance:i-4656705c, Instance:i-0dea53b7, Instance:i-0e995905]

# --------------- PREFLIGHT CHECK
Give provision name for server group (couchdbs):
[ check ]  couchdbs
Give name for host file (hosts.ini):
[ check ]  hosts.ini
Writing host file... hosts.ini

# --------------- FIREWALL
Open ports? (y or n):

# --------------- VOLUMES PROVISION
Provision volumes? (y or n): y
Create new volumes? (y or n): y
How many? (num > 0): 4
Enter size GB (size > 0): 62
vol-57ee0498 is successfully created in melbourne-np zone and available now.
vol-73c252e1 is successfully created in melbourne-np zone and available now.
vol-e7c5bd6d is successfully created in melbourne-np zone and available now.
vol-99e83cf8 is successfully created in melbourne-np zone and available now.

# --------------- VOLUMES ALLOCATION
        vol-99e83cf8    62GB    melbourne-np    available
        vol-e7c5bd6d    62GB    melbourne-np    available
        vol-73c252e1    62GB    melbourne-np    available
        vol-57ee0498    62GB    melbourne-np    available

Do you like to attach vol-99e83cf8 now? (y or n): y
Pick an instance index number: 0
Enter device name (/dev/vdc):
[ check ]  Volume vol-99e83cf8 has successfully attached to i-573b8ab9.
Do you like to attach vol-e7c5bd6d now? (y or n): y
Pick an instance index number: 1
Enter device name (/dev/vdc):
[ check ]  Volume vol-e7c5bd6d has successfully attached to i-4656705c.
Do you like to attach vol-73c252e1 now? (y or n): y
Pick an instance index number: 2
Enter device name (/dev/vdc):
[ check ]  Volume vol-73c252e1 has successfully attached to i-0dea53b7.
Do you like to attach vol-57ee0498 now? (y or n): y
Pick an instance index number: 3
Enter device name (/dev/vdc):
[ check ]  Volume vol-57ee0498 has successfully attached to i-0e995905.

# --------------- ANSIBLING
[ check ]  Scanning host :       ssh-keyscan -4 -t rsa 115.146.94.41
# 115.146.94.41 SSH-2.0-OpenSSH_7.2p2 Ubuntu-4ubuntu2.1
[ check ]  Scanning host :       ssh-keyscan -4 -t rsa 115.146.94.254
# 115.146.94.254 SSH-2.0-OpenSSH_7.2p2 Ubuntu-4ubuntu2.1
[ check ]  Scanning host :       ssh-keyscan -4 -t rsa 115.146.94.64
# 115.146.94.64 SSH-2.0-OpenSSH_7.2p2 Ubuntu-4ubuntu2.1
[ check ]  Scanning host :       ssh-keyscan -4 -t rsa 115.146.94.45
# 115.146.94.45 SSH-2.0-OpenSSH_7.2p2 Ubuntu-4ubuntu2.1
Enter login username (ubuntu):
Enter playbook recipe path (automaton/playbooks/):
[ check ]  automaton/playbooks/
Enter playbook recipe path (test.yaml): vol_mount_backup.yaml
Verbose i.e. '-v' (y): y
ansible-playbook -i hosts.ini -u ubuntu -b --become-method=sudo --key-file=/path/to/my_key.pem -v automaton/playbooks/vol_mount_backup.yaml --extra-vars "admin=admin cookie=biskitz password=secret"
Execute? (y or n): y
Launching in T minus time
00:00
..................................................
No config file found; using defaults

PLAY [couchdbs] ****************************************************************

TASK [setup] *******************************************************************
ok: [115.146.94.254]
ok: [115.146.94.45]
ok: [115.146.94.41]
ok: [115.146.94.64]

TASK [Creates Directory] *******************************************************
ok: [115.146.94.45] => {"changed": false, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/media/backup", "size": 4096, "state": "directory", "uid": 0}
ok: [115.146.94.254] => {"changed": false, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/media/backup", "size": 4096, "state": "directory", "uid": 0}
ok: [115.146.94.64] => {"changed": false, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/media/backup", "size": 4096, "state": "directory", "uid": 0}
ok: [115.146.94.41] => {"changed": false, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/media/backup", "size": 4096, "state": "directory", "uid": 0}

TASK [Make File System] ********************************************************
changed: [115.146.94.254] => {"changed": true}
changed: [115.146.94.45] => {"changed": true}
changed: [115.146.94.64] => {"changed": true}
changed: [115.146.94.41] => {"changed": true}

TASK [Mount Backup] ************************************************************
changed: [115.146.94.254] => {"changed": true, "dump": "0", "fstab": "/etc/fstab", "fstype": "ext4", "name": "/media/backup", "opts": "defaults", "passno": "0", "src": "/dev/vdc"}
changed: [115.146.94.45] => {"changed": true, "dump": "0", "fstab": "/etc/fstab", "fstype": "ext4", "name": "/media/backup", "opts": "defaults", "passno": "0", "src": "/dev/vdc"}
changed: [115.146.94.41] => {"changed": true, "dump": "0", "fstab": "/etc/fstab", "fstype": "ext4", "name": "/media/backup", "opts": "defaults", "passno": "0", "src": "/dev/vdc"}
changed: [115.146.94.64] => {"changed": true, "dump": "0", "fstab": "/etc/fstab", "fstype": "ext4", "name": "/media/backup", "opts": "defaults", "passno": "0", "src": "/dev/vdc"}

PLAY RECAP *********************************************************************
115.146.94.254             : ok=4    changed=2    unreachable=0    failed=0
115.146.94.41              : ok=4    changed=2    unreachable=0    failed=0
115.146.94.45              : ok=4    changed=2    unreachable=0    failed=0
115.146.94.64              : ok=4    changed=2    unreachable=0    failed=0



... [checking the newly mounted disk on each node]

root@r-d3i1sr7z-0:~# df -h
Filesystem      Size  Used Avail Use% Mounted on
udev            3.9G     0  3.9G   0% /dev
tmpfs           799M   81M  718M  11% /run
/dev/vda1       9.9G  4.5G  5.0G  48% /
tmpfs           3.9G     0  3.9G   0% /dev/shm
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs           3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/vdb         59G  8.7G   48G  16% /mnt
tmpfs           799M     0  799M   0% /run/user/1000
/dev/vdc         61G   52M   58G   1% /media/backup


root@r-d3i1sr7z-1:~# df -h
Filesystem      Size  Used Avail Use% Mounted on
udev            3.9G     0  3.9G   0% /dev
tmpfs           799M   81M  718M  11% /run
/dev/vda1       9.9G  2.1G  7.4G  22% /
tmpfs           3.9G     0  3.9G   0% /dev/shm
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs           3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/vdb         59G   55G  1.7G  98% /mnt
tmpfs           799M     0  799M   0% /run/user/1000
/dev/vdc         61G   52M   58G   1% /media/backup

... [so on for the rest of the nodes]

nectar_team1_volume_provision.jpg