diff --git a/.github/workflows/rebase.yaml b/.github/workflows/rebase.yaml new file mode 100644 index 0000000..2875f97 --- /dev/null +++ b/.github/workflows/rebase.yaml @@ -0,0 +1,21 @@ +name: rebase + +on: + pull_request: + types: [opened] + issue_comment: + types: [created] + +jobs: + rebase: + if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase') && (github.event.comment.author_association == 'CONTRIBUTOR' || github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Automatic Rebase + uses: cirrus-actions/rebase@1.3.1 + env: + GITHUB_TOKEN: ${{ secrets.PAT }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 0000000..6a05d03 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,39 @@ +name: Create and publish a Docker image + +on: [release] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + build-and-push-image: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Log in to the Container registry + uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + - name: Build and push Docker image + uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc + with: + context: . 
+ push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} \ No newline at end of file diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 0000000..aaf7509 --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,37 @@ +name: test + +on: + pull_request: + push: + branches: + - main + +jobs: + unit: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Restore Go cache + uses: actions/cache@v1 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: 1.16.x + - name: Setup Kubebuilder + uses: fluxcd/pkg/actions/kubebuilder@main + - name: Run tests + run: make test + env: + KUBEBUILDER_ASSETS: ${{ github.workspace }}/kubebuilder/bin + - name: Check if working tree is dirty + run: | + if [[ $(git diff --stat) != '' ]]; then + echo 'run make test and commit changes' + exit 1 + fi diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..01259c6 --- /dev/null +++ b/.gitignore @@ -0,0 +1,18 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool +*.out + +# Local build output dir +bin/ + +# IDE files +.vscode diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..0bb442a --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +## Code of Conduct + +Source watcher follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..81891b7 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,82 @@ +# Contributing + +Source watcher is [Apache 2.0 licensed](LICENSE) and accepts contributions +via GitHub pull requests. 
This document outlines some of the conventions on +to make it easier to get your contribution accepted. + +We gratefully welcome improvements to issues and documentation as well as to +code. + +## Certificate of Origin + +By contributing to this project you agree to the Developer Certificate of +Origin (DCO). This document was created by the Linux Kernel community and is a +simple statement that you, as a contributor, have the legal right to make the +contribution. + +We require all commits to be signed. By signing off with your signature, you +certify that you wrote the patch or otherwise have the right to contribute the +material by the rules of the [DCO](DCO): + +`Signed-off-by: Jane Doe ` + +The signature must contain your real name +(sorry, no pseudonyms or anonymous contributions) +If your `user.name` and `user.email` are configured in your Git config, +you can sign your commit automatically with `git commit -s`. + +## Communications + +The project uses Slack: To join the conversation, simply join the +[CNCF](https://slack.cncf.io/) Slack workspace and use the +[#flux](https://cloud-native.slack.com/messages/flux/) channel. + +The developers use a mailing list to discuss development as well. +Simply subscribe to [flux-dev on cncf.io](https://lists.cncf.io/g/cncf-flux-dev) +to join the conversation (this will also add an invitation to your +Google calendar for our [Flux +meeting](https://docs.google.com/document/d/1l_M0om0qUEN_NNiGgpqJ2tvsF2iioHkaARDeh6b70B0/edit#)). + +### How to run the test suite + +Prerequisites: +* go >= 1.16 +* kubebuilder >= 3.0 +* kustomize >= 4.0 + +You can run the unit tests by simply doing + +```bash +make test +``` + +## Acceptance policy + +These things will make a PR more likely to be accepted: + +- a well-described requirement +- tests for new code +- tests for old code! 
+- new code and tests follow the conventions in old code and tests +- a good commit message (see below) +- all code must abide [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) +- names should abide [What's in a name](https://talks.golang.org/2014/names.slide#1) +- code must build on both Linux and Darwin, via plain `go build` +- code should have appropriate test coverage and tests should be written + to work with `go test` + +In general, we will merge a PR once one maintainer has endorsed it. +For substantial changes, more people may become involved, and you might +get asked to resubmit the PR or divide the changes into more than one PR. + +### Format of the Commit Message + +For Kustomize Controller we prefer the following rules for good commit messages: + +- Limit the subject to 50 characters and write as the continuation + of the sentence "If applied, this commit will ..." +- Explain what and why in the body, if more than a trivial change; + wrap it at 72 characters. + +The [following article](https://chris.beams.io/posts/git-commit/#seven-rules) +has some more helpful advice on documenting your work. diff --git a/DCO b/DCO new file mode 100644 index 0000000..716561d --- /dev/null +++ b/DCO @@ -0,0 +1,36 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..fa1ae62 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,34 @@ +FROM golang:1.16-alpine as builder + +WORKDIR /workspace + +# copy modules manifests +COPY go.mod go.mod +COPY go.sum go.sum + +# cache modules +RUN go mod download + +# copy source code +COPY main.go main.go +COPY controllers/ controllers/ +COPY osmops/ osmops/ + +# build +RUN CGO_ENABLED=0 go build -a -o source-watcher main.go + +FROM alpine:3.13 + +RUN apk add --no-cache ca-certificates tini + +COPY --from=builder /workspace/source-watcher /usr/local/bin/ + +# Create minimal nsswitch.conf file to prioritize the usage of /etc/hosts over DNS queries. +# https://github.com/gliderlabs/docker-alpine/issues/367#issuecomment-354316460 +RUN [ ! 
-e /etc/nsswitch.conf ] && echo 'hosts: files dns' > /etc/nsswitch.conf + +RUN addgroup -S controller && adduser -S controller -G controller + +USER controller + +ENTRYPOINT [ "/sbin/tini", "--", "source-watcher" ] diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..7983111 --- /dev/null +++ b/Makefile @@ -0,0 +1,92 @@ + +# Image URL to use all building/pushing image targets +IMG ?= ghcr.io/martel-innovate/osmops:latest +# Produce CRDs that work back to Kubernetes 1.16 +CRD_OPTIONS ?= crd:crdVersions=v1 + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +all: manager + +# Run tests +test: generate fmt vet manifests + go test ./... -coverprofile cover.out + +# Build manager binary +manager: generate fmt vet + go build -o bin/manager main.go + +# Run against the configured Kubernetes cluster in ~/.kube/config +run: generate fmt vet manifests + go run ./main.go + +# Install CRDs into a cluster +install: manifests + kustomize build config/crd +# kustomize build config/crd | kubectl apply -f - +# ^ potentially harmful. what if you're connected to the wrong cluster?! + +# Uninstall CRDs from a cluster +uninstall: manifests + kustomize build config/crd +# kustomize build config/crd | kubectl delete -f - +# ^ potentially harmful. what if you're connected to the wrong cluster?! + +# Deploy controller in the configured Kubernetes cluster in ~/.kube/config +deploy: manifests + cd config/manager && kustomize edit set image source-watcher=${IMG} + kustomize build config/default +# kustomize build config/default | kubectl apply -f - +# ^ potentially harmful. what if you're connected to the wrong cluster?! + +# Generate manifests e.g. CRD, RBAC etc. +manifests: controller-gen + $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=source-reader webhook paths="./..." 
output:crd:artifacts:config=config/crd/bases + +# Run go fmt against code +fmt: + go fmt ./... + +# Run go vet against code +vet: + go vet ./... + +# Generate code +generate: controller-gen + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +# Build the docker image +docker-build: test + docker build . -t ${IMG} + +# Push the docker image +# TODO: get rid of this mess. Use GH actions when moving to own repo. +# NOTE. docker login stores the token in osxkeychain, logout removes it from +# there. Not sure why GH recommends using their tool (gh), since as you can +# see the token is stored unencrypted and with no password protection! +docker-push: + grep oauth_token ~/.config/gh/hosts.yml | sed 's/.*oauth_token: //' | docker login ghcr.io -u c0c0n3 --password-stdin + docker push ${IMG} + docker logout ghcr.io + +# find or download controller-gen +# download controller-gen if necessary +controller-gen: +ifeq (, $(shell which controller-gen)) + @{ \ + set -e ;\ + CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\ + cd $$CONTROLLER_GEN_TMP_DIR ;\ + go mod init tmp ;\ + go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.5.0 ;\ + rm -rf $$CONTROLLER_GEN_TMP_DIR ;\ + } +CONTROLLER_GEN=$(GOBIN)/controller-gen +else +CONTROLLER_GEN=$(shell which controller-gen) +endif diff --git a/PROJECT b/PROJECT new file mode 100644 index 0000000..c764c70 --- /dev/null +++ b/PROJECT @@ -0,0 +1,3 @@ +domain: fluxcd.io +repo: github.com/fluxcd/source-watcher +version: "2" diff --git a/README.md b/README.md index 8a2a998..b071a3c 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,89 @@ -# osmops -GitOps for Open Source MANO, powered by the FluxCD Toolkit (https://toolkit.fluxcd.io) +OSM Ops +------- +> GitOps for Open Source MANO. + +OSM Ops is a cloud-native micro-service to implement GitOps workflows +within [Open Source MANO][osm] (OSM). 
The basic idea is to describe +the state of an OSM deployment through version-controlled text files +hosted in an online Git repository. Each file declares a desired instantiation +and runtime configuration for some of the services in a specified OSM +cluster. Collectively, the files at a given Git revision describe the +deployment state of the these services at a certain point in time. +OSM Ops monitors the Git repository in order to automatically reconcile +the desired deployment state with the actual live state of the OSM +cluster. OSM Ops is implemented as a [Kubernetes][k8s] operator that +plugs into the [FluxCD][flux] framework in order to leverage the rich +Kubernetes/FluxCD GitOps ecosystem. + +This software has been developed by [Martel Innovate][martel] as part +of the [Affordable5G][a5g] EU-funded project. OSM Ops serves the specific +needs of Affordable5G and is not intended as a replacement or alternative +to OSM's own deployment and operations tools, rather as a complement. + + +### Documentation + +The [introduction section][arch.intro] of the [software architecture +document][arch] is the best starting point to learn about OSM Ops: +it is a short read but presents the fundamental ideas clearly with +the help of diagrams. The reader interested in gaining a deeper technical +understanding of OSM Ops is invited to consider the remainder of the +document too. [Hands-on tutorials][demos] demonstrate the core features +and exemplify deployment scenarios. + + +### Features at a glance + +- **Declarative approach**. Edit YAML files to specify which KNFs should + be in the target OSM cluster and their configuration. OSM Ops determines + whether to create a new KNF or update an existing one, then issues the + OSM commands to realise your configuration. OSM Ops can also create or + update OSM packages. +- **GitOps workflow**. Keep your OSM Ops YAML files in an online Git + repository. 
OSM Ops automatically detects new commits and reconciles + the deployment state declared in the YAML files with the actual live + state of the OSM cluster. +- **Multi-repo/multi-cluster**. All the OSM Ops files in a Git repository + target the same OSM cluster. However, you can have OSM Ops monitor multiple + repositories if you need to manage several distinct OSM clusters at once. +- **Secure handling of OSM credentials**. Use Kubernetes secrets to provide + the username, password and project for OSM Ops to connect to the target + OSM cluster. +- **Repo file filters**. Optionally specify filters to match OSM Ops YAML + files in your repository. Speeds up processing if there are a large number + of files (e.g. source code, documents, etc.) that OSM Ops should not read. +- **Efficient batch processing**. Up to 6x faster and 89% bandwidth savings + when processing many KNF create/update operations compared to using the + `osm` CLI—thanks to caching (NS descriptors, VIM accounts, etc.) and + smart management of authorisation token lifecycle. + + +### Project status + +- Early days. But the code is solid (modular, close to 100% test coverage) + and is a good foundation for further development. +- Successfully deployed and run the Malaga Nov 2021 demo; ready for the + Malaga end-to-end tests in Q3 2022. +- Only create/update KNF available. No rollbacks—delete not implemented. + But you can still rollback to a previous Git version as long as the set + of KNFs is the same in both versions. +- OSM packaging functionality partially relies on naming conventions. A + reasonable choice given the current phase of the project, but it could + be improved in later iterations. ([Details][pkg].) 
+ + + +[arch]: ./docs/arch/README.md +[arch.intro]: ./docs/arch/intro.md +[a5g]: https://www.affordable5g.eu/ + "Affordable5G" +[demos]: ./docs/demos/README.md +[flux]: https://fluxcd.io/ + "Flux - the GitOps family of projects" +[k8s]: https://en.wikipedia.org/wiki/Kubernetes + "Kubernetes" +[martel]: https://www.martel-innovate.com/ + "Martel Innovate" +[osm]: https://osm.etsi.org/ + "Open Source MANO" +[pkg]: ./docs/osm-pkgs.md \ No newline at end of file diff --git a/_deployment_/kdu/ldap.ops.yaml b/_deployment_/kdu/ldap.ops.yaml new file mode 100644 index 0000000..e07539d --- /dev/null +++ b/_deployment_/kdu/ldap.ops.yaml @@ -0,0 +1,10 @@ +kind: NsInstance +name: ldap +description: Demo LDAP NS instance +nsdName: openldap_ns +vnfName: openldap +vimAccountName: mylocation1 +kdu: + name: ldap + params: + replicaCount: "1" diff --git a/_deployment_/osm-pkgs/openldap_knf/openldap_vnfd.yaml b/_deployment_/osm-pkgs/openldap_knf/openldap_vnfd.yaml new file mode 100644 index 0000000..41795bf --- /dev/null +++ b/_deployment_/osm-pkgs/openldap_knf/openldap_vnfd.yaml @@ -0,0 +1,18 @@ +vnfd: + description: KNF with single KDU using a helm-chart for openldap version 1.2.7 + df: + - id: default-df + ext-cpd: + - id: mgmt-ext + k8s-cluster-net: mgmtnet + id: openldap_knf + k8s-cluster: + nets: + - id: mgmtnet + kdu: + - name: ldap + helm-chart: stable/openldap:1.2.7 + mgmt-cp: mgmt-ext + product-name: openldap_knf + provider: Telefonica + version: '1.0' diff --git a/_deployment_/osm-pkgs/openldap_ns/README.md b/_deployment_/osm-pkgs/openldap_ns/README.md new file mode 100644 index 0000000..8424611 --- /dev/null +++ b/_deployment_/osm-pkgs/openldap_ns/README.md @@ -0,0 +1,26 @@ +# SIMPLE OPEN-LDAP CHART + +Descriptors that installs an openldap version 1.2.1 chart in a K8s cluster + +There is one VNF (openldap\_vnf) with only one KDU. 
+ +There is one NS that connects the VNF to a mgmt network + +## Onboarding and instantiation + +```bash +osm nfpkg-create openldap_knf.tar.gz +osm nspkg-create openldap_ns.tar.gz +osm ns-create --ns_name ldap --nsd_name openldap_ns --vim_account | --ssh_keys ${HOME}/.ssh/id_rsa.pub +``` + +### Instantiation option + +Some parameters could be passed during the instantiation. + +* replicaCount: Number of Open LDAP replicas that will be created + +```bash +osm ns-create --ns_name ldap --nsd_name openldap_ns --vim_account | --config '{additionalParamsForVnf: [{"member-vnf-index": "openldap", "additionalParams": {"replicaCount": "2"}}]}' +``` + diff --git a/_deployment_/osm-pkgs/openldap_ns/openldap_nsd.yaml b/_deployment_/osm-pkgs/openldap_ns/openldap_nsd.yaml new file mode 100644 index 0000000..b995164 --- /dev/null +++ b/_deployment_/osm-pkgs/openldap_ns/openldap_nsd.yaml @@ -0,0 +1,22 @@ +nsd: + nsd: + - description: NS consisting of a single KNF openldap_knf connected to mgmt network + designer: OSM + df: + - id: default-df + vnf-profile: + - id: openldap + virtual-link-connectivity: + - constituent-cpd-id: + - constituent-base-element-id: openldap + constituent-cpd-id: mgmt-ext + virtual-link-profile-id: mgmtnet + vnfd-id: openldap_knf + id: openldap_ns + name: openldap_ns + version: '1.0' + virtual-link-desc: + - id: mgmtnet + mgmt-network: 'true' + vnfd-id: + - openldap_knf diff --git a/_deployment_/osmops.deploy.yaml b/_deployment_/osmops.deploy.yaml new file mode 100644 index 0000000..209e2c0 --- /dev/null +++ b/_deployment_/osmops.deploy.yaml @@ -0,0 +1,59 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + control-plane: controller + name: source-watcher + namespace: flux-system +spec: + replicas: 1 + selector: + matchLabels: + app: source-watcher + template: + metadata: + annotations: + prometheus.io/port: "8080" + prometheus.io/scrape: "true" + labels: + app: source-watcher + spec: + serviceAccountName: source-controller + 
terminationGracePeriodSeconds: 10 + containers: + - args: + - --log-level=debug + - --log-encoding=console + env: + - name: RUNTIME_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: ghcr.io/martel-innovate/osmops:latest + imagePullPolicy: IfNotPresent + name: manager + ports: + - containerPort: 8080 + name: http-prom + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 50m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + volumeMounts: + - name: osmops + mountPath: "/etc/osmops" + readOnly: true + - mountPath: /tmp + name: tmp + volumes: + - name: osmops + secret: + secretName: nbi-connection + - name: tmp + emptyDir: {} diff --git a/_deployment_/secret.yaml b/_deployment_/secret.yaml new file mode 100644 index 0000000..46f94b3 --- /dev/null +++ b/_deployment_/secret.yaml @@ -0,0 +1,4 @@ +hostname: host.ie:8008 +project: boetie +user: vans +password: '*' diff --git a/_tmp/custom-osm-lcm/failed-steps.md b/_tmp/custom-osm-lcm/failed-steps.md new file mode 100644 index 0000000..28527d8 --- /dev/null +++ b/_tmp/custom-osm-lcm/failed-steps.md @@ -0,0 +1,177 @@ +LCM fails +--------- +> epic! + +So building & deploying a custom OSM LCM image has been lots of fun! +Here's what didn't work and possible workarounds. + + +### OSM 11 VM + +Had to build it a couple of times. Some of the install script tasks +[failed][failed-osm-install] but the script went ahead. Eventually +I ended up with a broken OSM install in my hands---some OSM services +didn't get deployed to the K8s cluster. Not sure what the cause of +those random failure is, possibly lack of enough compute resources +and the install procedure not being robust enough to cater for slow +boxes? 
+ + +### LCM build failures - part 1 + +If you bump into this lovely error about Docker permissions when +building the LCM image artifacts + +```console +% devops/tools/local-build.sh --module common,IM,N2VC,LCM,NBI stage-2 +Performing Stage 2 +Building common +Got permission denied while trying to connect to the Docker daemon socket at unix:///var/run/docker.sock: Post "http://%2Fvar%2Frun%2Fdocker.sock/v1.24/build?buildargs=%7B%7D&cachefrom=%5B%5D&cgroupparent=&cpuperiod=0&cpuquota=0&cpusetcpus=&cpusetmems=&cpushares=0&dockerfile=Dockerfile&labels=%7B%7D&memory=0&memswap=0&networkmode=default&rm=1&shmsize=0&t=common-stage2&target=&ulimits=null&version=1": dial unix /var/run/docker.sock: connect: permission denied +docker: Got permission denied while trying to connect to the Docker daemon socket at unix:///var/run/docker.sock: Post "http://%2Fvar%2Frun%2Fdocker.sock/v1.24/containers/create": dial unix /var/run/docker.sock: connect: permission denied. +See 'docker run --help'. +Failed to build common +``` + +It could be because you've got to log out the VM after installing +OSM. To be on the safe side, it's actually best to shut down and +then restart the VM after installation. + + +### LCM build failures - part 2 + +So going past the first hurdle was relatively easy. But then the +command to build the LCM image artifacts took about 50 mins and +I didn't get a clean build in the end: + +```console +% devops/tools/local-build.sh --module common,IM,N2VC,RO,LCM,NBI stage-2 +... +dpkg-deb: building package 'python3-n2vc' in '../python3-n2vc_11.0.0rc1.post36+g23c4455-1_all.deb'. + dpkg-genbuildinfo + dpkg-genchanges >../n2vc_11.0.0rc1.post36+g23c4455-1_amd64.changes +dpkg-genchanges: info: including full source code in upload + dpkg-source --after-build . 
+dpkg-source: info: using options from n2vc-11.0.0rc1.post36+g23c4455/debian/source/options: --extend-diff-ignore=\.egg-info$ +dpkg-buildpackage: info: full upload (original source is included) +dist run-test: commands[3] | sh -c 'rm n2vc/requirements.txt' +____________________________________________________________ summary _____________________________________________________________ + dist: commands succeeded + congratulations :) +renamed './deb_dist/python3-n2vc_11.0.0rc1.post36+g23c4455-1_all.deb' -> '/home/ubuntu/snap/qhttp/common/python3-n2vc_11.0.0rc1.post36+g23c4455-1_all.deb' +Directory /home/ubuntu/workspace/RO does not exist +% echo $? +1 +``` + +Going ahead to the next step anyway, just in case the build failure +wasn't critical... + +```console +% devops/tools/local-build.sh --module LCM stage-3 +... +Step 14/57 : RUN curl $PYTHON3_OSM_LCM_URL -o osm_lcm.deb + ---> Running in 92e6b11d10dc +curl: no URL specified! +curl: try 'curl --help' or 'curl --manual' for more information +The command '/bin/sh -c curl $PYTHON3_OSM_LCM_URL -o osm_lcm.deb' returned a non-zero code: 2 +Failed to build lcm +``` + +Oh deary, deary. Maybe I shouldn't have gone ahead. + + +### LCM build failures - part 3 + +So it turns out the reason for this error message + +> Directory /home/ubuntu/workspace/RO does not exist + +is that the command + +```console +% devops/tools/local-build.sh --module common,IM,N2VC,RO,LCM,NBI stage-2 +``` + +tries to build an OSM component called RO. In fact there's an RO repo. +Since the command also tries building NBI, we're going to clone and set +up these two repos too: + +```console +% git clone https://osm.etsi.org/gerrit/osm/RO +% git clone https://osm.etsi.org/gerrit/osm/NBI +% for r in IM LCM N2VC NBI RO common devops; do cp commit-msg $r/.git/hooks/; done +``` + +Now running again the build command got me past the directory error, +but the build seems to get into an infinite loop when installing RO +deps + +```console +... 
+dist_ro_vim_vmware installdeps: -r/build/requirements.txt, -r/build/requirements-dist.txt +``` + +it just sits there for half an hour seemingly making no progress. +Could it be an issue with VMWare deps? Well, I killed the process +and ran the command again. And again the process got stuck on installing +deps + +```console +... +dist_ro_vim_vmware installdeps: -r/build/requirements.txt, -r/build/requirements-dist.txt +... +dist_ro_sdn_odl_of installdeps: -r/build/requirements.txt, -r/build/requirements-dist.txt +``` + +Notice how this time the VMWare deps step succeeded while the build +got stuck on another component. As a last ditch attempt I tried building +just the RO component + +```console +% devops/tools/local-build.sh --module RO stage-2 +``` + +But the build got stuck again on another `installdeps` step + +```console +... +dist_ro_vim_vmware installdeps: -r/build/requirements.txt, -r/build/requirements-dist.txt +... +dist_ro_sdn_odl_of installdeps: -r/build/requirements.txt, -r/build/requirements-dist.txt +... +dist_ro_sdn_floodlight_of installdeps: -r/build/requirements.txt, -r/build/requirements-dist.txt +``` + +I ran it once more and it got stack on an `installdeps` step of a +component that worked in all previous runs + +```console +... +dist_ro_vim_openvim installdeps: -r/build/requirements.txt, -r/build/requirements-dist.txt +``` + +Deadlock bug? + + +### LCM build failures - part 4 + +So there's no way we can build RO. All we can do is exclude it from +the build and hope we can still build LCM + +```console +% devops/tools/local-build.sh --module common,IM,N2VC,LCM,NBI stage-2 +``` + +With this tweak the build succeeds and we can also go past the LCM +URL failure in creating the Docker image + +```console +% devops/tools/local-build.sh --module LCM stage-3 +``` + +The command runs cleanly and tags `opensourcemano/lcm:devel`. 
+ + + + +[failed-osm-install]: ./osm-install/install.failed.log diff --git a/_tmp/custom-osm-lcm/osm-install/install.failed.log b/_tmp/custom-osm-lcm/osm-install/install.failed.log new file mode 100644 index 0000000..a41959d --- /dev/null +++ b/_tmp/custom-osm-lcm/osm-install/install.failed.log @@ -0,0 +1,1458 @@ + Checking required packages to add ETSI OSM debian repo: software-properties-common apt-transport-https +OK +Get:1 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB] +Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease +Get:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB] +Get:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease [4086 B] +Get:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB] +Get:6 http://archive.ubuntu.com/ubuntu bionic/universe amd64 Packages [8570 kB] +Get:7 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [2309 kB] +Get:8 http://security.ubuntu.com/ubuntu bionic-security/main Translation-en [402 kB] +Get:9 http://archive.ubuntu.com/ubuntu bionic/universe Translation-en [4941 kB] +Get:10 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable/devops amd64 Packages [483 B] +Get:11 http://security.ubuntu.com/ubuntu bionic-security/restricted amd64 Packages [797 kB] +Get:12 http://security.ubuntu.com/ubuntu bionic-security/restricted Translation-en [109 kB] +Get:13 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [1212 kB] +Get:14 http://security.ubuntu.com/ubuntu bionic-security/universe Translation-en [279 kB] +Get:15 http://security.ubuntu.com/ubuntu bionic-security/multiverse amd64 Packages [19.0 kB] +Get:16 http://security.ubuntu.com/ubuntu bionic-security/multiverse Translation-en [3836 B] +Get:17 http://archive.ubuntu.com/ubuntu bionic/multiverse amd64 Packages [151 kB] +Get:18 http://archive.ubuntu.com/ubuntu bionic/multiverse Translation-en [108 kB] +Get:19 
http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [2650 kB] +Get:20 http://archive.ubuntu.com/ubuntu bionic-updates/main Translation-en [492 kB] +Get:21 http://archive.ubuntu.com/ubuntu bionic-updates/restricted amd64 Packages [829 kB] +Get:22 http://archive.ubuntu.com/ubuntu bionic-updates/restricted Translation-en [114 kB] +Get:23 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [1826 kB] +Get:24 http://archive.ubuntu.com/ubuntu bionic-updates/universe Translation-en [396 kB] +Get:25 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse amd64 Packages [24.9 kB] +Get:26 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse Translation-en [6012 B] +Get:27 http://archive.ubuntu.com/ubuntu bionic-backports/main amd64 Packages [10.8 kB] +Get:28 http://archive.ubuntu.com/ubuntu bionic-backports/main Translation-en [5016 B] +Get:29 http://archive.ubuntu.com/ubuntu bionic-backports/universe amd64 Packages [11.6 kB] +Get:30 http://archive.ubuntu.com/ubuntu bionic-backports/universe Translation-en [5864 B] +Fetched 25.5 MB in 8s (3116 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Hit:5 http://security.ubuntu.com/ubuntu bionic-security InRelease +Reading package lists... 
+W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Hit:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:3 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following NEW packages will be installed: + osm-devops +0 upgraded, 1 newly installed, 0 to remove and 15 not upgraded. +Need to get 731 kB of archives. +After this operation, 5661 kB of additional disk space will be used. +Get:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable/devops amd64 osm-devops all 11.0.3-1 [731 kB] +Fetched 731 kB in 0s (2394 kB/s) +Selecting previously unselected package osm-devops. +(Reading database ... 60493 files and directories currently installed.) +Preparing to unpack .../osm-devops_11.0.3-1_all.deb ... +Unpacking osm-devops (11.0.3-1) ... +Setting up osm-devops (11.0.3-1) ... +## Wed Jun 29 14:39:38 CEST 2022 source: INFO: logging sourced +## Wed Jun 29 14:39:38 CEST 2022 source: INFO: config sourced +## Wed Jun 29 14:39:38 CEST 2022 source: INFO: container sourced +## Wed Jun 29 14:39:38 CEST 2022 source: INFO: git_functions sourced +## Wed Jun 29 14:39:38 CEST 2022 source: INFO: track sourced +Checking required packages: git wget curl tar snapd +/. 
+/etc +/etc/bash_completion.d +/etc/bash_completion.d/git-prompt +/usr +/usr/bin +/usr/bin/git +/usr/bin/git-shell +/usr/bin/git-upload-pack +/usr/lib +/usr/lib/git-core +/usr/lib/git-core/git +/usr/lib/git-core/git-add--interactive +/usr/lib/git-core/git-bisect +/usr/lib/git-core/git-credential-cache +/usr/lib/git-core/git-credential-cache--daemon +/usr/lib/git-core/git-credential-store +/usr/lib/git-core/git-daemon +/usr/lib/git-core/git-difftool--helper +/usr/lib/git-core/git-fast-import +/usr/lib/git-core/git-filter-branch +/usr/lib/git-core/git-http-backend +/usr/lib/git-core/git-http-fetch +/usr/lib/git-core/git-http-push +/usr/lib/git-core/git-imap-send +/usr/lib/git-core/git-instaweb +/usr/lib/git-core/git-merge-octopus +/usr/lib/git-core/git-merge-one-file +/usr/lib/git-core/git-merge-resolve +/usr/lib/git-core/git-mergetool +/usr/lib/git-core/git-mergetool--lib +/usr/lib/git-core/git-parse-remote +/usr/lib/git-core/git-quiltimport +/usr/lib/git-core/git-rebase +/usr/lib/git-core/git-rebase--am +/usr/lib/git-core/git-rebase--interactive +/usr/lib/git-core/git-rebase--merge +/usr/lib/git-core/git-remote-http +/usr/lib/git-core/git-remote-testsvn +/usr/lib/git-core/git-request-pull +/usr/lib/git-core/git-sh-i18n +/usr/lib/git-core/git-sh-i18n--envsubst +/usr/lib/git-core/git-sh-prompt +/usr/lib/git-core/git-sh-setup +/usr/lib/git-core/git-shell +/usr/lib/git-core/git-show-index +/usr/lib/git-core/git-stash +/usr/lib/git-core/git-submodule +/usr/lib/git-core/git-subtree +/usr/lib/git-core/git-upload-pack +/usr/lib/git-core/git-web--browse +/usr/lib/git-core/mergetools +/usr/lib/git-core/mergetools/araxis +/usr/lib/git-core/mergetools/bc +/usr/lib/git-core/mergetools/bc3 +/usr/lib/git-core/mergetools/codecompare +/usr/lib/git-core/mergetools/deltawalker +/usr/lib/git-core/mergetools/diffmerge +/usr/lib/git-core/mergetools/diffuse +/usr/lib/git-core/mergetools/ecmerge +/usr/lib/git-core/mergetools/emerge +/usr/lib/git-core/mergetools/examdiff 
+/usr/lib/git-core/mergetools/gvimdiff +/usr/lib/git-core/mergetools/gvimdiff2 +/usr/lib/git-core/mergetools/gvimdiff3 +/usr/lib/git-core/mergetools/kdiff3 +/usr/lib/git-core/mergetools/kompare +/usr/lib/git-core/mergetools/meld +/usr/lib/git-core/mergetools/opendiff +/usr/lib/git-core/mergetools/p4merge +/usr/lib/git-core/mergetools/tkdiff +/usr/lib/git-core/mergetools/tortoisemerge +/usr/lib/git-core/mergetools/vimdiff +/usr/lib/git-core/mergetools/vimdiff2 +/usr/lib/git-core/mergetools/vimdiff3 +/usr/lib/git-core/mergetools/winmerge +/usr/lib/git-core/mergetools/xxdiff +/usr/share +/usr/share/bash-completion +/usr/share/bash-completion/completions +/usr/share/bash-completion/completions/git +/usr/share/doc +/usr/share/doc/git +/usr/share/doc/git/NEWS.Debian.gz +/usr/share/doc/git/README.Debian +/usr/share/doc/git/README.emacs +/usr/share/doc/git/README.md +/usr/share/doc/git/README.source +/usr/share/doc/git/RelNotes +/usr/share/doc/git/RelNotes/1.5.0.1.txt +/usr/share/doc/git/RelNotes/1.5.0.2.txt +/usr/share/doc/git/RelNotes/1.5.0.3.txt +/usr/share/doc/git/RelNotes/1.5.0.4.txt +/usr/share/doc/git/RelNotes/1.5.0.5.txt +/usr/share/doc/git/RelNotes/1.5.0.6.txt +/usr/share/doc/git/RelNotes/1.5.0.7.txt +/usr/share/doc/git/RelNotes/1.5.0.txt +/usr/share/doc/git/RelNotes/1.5.1.1.txt +/usr/share/doc/git/RelNotes/1.5.1.2.txt +/usr/share/doc/git/RelNotes/1.5.1.3.txt +/usr/share/doc/git/RelNotes/1.5.1.4.txt +/usr/share/doc/git/RelNotes/1.5.1.5.txt +/usr/share/doc/git/RelNotes/1.5.1.6.txt +/usr/share/doc/git/RelNotes/1.5.1.txt +/usr/share/doc/git/RelNotes/1.5.2.1.txt +/usr/share/doc/git/RelNotes/1.5.2.2.txt +/usr/share/doc/git/RelNotes/1.5.2.3.txt +/usr/share/doc/git/RelNotes/1.5.2.4.txt +/usr/share/doc/git/RelNotes/1.5.2.5.txt +/usr/share/doc/git/RelNotes/1.5.2.txt +/usr/share/doc/git/RelNotes/1.5.3.1.txt +/usr/share/doc/git/RelNotes/1.5.3.2.txt +/usr/share/doc/git/RelNotes/1.5.3.3.txt +/usr/share/doc/git/RelNotes/1.5.3.4.txt +/usr/share/doc/git/RelNotes/1.5.3.5.txt 
+/usr/share/doc/git/RelNotes/1.5.3.6.txt +/usr/share/doc/git/RelNotes/1.5.3.7.txt +/usr/share/doc/git/RelNotes/1.5.3.8.txt +/usr/share/doc/git/RelNotes/1.5.3.txt +/usr/share/doc/git/RelNotes/1.5.4.1.txt +/usr/share/doc/git/RelNotes/1.5.4.2.txt +/usr/share/doc/git/RelNotes/1.5.4.3.txt +/usr/share/doc/git/RelNotes/1.5.4.4.txt +/usr/share/doc/git/RelNotes/1.5.4.5.txt +/usr/share/doc/git/RelNotes/1.5.4.6.txt +/usr/share/doc/git/RelNotes/1.5.4.7.txt +/usr/share/doc/git/RelNotes/1.5.4.txt +/usr/share/doc/git/RelNotes/1.5.5.1.txt +/usr/share/doc/git/RelNotes/1.5.5.2.txt +/usr/share/doc/git/RelNotes/1.5.5.3.txt +/usr/share/doc/git/RelNotes/1.5.5.4.txt +/usr/share/doc/git/RelNotes/1.5.5.5.txt +/usr/share/doc/git/RelNotes/1.5.5.6.txt +/usr/share/doc/git/RelNotes/1.5.5.txt +/usr/share/doc/git/RelNotes/1.5.6.1.txt +/usr/share/doc/git/RelNotes/1.5.6.2.txt +/usr/share/doc/git/RelNotes/1.5.6.3.txt +/usr/share/doc/git/RelNotes/1.5.6.4.txt +/usr/share/doc/git/RelNotes/1.5.6.5.txt +/usr/share/doc/git/RelNotes/1.5.6.6.txt +/usr/share/doc/git/RelNotes/1.5.6.txt +/usr/share/doc/git/RelNotes/1.6.0.1.txt +/usr/share/doc/git/RelNotes/1.6.0.2.txt +/usr/share/doc/git/RelNotes/1.6.0.3.txt +/usr/share/doc/git/RelNotes/1.6.0.4.txt +/usr/share/doc/git/RelNotes/1.6.0.5.txt +/usr/share/doc/git/RelNotes/1.6.0.6.txt +/usr/share/doc/git/RelNotes/1.6.0.txt +/usr/share/doc/git/RelNotes/1.6.1.1.txt +/usr/share/doc/git/RelNotes/1.6.1.2.txt +/usr/share/doc/git/RelNotes/1.6.1.3.txt +/usr/share/doc/git/RelNotes/1.6.1.4.txt +/usr/share/doc/git/RelNotes/1.6.1.txt +/usr/share/doc/git/RelNotes/1.6.2.1.txt +/usr/share/doc/git/RelNotes/1.6.2.2.txt +/usr/share/doc/git/RelNotes/1.6.2.3.txt +/usr/share/doc/git/RelNotes/1.6.2.4.txt +/usr/share/doc/git/RelNotes/1.6.2.5.txt +/usr/share/doc/git/RelNotes/1.6.2.txt +/usr/share/doc/git/RelNotes/1.6.3.1.txt +/usr/share/doc/git/RelNotes/1.6.3.2.txt +/usr/share/doc/git/RelNotes/1.6.3.3.txt +/usr/share/doc/git/RelNotes/1.6.3.4.txt +/usr/share/doc/git/RelNotes/1.6.3.txt 
+/usr/share/doc/git/RelNotes/1.6.4.1.txt +/usr/share/doc/git/RelNotes/1.6.4.2.txt +/usr/share/doc/git/RelNotes/1.6.4.3.txt +/usr/share/doc/git/RelNotes/1.6.4.4.txt +/usr/share/doc/git/RelNotes/1.6.4.5.txt +/usr/share/doc/git/RelNotes/1.6.4.txt +/usr/share/doc/git/RelNotes/1.6.5.1.txt +/usr/share/doc/git/RelNotes/1.6.5.2.txt +/usr/share/doc/git/RelNotes/1.6.5.3.txt +/usr/share/doc/git/RelNotes/1.6.5.4.txt +/usr/share/doc/git/RelNotes/1.6.5.5.txt +/usr/share/doc/git/RelNotes/1.6.5.6.txt +/usr/share/doc/git/RelNotes/1.6.5.7.txt +/usr/share/doc/git/RelNotes/1.6.5.8.txt +/usr/share/doc/git/RelNotes/1.6.5.9.txt +/usr/share/doc/git/RelNotes/1.6.5.txt +/usr/share/doc/git/RelNotes/1.6.6.1.txt +/usr/share/doc/git/RelNotes/1.6.6.2.txt +/usr/share/doc/git/RelNotes/1.6.6.3.txt +/usr/share/doc/git/RelNotes/1.6.6.txt +/usr/share/doc/git/RelNotes/1.7.0.1.txt +/usr/share/doc/git/RelNotes/1.7.0.2.txt +/usr/share/doc/git/RelNotes/1.7.0.3.txt +/usr/share/doc/git/RelNotes/1.7.0.4.txt +/usr/share/doc/git/RelNotes/1.7.0.5.txt +/usr/share/doc/git/RelNotes/1.7.0.6.txt +/usr/share/doc/git/RelNotes/1.7.0.7.txt +/usr/share/doc/git/RelNotes/1.7.0.8.txt +/usr/share/doc/git/RelNotes/1.7.0.9.txt +/usr/share/doc/git/RelNotes/1.7.0.txt +/usr/share/doc/git/RelNotes/1.7.1.1.txt +/usr/share/doc/git/RelNotes/1.7.1.2.txt +/usr/share/doc/git/RelNotes/1.7.1.3.txt +/usr/share/doc/git/RelNotes/1.7.1.4.txt +/usr/share/doc/git/RelNotes/1.7.1.txt +/usr/share/doc/git/RelNotes/1.7.10.1.txt +/usr/share/doc/git/RelNotes/1.7.10.2.txt +/usr/share/doc/git/RelNotes/1.7.10.3.txt +/usr/share/doc/git/RelNotes/1.7.10.4.txt +/usr/share/doc/git/RelNotes/1.7.10.5.txt +/usr/share/doc/git/RelNotes/1.7.10.txt +/usr/share/doc/git/RelNotes/1.7.11.1.txt +/usr/share/doc/git/RelNotes/1.7.11.2.txt +/usr/share/doc/git/RelNotes/1.7.11.3.txt +/usr/share/doc/git/RelNotes/1.7.11.4.txt +/usr/share/doc/git/RelNotes/1.7.11.5.txt +/usr/share/doc/git/RelNotes/1.7.11.6.txt +/usr/share/doc/git/RelNotes/1.7.11.7.txt 
+/usr/share/doc/git/RelNotes/1.7.11.txt +/usr/share/doc/git/RelNotes/1.7.12.1.txt +/usr/share/doc/git/RelNotes/1.7.12.2.txt +/usr/share/doc/git/RelNotes/1.7.12.3.txt +/usr/share/doc/git/RelNotes/1.7.12.4.txt +/usr/share/doc/git/RelNotes/1.7.12.txt +/usr/share/doc/git/RelNotes/1.7.2.1.txt +/usr/share/doc/git/RelNotes/1.7.2.2.txt +/usr/share/doc/git/RelNotes/1.7.2.3.txt +/usr/share/doc/git/RelNotes/1.7.2.4.txt +/usr/share/doc/git/RelNotes/1.7.2.5.txt +/usr/share/doc/git/RelNotes/1.7.2.txt +/usr/share/doc/git/RelNotes/1.7.3.1.txt +/usr/share/doc/git/RelNotes/1.7.3.2.txt +/usr/share/doc/git/RelNotes/1.7.3.3.txt +/usr/share/doc/git/RelNotes/1.7.3.4.txt +/usr/share/doc/git/RelNotes/1.7.3.5.txt +/usr/share/doc/git/RelNotes/1.7.3.txt +/usr/share/doc/git/RelNotes/1.7.4.1.txt +/usr/share/doc/git/RelNotes/1.7.4.2.txt +/usr/share/doc/git/RelNotes/1.7.4.3.txt +/usr/share/doc/git/RelNotes/1.7.4.4.txt +/usr/share/doc/git/RelNotes/1.7.4.5.txt +/usr/share/doc/git/RelNotes/1.7.4.txt +/usr/share/doc/git/RelNotes/1.7.5.1.txt +/usr/share/doc/git/RelNotes/1.7.5.2.txt +/usr/share/doc/git/RelNotes/1.7.5.3.txt +/usr/share/doc/git/RelNotes/1.7.5.4.txt +/usr/share/doc/git/RelNotes/1.7.5.txt +/usr/share/doc/git/RelNotes/1.7.6.1.txt +/usr/share/doc/git/RelNotes/1.7.6.2.txt +/usr/share/doc/git/RelNotes/1.7.6.3.txt +/usr/share/doc/git/RelNotes/1.7.6.4.txt +/usr/share/doc/git/RelNotes/1.7.6.5.txt +/usr/share/doc/git/RelNotes/1.7.6.6.txt +/usr/share/doc/git/RelNotes/1.7.6.txt +/usr/share/doc/git/RelNotes/1.7.7.1.txt +/usr/share/doc/git/RelNotes/1.7.7.2.txt +/usr/share/doc/git/RelNotes/1.7.7.3.txt +/usr/share/doc/git/RelNotes/1.7.7.4.txt +/usr/share/doc/git/RelNotes/1.7.7.5.txt +/usr/share/doc/git/RelNotes/1.7.7.6.txt +/usr/share/doc/git/RelNotes/1.7.7.7.txt +/usr/share/doc/git/RelNotes/1.7.7.txt +/usr/share/doc/git/RelNotes/1.7.8.1.txt +/usr/share/doc/git/RelNotes/1.7.8.2.txt +/usr/share/doc/git/RelNotes/1.7.8.3.txt +/usr/share/doc/git/RelNotes/1.7.8.4.txt +/usr/share/doc/git/RelNotes/1.7.8.5.txt 
+/usr/share/doc/git/RelNotes/1.7.8.6.txt +/usr/share/doc/git/RelNotes/1.7.8.txt +/usr/share/doc/git/RelNotes/1.7.9.1.txt +/usr/share/doc/git/RelNotes/1.7.9.2.txt +/usr/share/doc/git/RelNotes/1.7.9.3.txt +/usr/share/doc/git/RelNotes/1.7.9.4.txt +/usr/share/doc/git/RelNotes/1.7.9.5.txt +/usr/share/doc/git/RelNotes/1.7.9.6.txt +/usr/share/doc/git/RelNotes/1.7.9.7.txt +/usr/share/doc/git/RelNotes/1.7.9.txt +/usr/share/doc/git/RelNotes/1.8.0.1.txt +/usr/share/doc/git/RelNotes/1.8.0.2.txt +/usr/share/doc/git/RelNotes/1.8.0.3.txt +/usr/share/doc/git/RelNotes/1.8.0.txt +/usr/share/doc/git/RelNotes/1.8.1.1.txt +/usr/share/doc/git/RelNotes/1.8.1.2.txt +/usr/share/doc/git/RelNotes/1.8.1.3.txt +/usr/share/doc/git/RelNotes/1.8.1.4.txt +/usr/share/doc/git/RelNotes/1.8.1.5.txt +/usr/share/doc/git/RelNotes/1.8.1.6.txt +/usr/share/doc/git/RelNotes/1.8.1.txt +/usr/share/doc/git/RelNotes/1.8.2.1.txt +/usr/share/doc/git/RelNotes/1.8.2.2.txt +/usr/share/doc/git/RelNotes/1.8.2.3.txt +/usr/share/doc/git/RelNotes/1.8.2.txt +/usr/share/doc/git/RelNotes/1.8.3.1.txt +/usr/share/doc/git/RelNotes/1.8.3.2.txt +/usr/share/doc/git/RelNotes/1.8.3.3.txt +/usr/share/doc/git/RelNotes/1.8.3.4.txt +/usr/share/doc/git/RelNotes/1.8.3.txt +/usr/share/doc/git/RelNotes/1.8.4.1.txt +/usr/share/doc/git/RelNotes/1.8.4.2.txt +/usr/share/doc/git/RelNotes/1.8.4.3.txt +/usr/share/doc/git/RelNotes/1.8.4.4.txt +/usr/share/doc/git/RelNotes/1.8.4.5.txt +/usr/share/doc/git/RelNotes/1.8.4.txt +/usr/share/doc/git/RelNotes/1.8.5.1.txt +/usr/share/doc/git/RelNotes/1.8.5.2.txt +/usr/share/doc/git/RelNotes/1.8.5.3.txt +/usr/share/doc/git/RelNotes/1.8.5.4.txt +/usr/share/doc/git/RelNotes/1.8.5.5.txt +/usr/share/doc/git/RelNotes/1.8.5.6.txt +/usr/share/doc/git/RelNotes/1.8.5.txt +/usr/share/doc/git/RelNotes/1.9.0.txt +/usr/share/doc/git/RelNotes/1.9.1.txt +/usr/share/doc/git/RelNotes/1.9.2.txt +/usr/share/doc/git/RelNotes/1.9.3.txt +/usr/share/doc/git/RelNotes/1.9.4.txt +/usr/share/doc/git/RelNotes/1.9.5.txt 
+/usr/share/doc/git/RelNotes/2.0.0.txt +/usr/share/doc/git/RelNotes/2.0.1.txt +/usr/share/doc/git/RelNotes/2.0.2.txt +/usr/share/doc/git/RelNotes/2.0.3.txt +/usr/share/doc/git/RelNotes/2.0.4.txt +/usr/share/doc/git/RelNotes/2.0.5.txt +/usr/share/doc/git/RelNotes/2.1.0.txt +/usr/share/doc/git/RelNotes/2.1.1.txt +/usr/share/doc/git/RelNotes/2.1.2.txt +/usr/share/doc/git/RelNotes/2.1.3.txt +/usr/share/doc/git/RelNotes/2.1.4.txt +/usr/share/doc/git/RelNotes/2.10.0.txt +/usr/share/doc/git/RelNotes/2.10.1.txt +/usr/share/doc/git/RelNotes/2.10.2.txt +/usr/share/doc/git/RelNotes/2.10.3.txt +/usr/share/doc/git/RelNotes/2.10.4.txt +/usr/share/doc/git/RelNotes/2.10.5.txt +/usr/share/doc/git/RelNotes/2.11.0.txt +/usr/share/doc/git/RelNotes/2.11.1.txt +/usr/share/doc/git/RelNotes/2.11.2.txt +/usr/share/doc/git/RelNotes/2.11.3.txt +/usr/share/doc/git/RelNotes/2.11.4.txt +/usr/share/doc/git/RelNotes/2.12.0.txt +/usr/share/doc/git/RelNotes/2.12.1.txt +/usr/share/doc/git/RelNotes/2.12.2.txt +/usr/share/doc/git/RelNotes/2.12.3.txt +/usr/share/doc/git/RelNotes/2.12.4.txt +/usr/share/doc/git/RelNotes/2.12.5.txt +/usr/share/doc/git/RelNotes/2.13.0.txt +/usr/share/doc/git/RelNotes/2.13.1.txt +/usr/share/doc/git/RelNotes/2.13.2.txt +/usr/share/doc/git/RelNotes/2.13.3.txt +/usr/share/doc/git/RelNotes/2.13.4.txt +/usr/share/doc/git/RelNotes/2.13.5.txt +/usr/share/doc/git/RelNotes/2.13.6.txt +/usr/share/doc/git/RelNotes/2.13.7.txt +/usr/share/doc/git/RelNotes/2.14.0.txt +/usr/share/doc/git/RelNotes/2.14.1.txt +/usr/share/doc/git/RelNotes/2.14.2.txt +/usr/share/doc/git/RelNotes/2.14.3.txt +/usr/share/doc/git/RelNotes/2.14.4.txt +/usr/share/doc/git/RelNotes/2.15.0.txt +/usr/share/doc/git/RelNotes/2.15.1.txt +/usr/share/doc/git/RelNotes/2.15.2.txt +/usr/share/doc/git/RelNotes/2.16.0.txt +/usr/share/doc/git/RelNotes/2.16.1.txt +/usr/share/doc/git/RelNotes/2.16.2.txt +/usr/share/doc/git/RelNotes/2.16.3.txt +/usr/share/doc/git/RelNotes/2.16.4.txt +/usr/share/doc/git/RelNotes/2.17.0.txt 
+/usr/share/doc/git/RelNotes/2.17.1.txt +/usr/share/doc/git/RelNotes/2.2.0.txt +/usr/share/doc/git/RelNotes/2.2.1.txt +/usr/share/doc/git/RelNotes/2.2.2.txt +/usr/share/doc/git/RelNotes/2.2.3.txt +/usr/share/doc/git/RelNotes/2.3.0.txt +/usr/share/doc/git/RelNotes/2.3.1.txt +/usr/share/doc/git/RelNotes/2.3.10.txt +/usr/share/doc/git/RelNotes/2.3.2.txt +/usr/share/doc/git/RelNotes/2.3.3.txt +/usr/share/doc/git/RelNotes/2.3.4.txt +/usr/share/doc/git/RelNotes/2.3.5.txt +/usr/share/doc/git/RelNotes/2.3.6.txt +/usr/share/doc/git/RelNotes/2.3.7.txt +/usr/share/doc/git/RelNotes/2.3.8.txt +/usr/share/doc/git/RelNotes/2.3.9.txt +/usr/share/doc/git/RelNotes/2.4.0.txt +/usr/share/doc/git/RelNotes/2.4.1.txt +/usr/share/doc/git/RelNotes/2.4.10.txt +/usr/share/doc/git/RelNotes/2.4.11.txt +/usr/share/doc/git/RelNotes/2.4.12.txt +/usr/share/doc/git/RelNotes/2.4.2.txt +/usr/share/doc/git/RelNotes/2.4.3.txt +/usr/share/doc/git/RelNotes/2.4.4.txt +/usr/share/doc/git/RelNotes/2.4.5.txt +/usr/share/doc/git/RelNotes/2.4.6.txt +/usr/share/doc/git/RelNotes/2.4.7.txt +/usr/share/doc/git/RelNotes/2.4.8.txt +/usr/share/doc/git/RelNotes/2.4.9.txt +/usr/share/doc/git/RelNotes/2.5.0.txt +/usr/share/doc/git/RelNotes/2.5.1.txt +/usr/share/doc/git/RelNotes/2.5.2.txt +/usr/share/doc/git/RelNotes/2.5.3.txt +/usr/share/doc/git/RelNotes/2.5.4.txt +/usr/share/doc/git/RelNotes/2.5.5.txt +/usr/share/doc/git/RelNotes/2.5.6.txt +/usr/share/doc/git/RelNotes/2.6.0.txt +/usr/share/doc/git/RelNotes/2.6.1.txt +/usr/share/doc/git/RelNotes/2.6.2.txt +/usr/share/doc/git/RelNotes/2.6.3.txt +/usr/share/doc/git/RelNotes/2.6.4.txt +/usr/share/doc/git/RelNotes/2.6.5.txt +/usr/share/doc/git/RelNotes/2.6.6.txt +/usr/share/doc/git/RelNotes/2.6.7.txt +/usr/share/doc/git/RelNotes/2.7.0.txt +/usr/share/doc/git/RelNotes/2.7.1.txt +/usr/share/doc/git/RelNotes/2.7.2.txt +/usr/share/doc/git/RelNotes/2.7.3.txt +/usr/share/doc/git/RelNotes/2.7.4.txt +/usr/share/doc/git/RelNotes/2.7.5.txt +/usr/share/doc/git/RelNotes/2.7.6.txt 
+/usr/share/doc/git/RelNotes/2.8.0.txt +/usr/share/doc/git/RelNotes/2.8.1.txt +/usr/share/doc/git/RelNotes/2.8.2.txt +/usr/share/doc/git/RelNotes/2.8.3.txt +/usr/share/doc/git/RelNotes/2.8.4.txt +/usr/share/doc/git/RelNotes/2.8.5.txt +/usr/share/doc/git/RelNotes/2.8.6.txt +/usr/share/doc/git/RelNotes/2.9.0.txt +/usr/share/doc/git/RelNotes/2.9.1.txt +/usr/share/doc/git/RelNotes/2.9.2.txt +/usr/share/doc/git/RelNotes/2.9.3.txt +/usr/share/doc/git/RelNotes/2.9.4.txt +/usr/share/doc/git/RelNotes/2.9.5.txt +/usr/share/doc/git/changelog.Debian.gz +/usr/share/doc/git/contrib +/usr/share/doc/git/contrib/README +/usr/share/doc/git/contrib/buildsystems +/usr/share/doc/git/contrib/buildsystems/Generators +/usr/share/doc/git/contrib/buildsystems/Generators/QMake.pm +/usr/share/doc/git/contrib/buildsystems/Generators/Vcproj.pm +/usr/share/doc/git/contrib/buildsystems/Generators.pm +/usr/share/doc/git/contrib/buildsystems/engine.pl +/usr/share/doc/git/contrib/buildsystems/generate +/usr/share/doc/git/contrib/buildsystems/parse.pl +/usr/share/doc/git/contrib/coccinelle +/usr/share/doc/git/contrib/coccinelle/README +/usr/share/doc/git/contrib/coccinelle/array.cocci +/usr/share/doc/git/contrib/coccinelle/free.cocci +/usr/share/doc/git/contrib/coccinelle/object_id.cocci +/usr/share/doc/git/contrib/coccinelle/qsort.cocci +/usr/share/doc/git/contrib/coccinelle/strbuf.cocci +/usr/share/doc/git/contrib/coccinelle/swap.cocci +/usr/share/doc/git/contrib/coccinelle/xstrdup_or_null.cocci +/usr/share/doc/git/contrib/contacts +/usr/share/doc/git/contrib/contacts/Makefile +/usr/share/doc/git/contrib/contacts/git-contacts +/usr/share/doc/git/contrib/contacts/git-contacts.txt +/usr/share/doc/git/contrib/convert-grafts-to-replace-refs.sh +/usr/share/doc/git/contrib/credential +/usr/share/doc/git/contrib/credential/gnome-keyring +/usr/share/doc/git/contrib/credential/gnome-keyring/Makefile +/usr/share/doc/git/contrib/credential/gnome-keyring/git-credential-gnome-keyring.c 
+/usr/share/doc/git/contrib/credential/libsecret +/usr/share/doc/git/contrib/credential/libsecret/Makefile +/usr/share/doc/git/contrib/credential/libsecret/git-credential-libsecret.c +/usr/share/doc/git/contrib/credential/netrc +/usr/share/doc/git/contrib/credential/netrc/Makefile +/usr/share/doc/git/contrib/credential/netrc/git-credential-netrc +/usr/share/doc/git/contrib/credential/netrc/test.netrc +/usr/share/doc/git/contrib/credential/netrc/test.pl +/usr/share/doc/git/contrib/credential/osxkeychain +/usr/share/doc/git/contrib/credential/osxkeychain/Makefile +/usr/share/doc/git/contrib/credential/osxkeychain/git-credential-osxkeychain.c +/usr/share/doc/git/contrib/credential/wincred +/usr/share/doc/git/contrib/credential/wincred/Makefile +/usr/share/doc/git/contrib/credential/wincred/git-credential-wincred.c +/usr/share/doc/git/contrib/diff-highlight +/usr/share/doc/git/contrib/diff-highlight/DiffHighlight.pm +/usr/share/doc/git/contrib/diff-highlight/Makefile +/usr/share/doc/git/contrib/diff-highlight/README +/usr/share/doc/git/contrib/diff-highlight/diff-highlight +/usr/share/doc/git/contrib/diff-highlight/diff-highlight.perl +/usr/share/doc/git/contrib/diff-highlight/shebang.perl +/usr/share/doc/git/contrib/diff-highlight/t +/usr/share/doc/git/contrib/diff-highlight/t/Makefile +/usr/share/doc/git/contrib/diff-highlight/t/t9400-diff-highlight.sh +/usr/share/doc/git/contrib/examples +/usr/share/doc/git/contrib/examples/README +/usr/share/doc/git/contrib/examples/builtin-fetch--tool.c +/usr/share/doc/git/contrib/examples/git-am.sh +/usr/share/doc/git/contrib/examples/git-checkout.sh +/usr/share/doc/git/contrib/examples/git-clean.sh +/usr/share/doc/git/contrib/examples/git-clone.sh +/usr/share/doc/git/contrib/examples/git-commit.sh +/usr/share/doc/git/contrib/examples/git-difftool.perl +/usr/share/doc/git/contrib/examples/git-fetch.sh +/usr/share/doc/git/contrib/examples/git-gc.sh +/usr/share/doc/git/contrib/examples/git-log.sh 
+/usr/share/doc/git/contrib/examples/git-ls-remote.sh +/usr/share/doc/git/contrib/examples/git-merge-ours.sh +/usr/share/doc/git/contrib/examples/git-merge.sh +/usr/share/doc/git/contrib/examples/git-notes.sh +/usr/share/doc/git/contrib/examples/git-pull.sh +/usr/share/doc/git/contrib/examples/git-remote.perl +/usr/share/doc/git/contrib/examples/git-repack.sh +/usr/share/doc/git/contrib/examples/git-rerere.perl +/usr/share/doc/git/contrib/examples/git-reset.sh +/usr/share/doc/git/contrib/examples/git-resolve.sh +/usr/share/doc/git/contrib/examples/git-revert.sh +/usr/share/doc/git/contrib/examples/git-svnimport.perl +/usr/share/doc/git/contrib/examples/git-svnimport.txt +/usr/share/doc/git/contrib/examples/git-tag.sh +/usr/share/doc/git/contrib/examples/git-verify-tag.sh +/usr/share/doc/git/contrib/examples/git-whatchanged.sh +/usr/share/doc/git/contrib/fast-import +/usr/share/doc/git/contrib/fast-import/git-import.perl +/usr/share/doc/git/contrib/fast-import/git-import.sh +/usr/share/doc/git/contrib/fast-import/git-p4.README +/usr/share/doc/git/contrib/fast-import/import-directories.perl +/usr/share/doc/git/contrib/fast-import/import-tars.perl +/usr/share/doc/git/contrib/fast-import/import-zips.py +/usr/share/doc/git/contrib/git-jump +/usr/share/doc/git/contrib/git-jump/README +/usr/share/doc/git/contrib/git-jump/git-jump +/usr/share/doc/git/contrib/git-resurrect.sh +/usr/share/doc/git/contrib/git-shell-commands +/usr/share/doc/git/contrib/git-shell-commands/README +/usr/share/doc/git/contrib/git-shell-commands/help +/usr/share/doc/git/contrib/git-shell-commands/list +/usr/share/doc/git/contrib/hg-to-git +/usr/share/doc/git/contrib/hg-to-git/hg-to-git.py +/usr/share/doc/git/contrib/hg-to-git/hg-to-git.txt +/usr/share/doc/git/contrib/long-running-filter +/usr/share/doc/git/contrib/long-running-filter/example.pl +/usr/share/doc/git/contrib/persistent-https +/usr/share/doc/git/contrib/persistent-https/Makefile +/usr/share/doc/git/contrib/persistent-https/README 
+/usr/share/doc/git/contrib/persistent-https/client.go +/usr/share/doc/git/contrib/persistent-https/main.go +/usr/share/doc/git/contrib/persistent-https/proxy.go +/usr/share/doc/git/contrib/persistent-https/socket.go +/usr/share/doc/git/contrib/remote-helpers +/usr/share/doc/git/contrib/remote-helpers/README +/usr/share/doc/git/contrib/remote-helpers/git-remote-bzr +/usr/share/doc/git/contrib/remote-helpers/git-remote-hg +/usr/share/doc/git/contrib/remotes2config.sh +/usr/share/doc/git/contrib/rerere-train.sh +/usr/share/doc/git/contrib/stats +/usr/share/doc/git/contrib/stats/git-common-hash +/usr/share/doc/git/contrib/stats/mailmap.pl +/usr/share/doc/git/contrib/stats/packinfo.pl +/usr/share/doc/git/contrib/subtree +/usr/share/doc/git/contrib/subtree/INSTALL +/usr/share/doc/git/contrib/subtree/Makefile +/usr/share/doc/git/contrib/subtree/README +/usr/share/doc/git/contrib/subtree/git-subtree +/usr/share/doc/git/contrib/subtree/git-subtree.sh +/usr/share/doc/git/contrib/subtree/git-subtree.txt +/usr/share/doc/git/contrib/subtree/t +/usr/share/doc/git/contrib/subtree/t/Makefile +/usr/share/doc/git/contrib/subtree/t/t7900-subtree.sh +/usr/share/doc/git/contrib/subtree/todo +/usr/share/doc/git/contrib/svn-fe +/usr/share/doc/git/contrib/svn-fe/Makefile +/usr/share/doc/git/contrib/svn-fe/svn-fe.c +/usr/share/doc/git/contrib/svn-fe/svn-fe.txt +/usr/share/doc/git/contrib/svn-fe/svnrdump_sim.py +/usr/share/doc/git/contrib/thunderbird-patch-inline +/usr/share/doc/git/contrib/thunderbird-patch-inline/README +/usr/share/doc/git/contrib/thunderbird-patch-inline/appp.sh +/usr/share/doc/git/contrib/update-unicode +/usr/share/doc/git/contrib/update-unicode/README +/usr/share/doc/git/contrib/update-unicode/update_unicode.sh +/usr/share/doc/git/contrib/workdir +/usr/share/doc/git/contrib/workdir/git-new-workdir +/usr/share/doc/git/copyright +/usr/share/git-core +/usr/share/git-core/contrib +/usr/share/git-core/contrib/hooks +/usr/share/git-core/contrib/hooks/post-receive-email 
+/usr/share/git-core/contrib/hooks/pre-auto-gc-battery +/usr/share/git-core/contrib/hooks/setgitperms.perl +/usr/share/git-core/contrib/hooks/update-paranoid +/usr/share/git-core/templates +/usr/share/git-core/templates/branches +/usr/share/git-core/templates/description +/usr/share/git-core/templates/hooks +/usr/share/git-core/templates/hooks/applypatch-msg.sample +/usr/share/git-core/templates/hooks/commit-msg.sample +/usr/share/git-core/templates/hooks/fsmonitor-watchman.sample +/usr/share/git-core/templates/hooks/post-update.sample +/usr/share/git-core/templates/hooks/pre-applypatch.sample +/usr/share/git-core/templates/hooks/pre-commit.sample +/usr/share/git-core/templates/hooks/pre-push.sample +/usr/share/git-core/templates/hooks/pre-rebase.sample +/usr/share/git-core/templates/hooks/pre-receive.sample +/usr/share/git-core/templates/hooks/prepare-commit-msg.sample +/usr/share/git-core/templates/hooks/update.sample +/usr/share/git-core/templates/info +/usr/share/git-core/templates/info/exclude +/usr/share/gitweb +/usr/share/gitweb/gitweb.cgi +/usr/share/gitweb/static +/usr/share/gitweb/static/git-favicon.png +/usr/share/gitweb/static/git-logo.png +/usr/share/gitweb/static/gitweb.css +/usr/share/gitweb/static/gitweb.js +/usr/share/lintian +/usr/share/lintian/overrides +/usr/share/lintian/overrides/git +/usr/share/perl5 +/usr/share/perl5/Git +/usr/share/perl5/Git/I18N.pm +/usr/share/perl5/Git/IndexInfo.pm +/usr/share/perl5/Git/LoadCPAN +/usr/share/perl5/Git/LoadCPAN/Error.pm +/usr/share/perl5/Git/LoadCPAN/Mail +/usr/share/perl5/Git/LoadCPAN/Mail/Address.pm +/usr/share/perl5/Git/LoadCPAN.pm +/usr/share/perl5/Git/Packet.pm +/usr/share/perl5/Git.pm +/var +/var/lib +/var/lib/git +/usr/bin/git-receive-pack +/usr/bin/git-upload-archive +/usr/lib/git-core/git-add +/usr/lib/git-core/git-am +/usr/lib/git-core/git-annotate +/usr/lib/git-core/git-apply +/usr/lib/git-core/git-archive +/usr/lib/git-core/git-bisect--helper +/usr/lib/git-core/git-blame 
+/usr/lib/git-core/git-branch +/usr/lib/git-core/git-bundle +/usr/lib/git-core/git-cat-file +/usr/lib/git-core/git-check-attr +/usr/lib/git-core/git-check-ignore +/usr/lib/git-core/git-check-mailmap +/usr/lib/git-core/git-check-ref-format +/usr/lib/git-core/git-checkout +/usr/lib/git-core/git-checkout-index +/usr/lib/git-core/git-cherry +/usr/lib/git-core/git-cherry-pick +/usr/lib/git-core/git-clean +/usr/lib/git-core/git-clone +/usr/lib/git-core/git-column +/usr/lib/git-core/git-commit +/usr/lib/git-core/git-commit-tree +/usr/lib/git-core/git-config +/usr/lib/git-core/git-count-objects +/usr/lib/git-core/git-credential +/usr/lib/git-core/git-describe +/usr/lib/git-core/git-diff +/usr/lib/git-core/git-diff-files +/usr/lib/git-core/git-diff-index +/usr/lib/git-core/git-diff-tree +/usr/lib/git-core/git-difftool +/usr/lib/git-core/git-fast-export +/usr/lib/git-core/git-fetch +/usr/lib/git-core/git-fetch-pack +/usr/lib/git-core/git-fmt-merge-msg +/usr/lib/git-core/git-for-each-ref +/usr/lib/git-core/git-format-patch +/usr/lib/git-core/git-fsck +/usr/lib/git-core/git-fsck-objects +/usr/lib/git-core/git-gc +/usr/lib/git-core/git-get-tar-commit-id +/usr/lib/git-core/git-grep +/usr/lib/git-core/git-hash-object +/usr/lib/git-core/git-help +/usr/lib/git-core/git-index-pack +/usr/lib/git-core/git-init +/usr/lib/git-core/git-init-db +/usr/lib/git-core/git-interpret-trailers +/usr/lib/git-core/git-log +/usr/lib/git-core/git-ls-files +/usr/lib/git-core/git-ls-remote +/usr/lib/git-core/git-ls-tree +/usr/lib/git-core/git-mailinfo +/usr/lib/git-core/git-mailsplit +/usr/lib/git-core/git-merge +/usr/lib/git-core/git-merge-base +/usr/lib/git-core/git-merge-file +/usr/lib/git-core/git-merge-index +/usr/lib/git-core/git-merge-ours +/usr/lib/git-core/git-merge-recursive +/usr/lib/git-core/git-merge-subtree +/usr/lib/git-core/git-merge-tree +/usr/lib/git-core/git-mktag +/usr/lib/git-core/git-mktree +/usr/lib/git-core/git-mv +/usr/lib/git-core/git-name-rev +/usr/lib/git-core/git-notes 
+/usr/lib/git-core/git-pack-objects +/usr/lib/git-core/git-pack-redundant +/usr/lib/git-core/git-pack-refs +/usr/lib/git-core/git-patch-id +/usr/lib/git-core/git-prune +/usr/lib/git-core/git-prune-packed +/usr/lib/git-core/git-pull +/usr/lib/git-core/git-push +/usr/lib/git-core/git-read-tree +/usr/lib/git-core/git-rebase--helper +/usr/lib/git-core/git-receive-pack +/usr/lib/git-core/git-reflog +/usr/lib/git-core/git-remote +/usr/lib/git-core/git-remote-ext +/usr/lib/git-core/git-remote-fd +/usr/lib/git-core/git-remote-ftp +/usr/lib/git-core/git-remote-ftps +/usr/lib/git-core/git-remote-https +/usr/lib/git-core/git-repack +/usr/lib/git-core/git-replace +/usr/lib/git-core/git-rerere +/usr/lib/git-core/git-reset +/usr/lib/git-core/git-rev-list +/usr/lib/git-core/git-rev-parse +/usr/lib/git-core/git-revert +/usr/lib/git-core/git-rm +/usr/lib/git-core/git-send-pack +/usr/lib/git-core/git-shortlog +/usr/lib/git-core/git-show +/usr/lib/git-core/git-show-branch +/usr/lib/git-core/git-show-ref +/usr/lib/git-core/git-stage +/usr/lib/git-core/git-status +/usr/lib/git-core/git-stripspace +/usr/lib/git-core/git-submodule--helper +/usr/lib/git-core/git-symbolic-ref +/usr/lib/git-core/git-tag +/usr/lib/git-core/git-unpack-file +/usr/lib/git-core/git-unpack-objects +/usr/lib/git-core/git-update-index +/usr/lib/git-core/git-update-ref +/usr/lib/git-core/git-update-server-info +/usr/lib/git-core/git-upload-archive +/usr/lib/git-core/git-var +/usr/lib/git-core/git-verify-commit +/usr/lib/git-core/git-verify-pack +/usr/lib/git-core/git-verify-tag +/usr/lib/git-core/git-whatchanged +/usr/lib/git-core/git-worktree +/usr/lib/git-core/git-write-tree +/usr/share/bash-completion/completions/gitk +/usr/share/doc/git/contrib/hooks +/usr/share/doc/git/contrib/persistent-https/LICENSE +/usr/share/doc/git/contrib/subtree/COPYING +/usr/share/gitweb/index.cgi +/. 
+/etc +/etc/wgetrc +/usr +/usr/bin +/usr/bin/wget +/usr/share +/usr/share/doc +/usr/share/doc/wget +/usr/share/doc/wget/AUTHORS +/usr/share/doc/wget/MAILING-LIST +/usr/share/doc/wget/NEWS.gz +/usr/share/doc/wget/README +/usr/share/doc/wget/changelog.Debian.gz +/usr/share/doc/wget/copyright +/usr/share/info +/usr/share/info/wget.info.gz +/usr/share/man +/usr/share/man/man1 +/usr/share/man/man1/wget.1.gz +/. +/usr +/usr/bin +/usr/bin/curl +/usr/share +/usr/share/doc +/usr/share/doc/curl +/usr/share/doc/curl/copyright +/usr/share/man +/usr/share/man/man1 +/usr/share/man/man1/curl.1.gz +/usr/share/zsh +/usr/share/zsh/vendor-completions +/usr/share/zsh/vendor-completions/_curl +/usr/share/doc/curl/NEWS.Debian.gz +/usr/share/doc/curl/changelog.Debian.gz +/. +/bin +/bin/tar +/etc +/etc/rmt +/usr +/usr/lib +/usr/lib/mime +/usr/lib/mime/packages +/usr/lib/mime/packages/tar +/usr/lib/tar +/usr/sbin +/usr/sbin/rmt-tar +/usr/sbin/tarcat +/usr/share +/usr/share/doc +/usr/share/doc/tar +/usr/share/doc/tar/AUTHORS +/usr/share/doc/tar/NEWS.Debian.gz +/usr/share/doc/tar/NEWS.gz +/usr/share/doc/tar/README.Debian +/usr/share/doc/tar/THANKS.gz +/usr/share/doc/tar/changelog.Debian.gz +/usr/share/doc/tar/copyright +/usr/share/man +/usr/share/man/man1 +/usr/share/man/man1/tar.1.gz +/usr/share/man/man1/tarcat.1.gz +/usr/share/man/man8 +/usr/share/man/man8/rmt-tar.8.gz +/. 
+/etc +/etc/apparmor.d +/etc/apparmor.d/usr.lib.snapd.snap-confine.real +/etc/apt +/etc/apt/apt.conf.d +/etc/apt/apt.conf.d/20snapd.conf +/etc/profile.d +/etc/profile.d/apps-bin-path.sh +/etc/xdg +/etc/xdg/autostart +/etc/xdg/autostart/snap-userd-autostart.desktop +/lib +/lib/systemd +/lib/systemd/system +/lib/systemd/system/snapd.apparmor.service +/lib/systemd/system/snapd.autoimport.service +/lib/systemd/system/snapd.core-fixup.service +/lib/systemd/system/snapd.failure.service +/lib/systemd/system/snapd.recovery-chooser-trigger.service +/lib/systemd/system/snapd.seeded.service +/lib/systemd/system/snapd.service +/lib/systemd/system/snapd.snap-repair.service +/lib/systemd/system/snapd.snap-repair.timer +/lib/systemd/system/snapd.socket +/lib/systemd/system/snapd.system-shutdown.service +/lib/systemd/system-generators +/lib/systemd/system-generators/snapd-generator +/lib/udev +/lib/udev/rules.d +/lib/udev/rules.d/66-snapd-autoimport.rules +/snap +/usr +/usr/bin +/usr/bin/snap +/usr/bin/snapfuse +/usr/lib +/usr/lib/environment.d +/usr/lib/environment.d/990-snapd.conf +/usr/lib/snapd +/usr/lib/snapd/complete.sh +/usr/lib/snapd/etelpmoc.sh +/usr/lib/snapd/info +/usr/lib/snapd/snap-bootstrap +/usr/lib/snapd/snap-confine +/usr/lib/snapd/snap-device-helper +/usr/lib/snapd/snap-discard-ns +/usr/lib/snapd/snap-exec +/usr/lib/snapd/snap-failure +/usr/lib/snapd/snap-gdb-shim +/usr/lib/snapd/snap-gdbserver-shim +/usr/lib/snapd/snap-mgmt +/usr/lib/snapd/snap-preseed +/usr/lib/snapd/snap-recovery-chooser +/usr/lib/snapd/snap-repair +/usr/lib/snapd/snap-seccomp +/usr/lib/snapd/snap-update-ns +/usr/lib/snapd/snapctl +/usr/lib/snapd/snapd +/usr/lib/snapd/snapd-apparmor +/usr/lib/snapd/snapd.core-fixup.sh +/usr/lib/snapd/snapd.run-from-snap +/usr/lib/snapd/system-shutdown +/usr/lib/systemd +/usr/lib/systemd/system-environment-generators +/usr/lib/systemd/system-environment-generators/snapd-env-generator +/usr/lib/systemd/user +/usr/lib/systemd/user/snapd.session-agent.service 
+/usr/lib/systemd/user/snapd.session-agent.socket +/usr/lib/systemd/user/sockets.target.wants +/usr/share +/usr/share/applications +/usr/share/applications/io.snapcraft.SessionAgent.desktop +/usr/share/applications/snap-handle-link.desktop +/usr/share/bash-completion +/usr/share/bash-completion/completions +/usr/share/bash-completion/completions/snap +/usr/share/dbus-1 +/usr/share/dbus-1/services +/usr/share/dbus-1/services/io.snapcraft.Launcher.service +/usr/share/dbus-1/services/io.snapcraft.SessionAgent.service +/usr/share/dbus-1/services/io.snapcraft.Settings.service +/usr/share/dbus-1/session.d +/usr/share/dbus-1/session.d/snapd.session-services.conf +/usr/share/dbus-1/system.d +/usr/share/dbus-1/system.d/snapd.system-services.conf +/usr/share/doc +/usr/share/doc/snapd +/usr/share/doc/snapd/changelog.gz +/usr/share/doc/snapd/copyright +/usr/share/fish +/usr/share/fish/vendor_conf.d +/usr/share/fish/vendor_conf.d/snapd.fish +/usr/share/man +/usr/share/man/man8 +/usr/share/man/man8/snap-confine.8.gz +/usr/share/man/man8/snap-discard-ns.8.gz +/usr/share/man/man8/snap.8.gz +/usr/share/man/man8/snapd-env-generator.8.gz +/usr/share/polkit-1 +/usr/share/polkit-1/actions +/usr/share/polkit-1/actions/io.snapcraft.snapd.policy +/usr/share/zsh +/usr/share/zsh/vendor-completions +/usr/share/zsh/vendor-completions/_snap +/var +/var/cache +/var/cache/snapd +/var/lib +/var/lib/snapd +/var/lib/snapd/apparmor +/var/lib/snapd/apparmor/snap-confine +/var/lib/snapd/auto-import +/var/lib/snapd/dbus-1 +/var/lib/snapd/dbus-1/services +/var/lib/snapd/dbus-1/system-services +/var/lib/snapd/desktop +/var/lib/snapd/desktop/applications +/var/lib/snapd/environment +/var/lib/snapd/firstboot +/var/lib/snapd/inhibit +/var/lib/snapd/lib +/var/lib/snapd/lib/gl +/var/lib/snapd/lib/gl32 +/var/lib/snapd/lib/glvnd +/var/lib/snapd/lib/vulkan +/var/lib/snapd/snaps +/var/lib/snapd/snaps/partial +/var/lib/snapd/ssl +/var/lib/snapd/ssl/store-certs +/var/lib/snapd/void +/var/snap 
+/lib/udev/snappy-app-dev +/usr/bin/snapctl +/usr/bin/ubuntu-core-launcher +/usr/lib/systemd/user/sockets.target.wants/snapd.session-agent.socket +Required packages are present: git wget curl tar snapd +2022-06-29T14:39:47+02:00 INFO Waiting for automatic snapd restart... +jq 1.5+dfsg-1 from Michael Vogt (mvo) installed +Track start release: https://osm.etsi.org/InstallLog.php?&installation_id=1656506392-cy5tVzv9sM0qcg7Y&local_ts=1656506392&event=start&operation=release&value=ReleaseELEVEN&comment=&tags= +Track start docker_tag: https://osm.etsi.org/InstallLog.php?&installation_id=1656506392-cy5tVzv9sM0qcg7Y&local_ts=1656506392&event=start&operation=docker_tag&value=11&comment=&tags= +Track start installation_type: https://osm.etsi.org/InstallLog.php?&installation_id=1656506392-cy5tVzv9sM0qcg7Y&local_ts=1656506392&event=start&operation=installation_type&value=Default&comment=&tags= +Track checks checkingroot_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656506392-cy5tVzv9sM0qcg7Y&local_ts=1656506392&event=checks&operation=checkingroot_ok&value=&comment=&tags= +Track checks noroot_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656506392-cy5tVzv9sM0qcg7Y&local_ts=1656506393&event=checks&operation=noroot_ok&value=&comment=&tags= +The installation will do the following + 1. Install and configure LXD + 2. Install juju + 3. Install docker CE + 4. Disable swap space + 5. Install and initialize Kubernetes + as pre-requirements. + Do you want to proceed (Y/n)? Y +Track checks proceed_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656506392-cy5tVzv9sM0qcg7Y&local_ts=1656506436&event=checks&operation=proceed_ok&value=&comment=&tags= +Installing OSM +Determining IP address of the interface with the default route +* Applying /etc/sysctl.d/10-console-messages.conf ... +kernel.printk = 4 4 1 7 +* Applying /etc/sysctl.d/10-ipv6-privacy.conf ... 
+net.ipv6.conf.all.use_tempaddr = 2 +net.ipv6.conf.default.use_tempaddr = 2 +* Applying /etc/sysctl.d/10-kernel-hardening.conf ... +kernel.kptr_restrict = 1 +* Applying /etc/sysctl.d/10-link-restrictions.conf ... +fs.protected_hardlinks = 1 +fs.protected_symlinks = 1 +* Applying /etc/sysctl.d/10-lxd-inotify.conf ... +fs.inotify.max_user_instances = 1024 +* Applying /etc/sysctl.d/10-magic-sysrq.conf ... +kernel.sysrq = 176 +* Applying /etc/sysctl.d/10-network-security.conf ... +net.ipv4.conf.default.rp_filter = 1 +net.ipv4.conf.all.rp_filter = 1 +net.ipv4.tcp_syncookies = 1 +* Applying /etc/sysctl.d/10-ptrace.conf ... +kernel.yama.ptrace_scope = 1 +* Applying /etc/sysctl.d/10-zeropage.conf ... +vm.mmap_min_addr = 65536 +* Applying /usr/lib/sysctl.d/50-default.conf ... +net.ipv4.conf.all.promote_secondaries = 1 +net.core.default_qdisc = fq_codel +* Applying /etc/sysctl.d/60-lxd-production.conf ... +fs.inotify.max_queued_events = 1048576 +fs.inotify.max_user_instances = 1048576 +fs.inotify.max_user_watches = 1048576 +vm.max_map_count = 262144 +kernel.dmesg_restrict = 1 +net.ipv4.neigh.default.gc_thresh3 = 8192 +net.ipv6.neigh.default.gc_thresh3 = 8192 +sysctl: setting key "net.core.bpf_jit_limit": Invalid argument +net.core.bpf_jit_limit = 3000000000 +kernel.keys.maxkeys = 2000 +kernel.keys.maxbytes = 2000000 +* Applying /etc/sysctl.d/99-cloudimg-ipv6.conf ... +net.ipv6.conf.all.use_tempaddr = 0 +net.ipv6.conf.default.use_tempaddr = 0 +* Applying /etc/sysctl.d/99-sysctl.conf ... +* Applying /etc/sysctl.conf ... +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following packages will be REMOVED: + liblxc-common* liblxc1* lxcfs* lxd* lxd-client* +0 upgraded, 0 newly installed, 5 to remove and 15 not upgraded. 
+After this operation, 34.2 MB disk space will be freed. +(Reading database ... 61647 files and directories currently installed.) +Removing lxd (3.0.3-0ubuntu1~18.04.2) ... +Removing lxd dnsmasq configuration +Removing lxcfs (3.0.3-0ubuntu1~18.04.2) ... +Removing lxd-client (3.0.3-0ubuntu1~18.04.2) ... +Removing liblxc-common (3.0.3-0ubuntu1~18.04.1) ... +Removing liblxc1 (3.0.3-0ubuntu1~18.04.1) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +Processing triggers for libc-bin (2.27-3ubuntu1.6) ... +(Reading database ... 61401 files and directories currently installed.) +Purging configuration files for liblxc-common (3.0.3-0ubuntu1~18.04.1) ... +Purging configuration files for lxd (3.0.3-0ubuntu1~18.04.2) ... +Purging configuration files for lxcfs (3.0.3-0ubuntu1~18.04.2) ... +Processing triggers for systemd (237-3ubuntu10.53) ... +Processing triggers for ureadahead (0.100.0-21) ... +lxd (4.0/stable) 4.0.9-8e2046b from Canonical** installed +To start your first container, try: lxc launch ubuntu:20.04 +Or for a virtual machine: lxc launch ubuntu:20.04 --vm + +Track prereq prereqok_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656506392-cy5tVzv9sM0qcg7Y&local_ts=1656506480&event=prereq&operation=prereqok_ok&value=&comment=&tags= +DEBUG_INSTALL= +DOCKER_PROXY_URL= +USER=ubuntu +Installing Docker CE ... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +ca-certificates is already the newest version (20211016~18.04.1). +ca-certificates set to manually installed. +software-properties-common is already the newest version (0.96.24.32.18). +software-properties-common set to manually installed. +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. 
+The following NEW packages will be installed: + apt-transport-https +0 upgraded, 1 newly installed, 0 to remove and 15 not upgraded. +Need to get 4348 B of archives. +After this operation, 154 kB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 apt-transport-https all 1.6.14 [4348 B] +Fetched 4348 B in 0s (78.7 kB/s) +Selecting previously unselected package apt-transport-https. +(Reading database ... 61384 files and directories currently installed.) +Preparing to unpack .../apt-transport-https_1.6.14_all.deb ... +Unpacking apt-transport-https (1.6.14) ... +Setting up apt-transport-https (1.6.14) ... +OK +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:2 http://security.ubuntu.com/ubuntu bionic-security InRelease +Get:3 https://download.docker.com/linux/ubuntu bionic InRelease [64.4 kB] +Hit:4 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:6 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Get:7 https://download.docker.com/linux/ubuntu bionic/stable amd64 Packages [26.4 kB] +Fetched 90.9 kB in 1s (86.0 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. 
+The following additional packages will be installed: + containerd.io docker-ce-cli docker-ce-rootless-extras docker-scan-plugin + libltdl7 pigz +Suggested packages: + aufs-tools cgroupfs-mount | cgroup-lite +Recommended packages: + slirp4netns +The following NEW packages will be installed: + containerd.io docker-ce docker-ce-cli docker-ce-rootless-extras + docker-scan-plugin libltdl7 pigz +0 upgraded, 7 newly installed, 0 to remove and 15 not upgraded. +Need to get 101 MB of archives. +After this operation, 422 MB of additional disk space will be used. +Get:1 https://download.docker.com/linux/ubuntu bionic/stable amd64 containerd.io amd64 1.6.6-1 [28.1 MB] +Get:2 http://archive.ubuntu.com/ubuntu bionic/universe amd64 pigz amd64 2.4-1 [57.4 kB] +Get:3 http://archive.ubuntu.com/ubuntu bionic/main amd64 libltdl7 amd64 2.4.6-2 [38.8 kB] +Get:4 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-ce-cli amd64 5:20.10.17~3-0~ubuntu-bionic [40.6 MB] +Get:5 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-ce amd64 5:20.10.17~3-0~ubuntu-bionic [21.0 MB] +Get:6 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-ce-rootless-extras amd64 5:20.10.17~3-0~ubuntu-bionic [8163 kB] +Get:7 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-scan-plugin amd64 0.17.0~ubuntu-bionic [3521 kB] +Fetched 101 MB in 3s (32.1 MB/s) +Selecting previously unselected package pigz. +(Reading database ... 61388 files and directories currently installed.) +Preparing to unpack .../0-pigz_2.4-1_amd64.deb ... +Unpacking pigz (2.4-1) ... +Selecting previously unselected package containerd.io. +Preparing to unpack .../1-containerd.io_1.6.6-1_amd64.deb ... +Unpacking containerd.io (1.6.6-1) ... +Selecting previously unselected package docker-ce-cli. +Preparing to unpack .../2-docker-ce-cli_5%3a20.10.17~3-0~ubuntu-bionic_amd64.deb ... +Unpacking docker-ce-cli (5:20.10.17~3-0~ubuntu-bionic) ... 
+Selecting previously unselected package docker-ce. +Preparing to unpack .../3-docker-ce_5%3a20.10.17~3-0~ubuntu-bionic_amd64.deb ... +Unpacking docker-ce (5:20.10.17~3-0~ubuntu-bionic) ... +Selecting previously unselected package docker-ce-rootless-extras. +Preparing to unpack .../4-docker-ce-rootless-extras_5%3a20.10.17~3-0~ubuntu-bionic_amd64.deb ... +Unpacking docker-ce-rootless-extras (5:20.10.17~3-0~ubuntu-bionic) ... +Selecting previously unselected package docker-scan-plugin. +Preparing to unpack .../5-docker-scan-plugin_0.17.0~ubuntu-bionic_amd64.deb ... +Unpacking docker-scan-plugin (0.17.0~ubuntu-bionic) ... +Selecting previously unselected package libltdl7:amd64. +Preparing to unpack .../6-libltdl7_2.4.6-2_amd64.deb ... +Unpacking libltdl7:amd64 (2.4.6-2) ... +Setting up containerd.io (1.6.6-1) ... +Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /lib/systemd/system/containerd.service. +Setting up docker-ce-rootless-extras (5:20.10.17~3-0~ubuntu-bionic) ... +Setting up docker-scan-plugin (0.17.0~ubuntu-bionic) ... +Setting up libltdl7:amd64 (2.4.6-2) ... +Setting up docker-ce-cli (5:20.10.17~3-0~ubuntu-bionic) ... +Setting up pigz (2.4-1) ... +Setting up docker-ce (5:20.10.17~3-0~ubuntu-bionic) ... +Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /lib/systemd/system/docker.service. +Created symlink /etc/systemd/system/sockets.target.wants/docker.socket → /lib/systemd/system/docker.socket. +Processing triggers for libc-bin (2.27-3ubuntu1.6) ... +Processing triggers for systemd (237-3ubuntu10.53) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +Processing triggers for ureadahead (0.100.0-21) ... +Adding user to group 'docker' +... 
restarted Docker service +Client: Docker Engine - Community + Version: 20.10.17 + API version: 1.41 + Go version: go1.17.11 + Git commit: 100c701 + Built: Mon Jun 6 23:02:56 2022 + OS/Arch: linux/amd64 + Context: default + Experimental: true + +Server: Docker Engine - Community + Engine: + Version: 20.10.17 + API version: 1.41 (minimum version 1.12) + Go version: go1.17.11 + Git commit: a89b842 + Built: Mon Jun 6 23:01:02 2022 + OS/Arch: linux/amd64 + Experimental: false + containerd: + Version: 1.6.6 + GitCommit: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1 + runc: + Version: 1.1.2 + GitCommit: v1.1.2-0-ga916309 + docker-init: + Version: 0.19.0 + GitCommit: de40ad0 +... Docker CE installation done +Track docker_ce docker_ce_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656506392-cy5tVzv9sM0qcg7Y&local_ts=1656506540&event=docker_ce&operation=docker_ce_ok&value=&comment=&tags= +Creating folders for installation +DEBUG_INSTALL= +DEFAULT_IP=192.168.64.23 +OSM_DEVOPS=/usr/share/osm-devops +OSM_DOCKER_WORK_DIR=/etc/osm/docker +INSTALL_K8S_MONITOR= +HOME=/home/ubuntu +Hit:1 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:2 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:6 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +apt-transport-https is already the newest version (1.6.14). +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. 
+0 upgraded, 0 newly installed, 0 to remove and 15 not upgraded. +Hit:1 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:2 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:3 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:5 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:6 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +apt-transport-https is already the newest version (1.6.14). +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +0 upgraded, 0 newly installed, 0 to remove and 15 not upgraded. +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:2 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:3 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:6 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:7 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Get:5 https://packages.cloud.google.com/apt kubernetes-xenial InRelease [9383 B] +Get:8 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 Packages [57.2 kB] +Fetched 66.6 kB in 1s (49.6 kB/s) +Reading package lists... 
+W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Hit:1 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:3 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:6 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Hit:7 https://packages.cloud.google.com/apt kubernetes-xenial InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Installing Kubernetes Packages ... +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following additional packages will be installed: + conntrack cri-tools kubernetes-cni socat +The following NEW packages will be installed: + conntrack cri-tools kubeadm kubectl kubelet kubernetes-cni socat +0 upgraded, 7 newly installed, 0 to remove and 15 not upgraded. +Need to get 74.6 MB of archives. +After this operation, 323 MB of additional disk space will be used. 
+Get:1 http://archive.ubuntu.com/ubuntu bionic/main amd64 conntrack amd64 1:1.4.4+snapshot20161117-6ubuntu2 [30.6 kB] +Get:2 http://archive.ubuntu.com/ubuntu bionic/main amd64 socat amd64 1.7.3.2-2ubuntu2 [342 kB] +Get:3 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 cri-tools amd64 1.24.2-00 [12.3 MB] +Get:4 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubernetes-cni amd64 0.8.7-00 [25.0 MB] +Get:5 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubelet amd64 1.23.3-00 [19.5 MB] +Get:6 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubectl amd64 1.23.3-00 [8929 kB] +Get:7 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubeadm amd64 1.23.3-00 [8580 kB] +Fetched 74.6 MB in 4s (16.7 MB/s) +Selecting previously unselected package conntrack. +(Reading database ... 61640 files and directories currently installed.) +Preparing to unpack .../0-conntrack_1%3a1.4.4+snapshot20161117-6ubuntu2_amd64.deb ... +Unpacking conntrack (1:1.4.4+snapshot20161117-6ubuntu2) ... +Selecting previously unselected package cri-tools. +Preparing to unpack .../1-cri-tools_1.24.2-00_amd64.deb ... +Unpacking cri-tools (1.24.2-00) ... +Selecting previously unselected package kubernetes-cni. +Preparing to unpack .../2-kubernetes-cni_0.8.7-00_amd64.deb ... +Unpacking kubernetes-cni (0.8.7-00) ... +Selecting previously unselected package socat. +Preparing to unpack .../3-socat_1.7.3.2-2ubuntu2_amd64.deb ... +Unpacking socat (1.7.3.2-2ubuntu2) ... +Selecting previously unselected package kubelet. +Preparing to unpack .../4-kubelet_1.23.3-00_amd64.deb ... +Unpacking kubelet (1.23.3-00) ... +Selecting previously unselected package kubectl. +Preparing to unpack .../5-kubectl_1.23.3-00_amd64.deb ... +Unpacking kubectl (1.23.3-00) ... +Selecting previously unselected package kubeadm. +Preparing to unpack .../6-kubeadm_1.23.3-00_amd64.deb ... +Unpacking kubeadm (1.23.3-00) ... 
+Setting up conntrack (1:1.4.4+snapshot20161117-6ubuntu2) ... +Setting up kubernetes-cni (0.8.7-00) ... +Setting up cri-tools (1.24.2-00) ... +Setting up socat (1.7.3.2-2ubuntu2) ... +Setting up kubelet (1.23.3-00) ... +Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /lib/systemd/system/kubelet.service. +Setting up kubectl (1.23.3-00) ... +Setting up kubeadm (1.23.3-00) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... + KUBELET_EXTRA_ARGS="--cgroup-driver=cgroupfs" +kubelet set on hold. +kubeadm set on hold. +kubectl set on hold. +Track k8scluster install_k8s_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656506392-cy5tVzv9sM0qcg7Y&local_ts=1656506591&event=k8scluster&operation=install_k8s_ok&value=&comment=&tags= +I0629 14:43:12.608722 11354 version.go:255] remote version is much newer: v1.24.2; falling back to: stable-1.23 +[init] Using Kubernetes version: v1.23.8 +[preflight] Running pre-flight checks +[preflight] Pulling images required for setting up a Kubernetes cluster +[preflight] This might take a minute or two, depending on the speed of your internet connection +[preflight] You can also perform this action in beforehand using 'kubeadm config images pull' +[certs] Using certificateDir folder "/etc/kubernetes/pki" +[certs] Generating "ca" certificate and key +[certs] Generating "apiserver" certificate and key +[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local osm11] and IPs [10.96.0.1 192.168.64.23] +[certs] Generating "apiserver-kubelet-client" certificate and key +[certs] Generating "front-proxy-ca" certificate and key +[certs] Generating "front-proxy-client" certificate and key +[certs] Generating "etcd/ca" certificate and key +[certs] Generating "etcd/server" certificate and key +[certs] etcd/server serving cert is signed for DNS names [localhost osm11] and IPs [192.168.64.23 127.0.0.1 ::1] +[certs] 
Generating "etcd/peer" certificate and key +[certs] etcd/peer serving cert is signed for DNS names [localhost osm11] and IPs [192.168.64.23 127.0.0.1 ::1] +[certs] Generating "etcd/healthcheck-client" certificate and key +[certs] Generating "apiserver-etcd-client" certificate and key +[certs] Generating "sa" key and public key +[kubeconfig] Using kubeconfig folder "/etc/kubernetes" +[kubeconfig] Writing "admin.conf" kubeconfig file +[kubeconfig] Writing "kubelet.conf" kubeconfig file +[kubeconfig] Writing "controller-manager.conf" kubeconfig file +[kubeconfig] Writing "scheduler.conf" kubeconfig file +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Starting the kubelet +[control-plane] Using manifest folder "/etc/kubernetes/manifests" +[control-plane] Creating static Pod manifest for "kube-apiserver" +[control-plane] Creating static Pod manifest for "kube-controller-manager" +[control-plane] Creating static Pod manifest for "kube-scheduler" +[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" +[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s +[apiclient] All control plane components are healthy after 17.506099 seconds +[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace +[kubelet] Creating a ConfigMap "kubelet-config-1.23" in namespace kube-system with the configuration for the kubelets in the cluster +NOTE: The "kubelet-config-1.23" naming of the kubelet ConfigMap is deprecated. Once the UnversionedKubeletConfigMap feature gate graduates to Beta the default name will become just "kubelet-config". Kubeadm upgrade will handle this transition transparently. +[upload-certs] Skipping phase. 
Please see --upload-certs +[mark-control-plane] Marking the node osm11 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers] +[mark-control-plane] Marking the node osm11 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] +[bootstrap-token] Using token: mihz47.8il5yr3lovnr8rl3 +[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles +[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes +[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials +[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token +[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster +[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace +[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key +[addons] Applied essential addon: CoreDNS +[addons] Applied essential addon: kube-proxy + +Your Kubernetes control-plane has initialized successfully! + +To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + +Alternatively, if you are the root user, you can run: + + export KUBECONFIG=/etc/kubernetes/admin.conf + +You should now deploy a pod network to the cluster. 
+Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + https://kubernetes.io/docs/concepts/cluster-administration/addons/ + +Then you can join any number of worker nodes by running the following on each as root: + +kubeadm join 192.168.64.23:6443 --token mihz47.8il5yr3lovnr8rl3 \ + --discovery-token-ca-cert-hash sha256:bba24d67d8ad9e2fcc690812be495a1623c782b3ebfb1327f20dc30835b92e51 +Track k8scluster init_k8s_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656506392-cy5tVzv9sM0qcg7Y&local_ts=1656506668&event=k8scluster&operation=init_k8s_ok&value=&comment=&tags= +Error from server (NotFound): namespaces "osm" not found +wget: unrecognized option '--retry-on-host-error' +Usage: wget [OPTION]... [URL]... + +Try `wget --help' for more options. + +### Wed Jun 29 14:44:29 CEST 2022 deploy_cni_provider: FATAL error: Cannot Install Flannel because /tmp/flannel.yq7bEX/kube-flannel.yml was not found. Maybe the file https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml is temporarily not accessible +BACKTRACE: +### FATAL /usr/share/osm-devops/common/logging 46 +### deploy_cni_provider /usr/share/osm-devops/installers/install_kubeadm_cluster.sh 65 +### main /usr/share/osm-devops/installers/install_kubeadm_cluster.sh 298 +### MAIN non_file_source 0 +------- + +### Wed Jun 29 14:44:29 CEST 2022 install_osm: FATAL error: k8scluster install_kubeadm_cluster.sh failed +Track end fatal: https://osm.etsi.org/InstallLog.php?&installation_id=1656506392-cy5tVzv9sM0qcg7Y&local_ts=1656506669&event=end&operation=fatal&value=k8scluster&comment='install_kubeadm_cluster.sh_failed'&tags= +BACKTRACE: +### FATAL_TRACK /usr/share/osm-devops/common/logging 40 +### install_osm /usr/share/osm-devops/installers/full_install_osm.sh 678 +### main /usr/share/osm-devops/installers/full_install_osm.sh 1194 +### MAIN non_file_source 0 +### MAIN non_file_source +### MAIN non_file_source +### MAIN non_file_source +### MAIN non_file_source 
+### MAIN non_file_source +### MAIN non_file_source +------- diff --git a/_tmp/custom-osm-lcm/osm-install/install.log b/_tmp/custom-osm-lcm/osm-install/install.log new file mode 100644 index 0000000..6549470 --- /dev/null +++ b/_tmp/custom-osm-lcm/osm-install/install.log @@ -0,0 +1,4051 @@ +Checking required packages to add ETSI OSM debian repo: software-properties-common apt-transport-https +OK +Get:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease [4086 B] +Hit:2 http://archive.ubuntu.com/ubuntu focal InRelease +Get:3 http://security.ubuntu.com/ubuntu focal-security InRelease [114 kB] +Get:4 http://archive.ubuntu.com/ubuntu focal-updates InRelease [114 kB] +Get:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable/devops amd64 Packages [483 B] +Get:6 http://archive.ubuntu.com/ubuntu focal-backports InRelease [108 kB] +Get:7 http://security.ubuntu.com/ubuntu focal-security/main amd64 Packages [1583 kB] +Get:8 http://archive.ubuntu.com/ubuntu focal/universe amd64 Packages [8628 kB] +Get:9 http://security.ubuntu.com/ubuntu focal-security/main Translation-en [268 kB] +Get:10 http://security.ubuntu.com/ubuntu focal-security/main amd64 c-n-f Metadata [10.6 kB] +Get:11 http://security.ubuntu.com/ubuntu focal-security/restricted amd64 Packages [1042 kB] +Get:12 http://security.ubuntu.com/ubuntu focal-security/restricted Translation-en [148 kB] +Get:13 http://security.ubuntu.com/ubuntu focal-security/restricted amd64 c-n-f Metadata [572 B] +Get:14 http://security.ubuntu.com/ubuntu focal-security/universe amd64 Packages [708 kB] +Get:15 http://security.ubuntu.com/ubuntu focal-security/universe Translation-en [127 kB] +Get:16 http://security.ubuntu.com/ubuntu focal-security/universe amd64 c-n-f Metadata [14.6 kB] +Get:17 http://security.ubuntu.com/ubuntu focal-security/multiverse amd64 Packages [22.2 kB] +Get:18 http://security.ubuntu.com/ubuntu focal-security/multiverse Translation-en [5376 B] +Get:19 
http://security.ubuntu.com/ubuntu focal-security/multiverse amd64 c-n-f Metadata [512 B] +Get:20 http://archive.ubuntu.com/ubuntu focal/universe Translation-en [5124 kB] +Get:21 http://archive.ubuntu.com/ubuntu focal/universe amd64 c-n-f Metadata [265 kB] +Get:22 http://archive.ubuntu.com/ubuntu focal/multiverse amd64 Packages [144 kB] +Get:23 http://archive.ubuntu.com/ubuntu focal/multiverse Translation-en [104 kB] +Get:24 http://archive.ubuntu.com/ubuntu focal/multiverse amd64 c-n-f Metadata [9136 B] +Get:25 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 Packages [1935 kB] +Get:26 http://archive.ubuntu.com/ubuntu focal-updates/main Translation-en [350 kB] +Get:27 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 c-n-f Metadata [15.6 kB] +Get:28 http://archive.ubuntu.com/ubuntu focal-updates/restricted amd64 Packages [1119 kB] +Get:29 http://archive.ubuntu.com/ubuntu focal-updates/restricted Translation-en [159 kB] +Get:30 http://archive.ubuntu.com/ubuntu focal-updates/restricted amd64 c-n-f Metadata [592 B] +Get:31 http://archive.ubuntu.com/ubuntu focal-updates/universe amd64 Packages [924 kB] +Get:32 http://archive.ubuntu.com/ubuntu focal-updates/universe Translation-en [208 kB] +Get:33 http://archive.ubuntu.com/ubuntu focal-updates/universe amd64 c-n-f Metadata [20.9 kB] +Get:34 http://archive.ubuntu.com/ubuntu focal-updates/multiverse amd64 Packages [24.4 kB] +Get:35 http://archive.ubuntu.com/ubuntu focal-updates/multiverse Translation-en [7336 B] +Get:36 http://archive.ubuntu.com/ubuntu focal-updates/multiverse amd64 c-n-f Metadata [596 B] +Get:37 http://archive.ubuntu.com/ubuntu focal-backports/main amd64 Packages [44.8 kB] +Get:38 http://archive.ubuntu.com/ubuntu focal-backports/main Translation-en [11.3 kB] +Get:39 http://archive.ubuntu.com/ubuntu focal-backports/main amd64 c-n-f Metadata [976 B] +Get:40 http://archive.ubuntu.com/ubuntu focal-backports/restricted amd64 c-n-f Metadata [116 B] +Get:41 http://archive.ubuntu.com/ubuntu 
focal-backports/universe amd64 Packages [23.7 kB] +Get:42 http://archive.ubuntu.com/ubuntu focal-backports/universe Translation-en [15.9 kB] +Get:43 http://archive.ubuntu.com/ubuntu focal-backports/universe amd64 c-n-f Metadata [860 B] +Get:44 http://archive.ubuntu.com/ubuntu focal-backports/multiverse amd64 c-n-f Metadata [116 B] +Fetched 23.4 MB in 5s (4412 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Hit:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Hit:2 http://security.ubuntu.com/ubuntu focal-security InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Hit:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Hit:2 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:3 http://security.ubuntu.com/ubuntu focal-security InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following NEW packages will be installed: + osm-devops +0 upgraded, 1 newly installed, 0 to remove and 18 not upgraded. +Need to get 731 kB of archives. +After this operation, 5661 kB of additional disk space will be used. 
+Get:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable/devops amd64 osm-devops all 11.0.3-1 [731 kB] +Fetched 731 kB in 0s (2321 kB/s) +Selecting previously unselected package osm-devops. +(Reading database ... 63796 files and directories currently installed.) +Preparing to unpack .../osm-devops_11.0.3-1_all.deb ... +Unpacking osm-devops (11.0.3-1) ... +Setting up osm-devops (11.0.3-1) ... +## Wed Jun 29 16:48:06 CEST 2022 source: INFO: logging sourced +## Wed Jun 29 16:48:06 CEST 2022 source: INFO: config sourced +## Wed Jun 29 16:48:06 CEST 2022 source: INFO: container sourced +## Wed Jun 29 16:48:06 CEST 2022 source: INFO: git_functions sourced +## Wed Jun 29 16:48:06 CEST 2022 source: INFO: track sourced +Checking required packages: git wget curl tar snapd +/. +/etc +/etc/bash_completion.d +/etc/bash_completion.d/git-prompt +/usr +/usr/bin +/usr/bin/git +/usr/bin/git-shell +/usr/lib +/usr/lib/git-core +/usr/lib/git-core/git +/usr/lib/git-core/git-add--interactive +/usr/lib/git-core/git-bisect +/usr/lib/git-core/git-credential-cache +/usr/lib/git-core/git-credential-cache--daemon +/usr/lib/git-core/git-credential-store +/usr/lib/git-core/git-daemon +/usr/lib/git-core/git-difftool--helper +/usr/lib/git-core/git-fast-import +/usr/lib/git-core/git-filter-branch +/usr/lib/git-core/git-http-backend +/usr/lib/git-core/git-http-fetch +/usr/lib/git-core/git-http-push +/usr/lib/git-core/git-imap-send +/usr/lib/git-core/git-instaweb +/usr/lib/git-core/git-legacy-stash +/usr/lib/git-core/git-merge-octopus +/usr/lib/git-core/git-merge-one-file +/usr/lib/git-core/git-merge-resolve +/usr/lib/git-core/git-mergetool +/usr/lib/git-core/git-mergetool--lib +/usr/lib/git-core/git-parse-remote +/usr/lib/git-core/git-quiltimport +/usr/lib/git-core/git-rebase--preserve-merges +/usr/lib/git-core/git-remote-http +/usr/lib/git-core/git-remote-testsvn +/usr/lib/git-core/git-request-pull +/usr/lib/git-core/git-sh-i18n +/usr/lib/git-core/git-sh-i18n--envsubst 
+/usr/lib/git-core/git-sh-prompt +/usr/lib/git-core/git-sh-setup +/usr/lib/git-core/git-shell +/usr/lib/git-core/git-submodule +/usr/lib/git-core/git-subtree +/usr/lib/git-core/git-web--browse +/usr/lib/git-core/mergetools +/usr/lib/git-core/mergetools/araxis +/usr/lib/git-core/mergetools/bc +/usr/lib/git-core/mergetools/bc3 +/usr/lib/git-core/mergetools/codecompare +/usr/lib/git-core/mergetools/deltawalker +/usr/lib/git-core/mergetools/diffmerge +/usr/lib/git-core/mergetools/diffuse +/usr/lib/git-core/mergetools/ecmerge +/usr/lib/git-core/mergetools/emerge +/usr/lib/git-core/mergetools/examdiff +/usr/lib/git-core/mergetools/guiffy +/usr/lib/git-core/mergetools/gvimdiff +/usr/lib/git-core/mergetools/gvimdiff2 +/usr/lib/git-core/mergetools/gvimdiff3 +/usr/lib/git-core/mergetools/kdiff3 +/usr/lib/git-core/mergetools/kompare +/usr/lib/git-core/mergetools/meld +/usr/lib/git-core/mergetools/opendiff +/usr/lib/git-core/mergetools/p4merge +/usr/lib/git-core/mergetools/smerge +/usr/lib/git-core/mergetools/tkdiff +/usr/lib/git-core/mergetools/tortoisemerge +/usr/lib/git-core/mergetools/vimdiff +/usr/lib/git-core/mergetools/vimdiff2 +/usr/lib/git-core/mergetools/vimdiff3 +/usr/lib/git-core/mergetools/winmerge +/usr/lib/git-core/mergetools/xxdiff +/usr/share +/usr/share/bash-completion +/usr/share/bash-completion/completions +/usr/share/bash-completion/completions/git +/usr/share/doc +/usr/share/doc/git +/usr/share/doc/git/NEWS.Debian.gz +/usr/share/doc/git/README.Debian +/usr/share/doc/git/README.emacs +/usr/share/doc/git/README.md +/usr/share/doc/git/README.source +/usr/share/doc/git/RelNotes +/usr/share/doc/git/RelNotes/1.5.0.1.txt +/usr/share/doc/git/RelNotes/1.5.0.2.txt +/usr/share/doc/git/RelNotes/1.5.0.3.txt +/usr/share/doc/git/RelNotes/1.5.0.4.txt +/usr/share/doc/git/RelNotes/1.5.0.5.txt +/usr/share/doc/git/RelNotes/1.5.0.6.txt +/usr/share/doc/git/RelNotes/1.5.0.7.txt +/usr/share/doc/git/RelNotes/1.5.0.txt +/usr/share/doc/git/RelNotes/1.5.1.1.txt 
+/usr/share/doc/git/RelNotes/1.5.1.2.txt +/usr/share/doc/git/RelNotes/1.5.1.3.txt +/usr/share/doc/git/RelNotes/1.5.1.4.txt +/usr/share/doc/git/RelNotes/1.5.1.5.txt +/usr/share/doc/git/RelNotes/1.5.1.6.txt +/usr/share/doc/git/RelNotes/1.5.1.txt +/usr/share/doc/git/RelNotes/1.5.2.1.txt +/usr/share/doc/git/RelNotes/1.5.2.2.txt +/usr/share/doc/git/RelNotes/1.5.2.3.txt +/usr/share/doc/git/RelNotes/1.5.2.4.txt +/usr/share/doc/git/RelNotes/1.5.2.5.txt +/usr/share/doc/git/RelNotes/1.5.2.txt +/usr/share/doc/git/RelNotes/1.5.3.1.txt +/usr/share/doc/git/RelNotes/1.5.3.2.txt +/usr/share/doc/git/RelNotes/1.5.3.3.txt +/usr/share/doc/git/RelNotes/1.5.3.4.txt +/usr/share/doc/git/RelNotes/1.5.3.5.txt +/usr/share/doc/git/RelNotes/1.5.3.6.txt +/usr/share/doc/git/RelNotes/1.5.3.7.txt +/usr/share/doc/git/RelNotes/1.5.3.8.txt +/usr/share/doc/git/RelNotes/1.5.3.txt +/usr/share/doc/git/RelNotes/1.5.4.1.txt +/usr/share/doc/git/RelNotes/1.5.4.2.txt +/usr/share/doc/git/RelNotes/1.5.4.3.txt +/usr/share/doc/git/RelNotes/1.5.4.4.txt +/usr/share/doc/git/RelNotes/1.5.4.5.txt +/usr/share/doc/git/RelNotes/1.5.4.6.txt +/usr/share/doc/git/RelNotes/1.5.4.7.txt +/usr/share/doc/git/RelNotes/1.5.4.txt +/usr/share/doc/git/RelNotes/1.5.5.1.txt +/usr/share/doc/git/RelNotes/1.5.5.2.txt +/usr/share/doc/git/RelNotes/1.5.5.3.txt +/usr/share/doc/git/RelNotes/1.5.5.4.txt +/usr/share/doc/git/RelNotes/1.5.5.5.txt +/usr/share/doc/git/RelNotes/1.5.5.6.txt +/usr/share/doc/git/RelNotes/1.5.5.txt +/usr/share/doc/git/RelNotes/1.5.6.1.txt +/usr/share/doc/git/RelNotes/1.5.6.2.txt +/usr/share/doc/git/RelNotes/1.5.6.3.txt +/usr/share/doc/git/RelNotes/1.5.6.4.txt +/usr/share/doc/git/RelNotes/1.5.6.5.txt +/usr/share/doc/git/RelNotes/1.5.6.6.txt +/usr/share/doc/git/RelNotes/1.5.6.txt +/usr/share/doc/git/RelNotes/1.6.0.1.txt +/usr/share/doc/git/RelNotes/1.6.0.2.txt +/usr/share/doc/git/RelNotes/1.6.0.3.txt +/usr/share/doc/git/RelNotes/1.6.0.4.txt +/usr/share/doc/git/RelNotes/1.6.0.5.txt +/usr/share/doc/git/RelNotes/1.6.0.6.txt 
+/usr/share/doc/git/RelNotes/1.6.0.txt +/usr/share/doc/git/RelNotes/1.6.1.1.txt +/usr/share/doc/git/RelNotes/1.6.1.2.txt +/usr/share/doc/git/RelNotes/1.6.1.3.txt +/usr/share/doc/git/RelNotes/1.6.1.4.txt +/usr/share/doc/git/RelNotes/1.6.1.txt +/usr/share/doc/git/RelNotes/1.6.2.1.txt +/usr/share/doc/git/RelNotes/1.6.2.2.txt +/usr/share/doc/git/RelNotes/1.6.2.3.txt +/usr/share/doc/git/RelNotes/1.6.2.4.txt +/usr/share/doc/git/RelNotes/1.6.2.5.txt +/usr/share/doc/git/RelNotes/1.6.2.txt +/usr/share/doc/git/RelNotes/1.6.3.1.txt +/usr/share/doc/git/RelNotes/1.6.3.2.txt +/usr/share/doc/git/RelNotes/1.6.3.3.txt +/usr/share/doc/git/RelNotes/1.6.3.4.txt +/usr/share/doc/git/RelNotes/1.6.3.txt +/usr/share/doc/git/RelNotes/1.6.4.1.txt +/usr/share/doc/git/RelNotes/1.6.4.2.txt +/usr/share/doc/git/RelNotes/1.6.4.3.txt +/usr/share/doc/git/RelNotes/1.6.4.4.txt +/usr/share/doc/git/RelNotes/1.6.4.5.txt +/usr/share/doc/git/RelNotes/1.6.4.txt +/usr/share/doc/git/RelNotes/1.6.5.1.txt +/usr/share/doc/git/RelNotes/1.6.5.2.txt +/usr/share/doc/git/RelNotes/1.6.5.3.txt +/usr/share/doc/git/RelNotes/1.6.5.4.txt +/usr/share/doc/git/RelNotes/1.6.5.5.txt +/usr/share/doc/git/RelNotes/1.6.5.6.txt +/usr/share/doc/git/RelNotes/1.6.5.7.txt +/usr/share/doc/git/RelNotes/1.6.5.8.txt +/usr/share/doc/git/RelNotes/1.6.5.9.txt +/usr/share/doc/git/RelNotes/1.6.5.txt +/usr/share/doc/git/RelNotes/1.6.6.1.txt +/usr/share/doc/git/RelNotes/1.6.6.2.txt +/usr/share/doc/git/RelNotes/1.6.6.3.txt +/usr/share/doc/git/RelNotes/1.6.6.txt +/usr/share/doc/git/RelNotes/1.7.0.1.txt +/usr/share/doc/git/RelNotes/1.7.0.2.txt +/usr/share/doc/git/RelNotes/1.7.0.3.txt +/usr/share/doc/git/RelNotes/1.7.0.4.txt +/usr/share/doc/git/RelNotes/1.7.0.5.txt +/usr/share/doc/git/RelNotes/1.7.0.6.txt +/usr/share/doc/git/RelNotes/1.7.0.7.txt +/usr/share/doc/git/RelNotes/1.7.0.8.txt +/usr/share/doc/git/RelNotes/1.7.0.9.txt +/usr/share/doc/git/RelNotes/1.7.0.txt +/usr/share/doc/git/RelNotes/1.7.1.1.txt +/usr/share/doc/git/RelNotes/1.7.1.2.txt 
+/usr/share/doc/git/RelNotes/1.7.1.3.txt +/usr/share/doc/git/RelNotes/1.7.1.4.txt +/usr/share/doc/git/RelNotes/1.7.1.txt +/usr/share/doc/git/RelNotes/1.7.10.1.txt +/usr/share/doc/git/RelNotes/1.7.10.2.txt +/usr/share/doc/git/RelNotes/1.7.10.3.txt +/usr/share/doc/git/RelNotes/1.7.10.4.txt +/usr/share/doc/git/RelNotes/1.7.10.5.txt +/usr/share/doc/git/RelNotes/1.7.10.txt +/usr/share/doc/git/RelNotes/1.7.11.1.txt +/usr/share/doc/git/RelNotes/1.7.11.2.txt +/usr/share/doc/git/RelNotes/1.7.11.3.txt +/usr/share/doc/git/RelNotes/1.7.11.4.txt +/usr/share/doc/git/RelNotes/1.7.11.5.txt +/usr/share/doc/git/RelNotes/1.7.11.6.txt +/usr/share/doc/git/RelNotes/1.7.11.7.txt +/usr/share/doc/git/RelNotes/1.7.11.txt +/usr/share/doc/git/RelNotes/1.7.12.1.txt +/usr/share/doc/git/RelNotes/1.7.12.2.txt +/usr/share/doc/git/RelNotes/1.7.12.3.txt +/usr/share/doc/git/RelNotes/1.7.12.4.txt +/usr/share/doc/git/RelNotes/1.7.12.txt +/usr/share/doc/git/RelNotes/1.7.2.1.txt +/usr/share/doc/git/RelNotes/1.7.2.2.txt +/usr/share/doc/git/RelNotes/1.7.2.3.txt +/usr/share/doc/git/RelNotes/1.7.2.4.txt +/usr/share/doc/git/RelNotes/1.7.2.5.txt +/usr/share/doc/git/RelNotes/1.7.2.txt +/usr/share/doc/git/RelNotes/1.7.3.1.txt +/usr/share/doc/git/RelNotes/1.7.3.2.txt +/usr/share/doc/git/RelNotes/1.7.3.3.txt +/usr/share/doc/git/RelNotes/1.7.3.4.txt +/usr/share/doc/git/RelNotes/1.7.3.5.txt +/usr/share/doc/git/RelNotes/1.7.3.txt +/usr/share/doc/git/RelNotes/1.7.4.1.txt +/usr/share/doc/git/RelNotes/1.7.4.2.txt +/usr/share/doc/git/RelNotes/1.7.4.3.txt +/usr/share/doc/git/RelNotes/1.7.4.4.txt +/usr/share/doc/git/RelNotes/1.7.4.5.txt +/usr/share/doc/git/RelNotes/1.7.4.txt +/usr/share/doc/git/RelNotes/1.7.5.1.txt +/usr/share/doc/git/RelNotes/1.7.5.2.txt +/usr/share/doc/git/RelNotes/1.7.5.3.txt +/usr/share/doc/git/RelNotes/1.7.5.4.txt +/usr/share/doc/git/RelNotes/1.7.5.txt +/usr/share/doc/git/RelNotes/1.7.6.1.txt +/usr/share/doc/git/RelNotes/1.7.6.2.txt +/usr/share/doc/git/RelNotes/1.7.6.3.txt 
+/usr/share/doc/git/RelNotes/1.7.6.4.txt +/usr/share/doc/git/RelNotes/1.7.6.5.txt +/usr/share/doc/git/RelNotes/1.7.6.6.txt +/usr/share/doc/git/RelNotes/1.7.6.txt +/usr/share/doc/git/RelNotes/1.7.7.1.txt +/usr/share/doc/git/RelNotes/1.7.7.2.txt +/usr/share/doc/git/RelNotes/1.7.7.3.txt +/usr/share/doc/git/RelNotes/1.7.7.4.txt +/usr/share/doc/git/RelNotes/1.7.7.5.txt +/usr/share/doc/git/RelNotes/1.7.7.6.txt +/usr/share/doc/git/RelNotes/1.7.7.7.txt +/usr/share/doc/git/RelNotes/1.7.7.txt +/usr/share/doc/git/RelNotes/1.7.8.1.txt +/usr/share/doc/git/RelNotes/1.7.8.2.txt +/usr/share/doc/git/RelNotes/1.7.8.3.txt +/usr/share/doc/git/RelNotes/1.7.8.4.txt +/usr/share/doc/git/RelNotes/1.7.8.5.txt +/usr/share/doc/git/RelNotes/1.7.8.6.txt +/usr/share/doc/git/RelNotes/1.7.8.txt +/usr/share/doc/git/RelNotes/1.7.9.1.txt +/usr/share/doc/git/RelNotes/1.7.9.2.txt +/usr/share/doc/git/RelNotes/1.7.9.3.txt +/usr/share/doc/git/RelNotes/1.7.9.4.txt +/usr/share/doc/git/RelNotes/1.7.9.5.txt +/usr/share/doc/git/RelNotes/1.7.9.6.txt +/usr/share/doc/git/RelNotes/1.7.9.7.txt +/usr/share/doc/git/RelNotes/1.7.9.txt +/usr/share/doc/git/RelNotes/1.8.0.1.txt +/usr/share/doc/git/RelNotes/1.8.0.2.txt +/usr/share/doc/git/RelNotes/1.8.0.3.txt +/usr/share/doc/git/RelNotes/1.8.0.txt +/usr/share/doc/git/RelNotes/1.8.1.1.txt +/usr/share/doc/git/RelNotes/1.8.1.2.txt +/usr/share/doc/git/RelNotes/1.8.1.3.txt +/usr/share/doc/git/RelNotes/1.8.1.4.txt +/usr/share/doc/git/RelNotes/1.8.1.5.txt +/usr/share/doc/git/RelNotes/1.8.1.6.txt +/usr/share/doc/git/RelNotes/1.8.1.txt +/usr/share/doc/git/RelNotes/1.8.2.1.txt +/usr/share/doc/git/RelNotes/1.8.2.2.txt +/usr/share/doc/git/RelNotes/1.8.2.3.txt +/usr/share/doc/git/RelNotes/1.8.2.txt +/usr/share/doc/git/RelNotes/1.8.3.1.txt +/usr/share/doc/git/RelNotes/1.8.3.2.txt +/usr/share/doc/git/RelNotes/1.8.3.3.txt +/usr/share/doc/git/RelNotes/1.8.3.4.txt +/usr/share/doc/git/RelNotes/1.8.3.txt +/usr/share/doc/git/RelNotes/1.8.4.1.txt +/usr/share/doc/git/RelNotes/1.8.4.2.txt 
+/usr/share/doc/git/RelNotes/1.8.4.3.txt +/usr/share/doc/git/RelNotes/1.8.4.4.txt +/usr/share/doc/git/RelNotes/1.8.4.5.txt +/usr/share/doc/git/RelNotes/1.8.4.txt +/usr/share/doc/git/RelNotes/1.8.5.1.txt +/usr/share/doc/git/RelNotes/1.8.5.2.txt +/usr/share/doc/git/RelNotes/1.8.5.3.txt +/usr/share/doc/git/RelNotes/1.8.5.4.txt +/usr/share/doc/git/RelNotes/1.8.5.5.txt +/usr/share/doc/git/RelNotes/1.8.5.6.txt +/usr/share/doc/git/RelNotes/1.8.5.txt +/usr/share/doc/git/RelNotes/1.9.0.txt +/usr/share/doc/git/RelNotes/1.9.1.txt +/usr/share/doc/git/RelNotes/1.9.2.txt +/usr/share/doc/git/RelNotes/1.9.3.txt +/usr/share/doc/git/RelNotes/1.9.4.txt +/usr/share/doc/git/RelNotes/1.9.5.txt +/usr/share/doc/git/RelNotes/2.0.0.txt +/usr/share/doc/git/RelNotes/2.0.1.txt +/usr/share/doc/git/RelNotes/2.0.2.txt +/usr/share/doc/git/RelNotes/2.0.3.txt +/usr/share/doc/git/RelNotes/2.0.4.txt +/usr/share/doc/git/RelNotes/2.0.5.txt +/usr/share/doc/git/RelNotes/2.1.0.txt +/usr/share/doc/git/RelNotes/2.1.1.txt +/usr/share/doc/git/RelNotes/2.1.2.txt +/usr/share/doc/git/RelNotes/2.1.3.txt +/usr/share/doc/git/RelNotes/2.1.4.txt +/usr/share/doc/git/RelNotes/2.10.0.txt +/usr/share/doc/git/RelNotes/2.10.1.txt +/usr/share/doc/git/RelNotes/2.10.2.txt +/usr/share/doc/git/RelNotes/2.10.3.txt +/usr/share/doc/git/RelNotes/2.10.4.txt +/usr/share/doc/git/RelNotes/2.10.5.txt +/usr/share/doc/git/RelNotes/2.11.0.txt +/usr/share/doc/git/RelNotes/2.11.1.txt +/usr/share/doc/git/RelNotes/2.11.2.txt +/usr/share/doc/git/RelNotes/2.11.3.txt +/usr/share/doc/git/RelNotes/2.11.4.txt +/usr/share/doc/git/RelNotes/2.12.0.txt +/usr/share/doc/git/RelNotes/2.12.1.txt +/usr/share/doc/git/RelNotes/2.12.2.txt +/usr/share/doc/git/RelNotes/2.12.3.txt +/usr/share/doc/git/RelNotes/2.12.4.txt +/usr/share/doc/git/RelNotes/2.12.5.txt +/usr/share/doc/git/RelNotes/2.13.0.txt +/usr/share/doc/git/RelNotes/2.13.1.txt +/usr/share/doc/git/RelNotes/2.13.2.txt +/usr/share/doc/git/RelNotes/2.13.3.txt +/usr/share/doc/git/RelNotes/2.13.4.txt 
+/usr/share/doc/git/RelNotes/2.13.5.txt +/usr/share/doc/git/RelNotes/2.13.6.txt +/usr/share/doc/git/RelNotes/2.13.7.txt +/usr/share/doc/git/RelNotes/2.14.0.txt +/usr/share/doc/git/RelNotes/2.14.1.txt +/usr/share/doc/git/RelNotes/2.14.2.txt +/usr/share/doc/git/RelNotes/2.14.3.txt +/usr/share/doc/git/RelNotes/2.14.4.txt +/usr/share/doc/git/RelNotes/2.14.5.txt +/usr/share/doc/git/RelNotes/2.14.6.txt +/usr/share/doc/git/RelNotes/2.15.0.txt +/usr/share/doc/git/RelNotes/2.15.1.txt +/usr/share/doc/git/RelNotes/2.15.2.txt +/usr/share/doc/git/RelNotes/2.15.3.txt +/usr/share/doc/git/RelNotes/2.15.4.txt +/usr/share/doc/git/RelNotes/2.16.0.txt +/usr/share/doc/git/RelNotes/2.16.1.txt +/usr/share/doc/git/RelNotes/2.16.2.txt +/usr/share/doc/git/RelNotes/2.16.3.txt +/usr/share/doc/git/RelNotes/2.16.4.txt +/usr/share/doc/git/RelNotes/2.16.5.txt +/usr/share/doc/git/RelNotes/2.16.6.txt +/usr/share/doc/git/RelNotes/2.17.0.txt +/usr/share/doc/git/RelNotes/2.17.1.txt +/usr/share/doc/git/RelNotes/2.17.2.txt +/usr/share/doc/git/RelNotes/2.17.3.txt +/usr/share/doc/git/RelNotes/2.18.0.txt +/usr/share/doc/git/RelNotes/2.18.1.txt +/usr/share/doc/git/RelNotes/2.18.2.txt +/usr/share/doc/git/RelNotes/2.19.0.txt +/usr/share/doc/git/RelNotes/2.19.1.txt +/usr/share/doc/git/RelNotes/2.19.2.txt +/usr/share/doc/git/RelNotes/2.19.3.txt +/usr/share/doc/git/RelNotes/2.2.0.txt +/usr/share/doc/git/RelNotes/2.2.1.txt +/usr/share/doc/git/RelNotes/2.2.2.txt +/usr/share/doc/git/RelNotes/2.2.3.txt +/usr/share/doc/git/RelNotes/2.20.0.txt +/usr/share/doc/git/RelNotes/2.20.1.txt +/usr/share/doc/git/RelNotes/2.20.2.txt +/usr/share/doc/git/RelNotes/2.21.0.txt +/usr/share/doc/git/RelNotes/2.21.1.txt +/usr/share/doc/git/RelNotes/2.22.0.txt +/usr/share/doc/git/RelNotes/2.22.1.txt +/usr/share/doc/git/RelNotes/2.22.2.txt +/usr/share/doc/git/RelNotes/2.23.0.txt +/usr/share/doc/git/RelNotes/2.23.1.txt +/usr/share/doc/git/RelNotes/2.24.0.txt +/usr/share/doc/git/RelNotes/2.24.1.txt +/usr/share/doc/git/RelNotes/2.25.0.txt 
+/usr/share/doc/git/RelNotes/2.25.1.txt +/usr/share/doc/git/RelNotes/2.3.0.txt +/usr/share/doc/git/RelNotes/2.3.1.txt +/usr/share/doc/git/RelNotes/2.3.10.txt +/usr/share/doc/git/RelNotes/2.3.2.txt +/usr/share/doc/git/RelNotes/2.3.3.txt +/usr/share/doc/git/RelNotes/2.3.4.txt +/usr/share/doc/git/RelNotes/2.3.5.txt +/usr/share/doc/git/RelNotes/2.3.6.txt +/usr/share/doc/git/RelNotes/2.3.7.txt +/usr/share/doc/git/RelNotes/2.3.8.txt +/usr/share/doc/git/RelNotes/2.3.9.txt +/usr/share/doc/git/RelNotes/2.4.0.txt +/usr/share/doc/git/RelNotes/2.4.1.txt +/usr/share/doc/git/RelNotes/2.4.10.txt +/usr/share/doc/git/RelNotes/2.4.11.txt +/usr/share/doc/git/RelNotes/2.4.12.txt +/usr/share/doc/git/RelNotes/2.4.2.txt +/usr/share/doc/git/RelNotes/2.4.3.txt +/usr/share/doc/git/RelNotes/2.4.4.txt +/usr/share/doc/git/RelNotes/2.4.5.txt +/usr/share/doc/git/RelNotes/2.4.6.txt +/usr/share/doc/git/RelNotes/2.4.7.txt +/usr/share/doc/git/RelNotes/2.4.8.txt +/usr/share/doc/git/RelNotes/2.4.9.txt +/usr/share/doc/git/RelNotes/2.5.0.txt +/usr/share/doc/git/RelNotes/2.5.1.txt +/usr/share/doc/git/RelNotes/2.5.2.txt +/usr/share/doc/git/RelNotes/2.5.3.txt +/usr/share/doc/git/RelNotes/2.5.4.txt +/usr/share/doc/git/RelNotes/2.5.5.txt +/usr/share/doc/git/RelNotes/2.5.6.txt +/usr/share/doc/git/RelNotes/2.6.0.txt +/usr/share/doc/git/RelNotes/2.6.1.txt +/usr/share/doc/git/RelNotes/2.6.2.txt +/usr/share/doc/git/RelNotes/2.6.3.txt +/usr/share/doc/git/RelNotes/2.6.4.txt +/usr/share/doc/git/RelNotes/2.6.5.txt +/usr/share/doc/git/RelNotes/2.6.6.txt +/usr/share/doc/git/RelNotes/2.6.7.txt +/usr/share/doc/git/RelNotes/2.7.0.txt +/usr/share/doc/git/RelNotes/2.7.1.txt +/usr/share/doc/git/RelNotes/2.7.2.txt +/usr/share/doc/git/RelNotes/2.7.3.txt +/usr/share/doc/git/RelNotes/2.7.4.txt +/usr/share/doc/git/RelNotes/2.7.5.txt +/usr/share/doc/git/RelNotes/2.7.6.txt +/usr/share/doc/git/RelNotes/2.8.0.txt +/usr/share/doc/git/RelNotes/2.8.1.txt +/usr/share/doc/git/RelNotes/2.8.2.txt +/usr/share/doc/git/RelNotes/2.8.3.txt 
+/usr/share/doc/git/RelNotes/2.8.4.txt +/usr/share/doc/git/RelNotes/2.8.5.txt +/usr/share/doc/git/RelNotes/2.8.6.txt +/usr/share/doc/git/RelNotes/2.9.0.txt +/usr/share/doc/git/RelNotes/2.9.1.txt +/usr/share/doc/git/RelNotes/2.9.2.txt +/usr/share/doc/git/RelNotes/2.9.3.txt +/usr/share/doc/git/RelNotes/2.9.4.txt +/usr/share/doc/git/RelNotes/2.9.5.txt +/usr/share/doc/git/changelog.Debian.gz +/usr/share/doc/git/contrib +/usr/share/doc/git/contrib/README +/usr/share/doc/git/contrib/buildsystems +/usr/share/doc/git/contrib/buildsystems/Generators +/usr/share/doc/git/contrib/buildsystems/Generators/QMake.pm +/usr/share/doc/git/contrib/buildsystems/Generators/Vcproj.pm +/usr/share/doc/git/contrib/buildsystems/Generators/Vcxproj.pm +/usr/share/doc/git/contrib/buildsystems/Generators.pm +/usr/share/doc/git/contrib/buildsystems/engine.pl +/usr/share/doc/git/contrib/buildsystems/generate +/usr/share/doc/git/contrib/buildsystems/parse.pl +/usr/share/doc/git/contrib/coccinelle +/usr/share/doc/git/contrib/coccinelle/README +/usr/share/doc/git/contrib/coccinelle/array.cocci +/usr/share/doc/git/contrib/coccinelle/commit.cocci +/usr/share/doc/git/contrib/coccinelle/flex_alloc.cocci +/usr/share/doc/git/contrib/coccinelle/free.cocci +/usr/share/doc/git/contrib/coccinelle/hashmap.cocci +/usr/share/doc/git/contrib/coccinelle/object_id.cocci +/usr/share/doc/git/contrib/coccinelle/preincr.cocci +/usr/share/doc/git/contrib/coccinelle/qsort.cocci +/usr/share/doc/git/contrib/coccinelle/strbuf.cocci +/usr/share/doc/git/contrib/coccinelle/swap.cocci +/usr/share/doc/git/contrib/coccinelle/the_repository.pending.cocci +/usr/share/doc/git/contrib/coccinelle/xstrdup_or_null.cocci +/usr/share/doc/git/contrib/contacts +/usr/share/doc/git/contrib/contacts/Makefile +/usr/share/doc/git/contrib/contacts/git-contacts +/usr/share/doc/git/contrib/contacts/git-contacts.txt +/usr/share/doc/git/contrib/coverage-diff.sh +/usr/share/doc/git/contrib/credential +/usr/share/doc/git/contrib/credential/gnome-keyring 
+/usr/share/doc/git/contrib/credential/gnome-keyring/Makefile +/usr/share/doc/git/contrib/credential/gnome-keyring/git-credential-gnome-keyring.c +/usr/share/doc/git/contrib/credential/libsecret +/usr/share/doc/git/contrib/credential/libsecret/Makefile +/usr/share/doc/git/contrib/credential/libsecret/git-credential-libsecret.c +/usr/share/doc/git/contrib/credential/netrc +/usr/share/doc/git/contrib/credential/netrc/Makefile +/usr/share/doc/git/contrib/credential/netrc/git-credential-netrc +/usr/share/doc/git/contrib/credential/netrc/t-git-credential-netrc.sh +/usr/share/doc/git/contrib/credential/netrc/test.command-option-gpg +/usr/share/doc/git/contrib/credential/netrc/test.git-config-gpg +/usr/share/doc/git/contrib/credential/netrc/test.netrc +/usr/share/doc/git/contrib/credential/netrc/test.pl +/usr/share/doc/git/contrib/credential/osxkeychain +/usr/share/doc/git/contrib/credential/osxkeychain/Makefile +/usr/share/doc/git/contrib/credential/osxkeychain/git-credential-osxkeychain.c +/usr/share/doc/git/contrib/credential/wincred +/usr/share/doc/git/contrib/credential/wincred/Makefile +/usr/share/doc/git/contrib/credential/wincred/git-credential-wincred.c +/usr/share/doc/git/contrib/diff-highlight +/usr/share/doc/git/contrib/diff-highlight/DiffHighlight.pm +/usr/share/doc/git/contrib/diff-highlight/Makefile +/usr/share/doc/git/contrib/diff-highlight/README +/usr/share/doc/git/contrib/diff-highlight/diff-highlight +/usr/share/doc/git/contrib/diff-highlight/diff-highlight.perl +/usr/share/doc/git/contrib/diff-highlight/shebang.perl +/usr/share/doc/git/contrib/diff-highlight/t +/usr/share/doc/git/contrib/diff-highlight/t/Makefile +/usr/share/doc/git/contrib/diff-highlight/t/t9400-diff-highlight.sh +/usr/share/doc/git/contrib/examples +/usr/share/doc/git/contrib/examples/README +/usr/share/doc/git/contrib/fast-import +/usr/share/doc/git/contrib/fast-import/git-import.perl +/usr/share/doc/git/contrib/fast-import/git-import.sh 
+/usr/share/doc/git/contrib/fast-import/git-p4.README +/usr/share/doc/git/contrib/fast-import/import-directories.perl +/usr/share/doc/git/contrib/fast-import/import-tars.perl +/usr/share/doc/git/contrib/fast-import/import-zips.py +/usr/share/doc/git/contrib/git-jump +/usr/share/doc/git/contrib/git-jump/README +/usr/share/doc/git/contrib/git-jump/git-jump +/usr/share/doc/git/contrib/git-resurrect.sh +/usr/share/doc/git/contrib/git-shell-commands +/usr/share/doc/git/contrib/git-shell-commands/README +/usr/share/doc/git/contrib/git-shell-commands/help +/usr/share/doc/git/contrib/git-shell-commands/list +/usr/share/doc/git/contrib/hg-to-git +/usr/share/doc/git/contrib/hg-to-git/hg-to-git.py +/usr/share/doc/git/contrib/hg-to-git/hg-to-git.txt +/usr/share/doc/git/contrib/long-running-filter +/usr/share/doc/git/contrib/long-running-filter/example.pl +/usr/share/doc/git/contrib/persistent-https +/usr/share/doc/git/contrib/persistent-https/Makefile +/usr/share/doc/git/contrib/persistent-https/README +/usr/share/doc/git/contrib/persistent-https/client.go +/usr/share/doc/git/contrib/persistent-https/main.go +/usr/share/doc/git/contrib/persistent-https/proxy.go +/usr/share/doc/git/contrib/persistent-https/socket.go +/usr/share/doc/git/contrib/remote-helpers +/usr/share/doc/git/contrib/remote-helpers/README +/usr/share/doc/git/contrib/remote-helpers/git-remote-bzr +/usr/share/doc/git/contrib/remote-helpers/git-remote-hg +/usr/share/doc/git/contrib/remotes2config.sh +/usr/share/doc/git/contrib/rerere-train.sh +/usr/share/doc/git/contrib/stats +/usr/share/doc/git/contrib/stats/git-common-hash +/usr/share/doc/git/contrib/stats/mailmap.pl +/usr/share/doc/git/contrib/stats/packinfo.pl +/usr/share/doc/git/contrib/subtree +/usr/share/doc/git/contrib/subtree/INSTALL +/usr/share/doc/git/contrib/subtree/Makefile +/usr/share/doc/git/contrib/subtree/README +/usr/share/doc/git/contrib/subtree/git-subtree +/usr/share/doc/git/contrib/subtree/git-subtree.sh 
+/usr/share/doc/git/contrib/subtree/git-subtree.txt +/usr/share/doc/git/contrib/subtree/t +/usr/share/doc/git/contrib/subtree/t/Makefile +/usr/share/doc/git/contrib/subtree/t/t7900-subtree.sh +/usr/share/doc/git/contrib/subtree/todo +/usr/share/doc/git/contrib/svn-fe +/usr/share/doc/git/contrib/svn-fe/Makefile +/usr/share/doc/git/contrib/svn-fe/svn-fe.c +/usr/share/doc/git/contrib/svn-fe/svn-fe.txt +/usr/share/doc/git/contrib/svn-fe/svnrdump_sim.py +/usr/share/doc/git/contrib/thunderbird-patch-inline +/usr/share/doc/git/contrib/thunderbird-patch-inline/README +/usr/share/doc/git/contrib/thunderbird-patch-inline/appp.sh +/usr/share/doc/git/contrib/update-unicode +/usr/share/doc/git/contrib/update-unicode/README +/usr/share/doc/git/contrib/update-unicode/update_unicode.sh +/usr/share/doc/git/contrib/vscode +/usr/share/doc/git/contrib/vscode/README.md +/usr/share/doc/git/contrib/vscode/init.sh +/usr/share/doc/git/contrib/workdir +/usr/share/doc/git/contrib/workdir/git-new-workdir +/usr/share/doc/git/copyright +/usr/share/git-core +/usr/share/git-core/contrib +/usr/share/git-core/contrib/hooks +/usr/share/git-core/contrib/hooks/post-receive-email +/usr/share/git-core/contrib/hooks/pre-auto-gc-battery +/usr/share/git-core/contrib/hooks/setgitperms.perl +/usr/share/git-core/contrib/hooks/update-paranoid +/usr/share/git-core/templates +/usr/share/git-core/templates/branches +/usr/share/git-core/templates/description +/usr/share/git-core/templates/hooks +/usr/share/git-core/templates/hooks/applypatch-msg.sample +/usr/share/git-core/templates/hooks/commit-msg.sample +/usr/share/git-core/templates/hooks/fsmonitor-watchman.sample +/usr/share/git-core/templates/hooks/post-update.sample +/usr/share/git-core/templates/hooks/pre-applypatch.sample +/usr/share/git-core/templates/hooks/pre-commit.sample +/usr/share/git-core/templates/hooks/pre-merge-commit.sample +/usr/share/git-core/templates/hooks/pre-push.sample +/usr/share/git-core/templates/hooks/pre-rebase.sample 
+/usr/share/git-core/templates/hooks/pre-receive.sample +/usr/share/git-core/templates/hooks/prepare-commit-msg.sample +/usr/share/git-core/templates/hooks/update.sample +/usr/share/git-core/templates/info +/usr/share/git-core/templates/info/exclude +/usr/share/gitweb +/usr/share/gitweb/gitweb.cgi +/usr/share/gitweb/static +/usr/share/gitweb/static/git-favicon.png +/usr/share/gitweb/static/git-logo.png +/usr/share/gitweb/static/gitweb.css +/usr/share/gitweb/static/gitweb.js +/usr/share/lintian +/usr/share/lintian/overrides +/usr/share/lintian/overrides/git +/usr/share/perl5 +/usr/share/perl5/Git +/usr/share/perl5/Git/I18N.pm +/usr/share/perl5/Git/IndexInfo.pm +/usr/share/perl5/Git/LoadCPAN +/usr/share/perl5/Git/LoadCPAN/Error.pm +/usr/share/perl5/Git/LoadCPAN/Mail +/usr/share/perl5/Git/LoadCPAN/Mail/Address.pm +/usr/share/perl5/Git/LoadCPAN.pm +/usr/share/perl5/Git/Packet.pm +/usr/share/perl5/Git.pm +/var +/var/lib +/var/lib/git +/usr/bin/git-receive-pack +/usr/bin/git-upload-archive +/usr/bin/git-upload-pack +/usr/lib/git-core/git-add +/usr/lib/git-core/git-am +/usr/lib/git-core/git-annotate +/usr/lib/git-core/git-apply +/usr/lib/git-core/git-archive +/usr/lib/git-core/git-bisect--helper +/usr/lib/git-core/git-blame +/usr/lib/git-core/git-branch +/usr/lib/git-core/git-bundle +/usr/lib/git-core/git-cat-file +/usr/lib/git-core/git-check-attr +/usr/lib/git-core/git-check-ignore +/usr/lib/git-core/git-check-mailmap +/usr/lib/git-core/git-check-ref-format +/usr/lib/git-core/git-checkout +/usr/lib/git-core/git-checkout-index +/usr/lib/git-core/git-cherry +/usr/lib/git-core/git-cherry-pick +/usr/lib/git-core/git-clean +/usr/lib/git-core/git-clone +/usr/lib/git-core/git-column +/usr/lib/git-core/git-commit +/usr/lib/git-core/git-commit-graph +/usr/lib/git-core/git-commit-tree +/usr/lib/git-core/git-config +/usr/lib/git-core/git-count-objects +/usr/lib/git-core/git-credential +/usr/lib/git-core/git-describe +/usr/lib/git-core/git-diff +/usr/lib/git-core/git-diff-files 
+/usr/lib/git-core/git-diff-index +/usr/lib/git-core/git-diff-tree +/usr/lib/git-core/git-difftool +/usr/lib/git-core/git-env--helper +/usr/lib/git-core/git-fast-export +/usr/lib/git-core/git-fetch +/usr/lib/git-core/git-fetch-pack +/usr/lib/git-core/git-fmt-merge-msg +/usr/lib/git-core/git-for-each-ref +/usr/lib/git-core/git-format-patch +/usr/lib/git-core/git-fsck +/usr/lib/git-core/git-fsck-objects +/usr/lib/git-core/git-gc +/usr/lib/git-core/git-get-tar-commit-id +/usr/lib/git-core/git-grep +/usr/lib/git-core/git-hash-object +/usr/lib/git-core/git-help +/usr/lib/git-core/git-index-pack +/usr/lib/git-core/git-init +/usr/lib/git-core/git-init-db +/usr/lib/git-core/git-interpret-trailers +/usr/lib/git-core/git-log +/usr/lib/git-core/git-ls-files +/usr/lib/git-core/git-ls-remote +/usr/lib/git-core/git-ls-tree +/usr/lib/git-core/git-mailinfo +/usr/lib/git-core/git-mailsplit +/usr/lib/git-core/git-merge +/usr/lib/git-core/git-merge-base +/usr/lib/git-core/git-merge-file +/usr/lib/git-core/git-merge-index +/usr/lib/git-core/git-merge-ours +/usr/lib/git-core/git-merge-recursive +/usr/lib/git-core/git-merge-subtree +/usr/lib/git-core/git-merge-tree +/usr/lib/git-core/git-mktag +/usr/lib/git-core/git-mktree +/usr/lib/git-core/git-multi-pack-index +/usr/lib/git-core/git-mv +/usr/lib/git-core/git-name-rev +/usr/lib/git-core/git-notes +/usr/lib/git-core/git-pack-objects +/usr/lib/git-core/git-pack-redundant +/usr/lib/git-core/git-pack-refs +/usr/lib/git-core/git-patch-id +/usr/lib/git-core/git-prune +/usr/lib/git-core/git-prune-packed +/usr/lib/git-core/git-pull +/usr/lib/git-core/git-push +/usr/lib/git-core/git-range-diff +/usr/lib/git-core/git-read-tree +/usr/lib/git-core/git-rebase +/usr/lib/git-core/git-receive-pack +/usr/lib/git-core/git-reflog +/usr/lib/git-core/git-remote +/usr/lib/git-core/git-remote-ext +/usr/lib/git-core/git-remote-fd +/usr/lib/git-core/git-remote-ftp +/usr/lib/git-core/git-remote-ftps +/usr/lib/git-core/git-remote-https 
+/usr/lib/git-core/git-repack +/usr/lib/git-core/git-replace +/usr/lib/git-core/git-rerere +/usr/lib/git-core/git-reset +/usr/lib/git-core/git-restore +/usr/lib/git-core/git-rev-list +/usr/lib/git-core/git-rev-parse +/usr/lib/git-core/git-revert +/usr/lib/git-core/git-rm +/usr/lib/git-core/git-send-pack +/usr/lib/git-core/git-shortlog +/usr/lib/git-core/git-show +/usr/lib/git-core/git-show-branch +/usr/lib/git-core/git-show-index +/usr/lib/git-core/git-show-ref +/usr/lib/git-core/git-sparse-checkout +/usr/lib/git-core/git-stage +/usr/lib/git-core/git-stash +/usr/lib/git-core/git-status +/usr/lib/git-core/git-stripspace +/usr/lib/git-core/git-submodule--helper +/usr/lib/git-core/git-switch +/usr/lib/git-core/git-symbolic-ref +/usr/lib/git-core/git-tag +/usr/lib/git-core/git-unpack-file +/usr/lib/git-core/git-unpack-objects +/usr/lib/git-core/git-update-index +/usr/lib/git-core/git-update-ref +/usr/lib/git-core/git-update-server-info +/usr/lib/git-core/git-upload-archive +/usr/lib/git-core/git-upload-pack +/usr/lib/git-core/git-var +/usr/lib/git-core/git-verify-commit +/usr/lib/git-core/git-verify-pack +/usr/lib/git-core/git-verify-tag +/usr/lib/git-core/git-whatchanged +/usr/lib/git-core/git-worktree +/usr/lib/git-core/git-write-tree +/usr/share/bash-completion/completions/gitk +/usr/share/doc/git/contrib/hooks +/usr/share/doc/git/contrib/persistent-https/LICENSE +/usr/share/doc/git/contrib/subtree/COPYING +/usr/share/gitweb/index.cgi +/. +/etc +/etc/wgetrc +/usr +/usr/bin +/usr/bin/wget +/usr/share +/usr/share/doc +/usr/share/doc/wget +/usr/share/doc/wget/AUTHORS +/usr/share/doc/wget/MAILING-LIST +/usr/share/doc/wget/NEWS.gz +/usr/share/doc/wget/README +/usr/share/doc/wget/changelog.Debian.gz +/usr/share/doc/wget/copyright +/usr/share/info +/usr/share/info/wget.info.gz +/usr/share/man +/usr/share/man/man1 +/usr/share/man/man1/wget.1.gz +/. 
+/usr +/usr/bin +/usr/bin/curl +/usr/share +/usr/share/doc +/usr/share/doc/curl +/usr/share/doc/curl/copyright +/usr/share/man +/usr/share/man/man1 +/usr/share/man/man1/curl.1.gz +/usr/share/zsh +/usr/share/zsh/vendor-completions +/usr/share/zsh/vendor-completions/_curl +/usr/share/doc/curl/NEWS.Debian.gz +/usr/share/doc/curl/changelog.Debian.gz +/. +/bin +/bin/tar +/etc +/usr +/usr/lib +/usr/lib/mime +/usr/lib/mime/packages +/usr/lib/mime/packages/tar +/usr/sbin +/usr/sbin/rmt-tar +/usr/sbin/tarcat +/usr/share +/usr/share/doc +/usr/share/doc/tar +/usr/share/doc/tar/AUTHORS +/usr/share/doc/tar/NEWS.gz +/usr/share/doc/tar/README.Debian +/usr/share/doc/tar/THANKS.gz +/usr/share/doc/tar/changelog.Debian.gz +/usr/share/doc/tar/copyright +/usr/share/man +/usr/share/man/man1 +/usr/share/man/man1/tar.1.gz +/usr/share/man/man1/tarcat.1.gz +/usr/share/man/man8 +/usr/share/man/man8/rmt-tar.8.gz +/etc/rmt +/. +/etc +/etc/apparmor.d +/etc/apparmor.d/usr.lib.snapd.snap-confine.real +/etc/apt +/etc/apt/apt.conf.d +/etc/apt/apt.conf.d/20snapd.conf +/etc/profile.d +/etc/profile.d/apps-bin-path.sh +/etc/xdg +/etc/xdg/autostart +/etc/xdg/autostart/snap-userd-autostart.desktop +/lib +/lib/systemd +/lib/systemd/system +/lib/systemd/system/snapd.apparmor.service +/lib/systemd/system/snapd.autoimport.service +/lib/systemd/system/snapd.core-fixup.service +/lib/systemd/system/snapd.failure.service +/lib/systemd/system/snapd.recovery-chooser-trigger.service +/lib/systemd/system/snapd.seeded.service +/lib/systemd/system/snapd.service +/lib/systemd/system/snapd.snap-repair.service +/lib/systemd/system/snapd.snap-repair.timer +/lib/systemd/system/snapd.socket +/lib/systemd/system/snapd.system-shutdown.service +/lib/systemd/system-generators +/lib/systemd/system-generators/snapd-generator +/lib/udev +/lib/udev/rules.d +/lib/udev/rules.d/66-snapd-autoimport.rules +/snap +/usr +/usr/bin +/usr/bin/snap +/usr/bin/snapfuse +/usr/lib +/usr/lib/environment.d +/usr/lib/environment.d/990-snapd.conf 
+/usr/lib/snapd +/usr/lib/snapd/complete.sh +/usr/lib/snapd/etelpmoc.sh +/usr/lib/snapd/info +/usr/lib/snapd/snap-bootstrap +/usr/lib/snapd/snap-confine +/usr/lib/snapd/snap-device-helper +/usr/lib/snapd/snap-discard-ns +/usr/lib/snapd/snap-exec +/usr/lib/snapd/snap-failure +/usr/lib/snapd/snap-gdb-shim +/usr/lib/snapd/snap-gdbserver-shim +/usr/lib/snapd/snap-mgmt +/usr/lib/snapd/snap-preseed +/usr/lib/snapd/snap-recovery-chooser +/usr/lib/snapd/snap-repair +/usr/lib/snapd/snap-seccomp +/usr/lib/snapd/snap-update-ns +/usr/lib/snapd/snapctl +/usr/lib/snapd/snapd +/usr/lib/snapd/snapd-apparmor +/usr/lib/snapd/snapd.core-fixup.sh +/usr/lib/snapd/snapd.run-from-snap +/usr/lib/snapd/system-shutdown +/usr/lib/systemd +/usr/lib/systemd/system-environment-generators +/usr/lib/systemd/system-environment-generators/snapd-env-generator +/usr/lib/systemd/user +/usr/lib/systemd/user/snapd.session-agent.service +/usr/lib/systemd/user/snapd.session-agent.socket +/usr/lib/systemd/user/sockets.target.wants +/usr/share +/usr/share/applications +/usr/share/applications/io.snapcraft.SessionAgent.desktop +/usr/share/applications/snap-handle-link.desktop +/usr/share/bash-completion +/usr/share/bash-completion/completions +/usr/share/bash-completion/completions/snap +/usr/share/dbus-1 +/usr/share/dbus-1/services +/usr/share/dbus-1/services/io.snapcraft.Launcher.service +/usr/share/dbus-1/services/io.snapcraft.SessionAgent.service +/usr/share/dbus-1/services/io.snapcraft.Settings.service +/usr/share/dbus-1/session.d +/usr/share/dbus-1/session.d/snapd.session-services.conf +/usr/share/dbus-1/system.d +/usr/share/dbus-1/system.d/snapd.system-services.conf +/usr/share/doc +/usr/share/doc/snapd +/usr/share/doc/snapd/changelog.gz +/usr/share/doc/snapd/copyright +/usr/share/fish +/usr/share/fish/vendor_conf.d +/usr/share/fish/vendor_conf.d/snapd.fish +/usr/share/man +/usr/share/man/man8 +/usr/share/man/man8/snap-confine.8.gz +/usr/share/man/man8/snap-discard-ns.8.gz 
+/usr/share/man/man8/snap.8.gz +/usr/share/man/man8/snapd-env-generator.8.gz +/usr/share/polkit-1 +/usr/share/polkit-1/actions +/usr/share/polkit-1/actions/io.snapcraft.snapd.policy +/usr/share/zsh +/usr/share/zsh/vendor-completions +/usr/share/zsh/vendor-completions/_snap +/var +/var/cache +/var/cache/snapd +/var/lib +/var/lib/snapd +/var/lib/snapd/apparmor +/var/lib/snapd/apparmor/snap-confine +/var/lib/snapd/auto-import +/var/lib/snapd/dbus-1 +/var/lib/snapd/dbus-1/services +/var/lib/snapd/dbus-1/system-services +/var/lib/snapd/desktop +/var/lib/snapd/desktop/applications +/var/lib/snapd/environment +/var/lib/snapd/firstboot +/var/lib/snapd/inhibit +/var/lib/snapd/lib +/var/lib/snapd/lib/gl +/var/lib/snapd/lib/gl32 +/var/lib/snapd/lib/glvnd +/var/lib/snapd/lib/vulkan +/var/lib/snapd/snaps +/var/lib/snapd/snaps/partial +/var/lib/snapd/ssl +/var/lib/snapd/ssl/store-certs +/var/lib/snapd/void +/var/snap +/lib/udev/snappy-app-dev +/usr/bin/snapctl +/usr/bin/ubuntu-core-launcher +/usr/lib/systemd/user/sockets.target.wants/snapd.session-agent.socket +Required packages are present: git wget curl tar snapd +jq 1.5+dfsg-1 from Michael Vogt (mvo*) installed +Track start release: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514104&event=start&operation=release&value=ReleaseELEVEN&comment=&tags= +Track start docker_tag: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514104&event=start&operation=docker_tag&value=11&comment=&tags= +Track start installation_type: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514104&event=start&operation=installation_type&value=Default&comment=&tags= +Track checks checkingroot_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514104&event=checks&operation=checkingroot_ok&value=&comment=&tags= +Track checks noroot_ok: 
https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514104&event=checks&operation=noroot_ok&value=&comment=&tags= +The installation will do the following + 1. Install and configure LXD + 2. Install juju + 3. Install docker CE + 4. Disable swap space + 5. Install and initialize Kubernetes + as pre-requirements. + Do you want to proceed (Y/n)? Y +Track checks proceed_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514168&event=checks&operation=proceed_ok&value=&comment=&tags= +Installing OSM +Determining IP address of the interface with the default route +* Applying /etc/sysctl.d/10-console-messages.conf ... +kernel.printk = 4 4 1 7 +* Applying /etc/sysctl.d/10-ipv6-privacy.conf ... +net.ipv6.conf.all.use_tempaddr = 2 +net.ipv6.conf.default.use_tempaddr = 2 +* Applying /etc/sysctl.d/10-kernel-hardening.conf ... +kernel.kptr_restrict = 1 +* Applying /etc/sysctl.d/10-link-restrictions.conf ... +fs.protected_hardlinks = 1 +fs.protected_symlinks = 1 +* Applying /etc/sysctl.d/10-magic-sysrq.conf ... +kernel.sysrq = 176 +* Applying /etc/sysctl.d/10-network-security.conf ... +net.ipv4.conf.default.rp_filter = 2 +net.ipv4.conf.all.rp_filter = 2 +* Applying /etc/sysctl.d/10-ptrace.conf ... +kernel.yama.ptrace_scope = 1 +* Applying /etc/sysctl.d/10-zeropage.conf ... +vm.mmap_min_addr = 65536 +* Applying /usr/lib/sysctl.d/50-default.conf ... +net.ipv4.conf.default.promote_secondaries = 1 +sysctl: setting key "net.ipv4.conf.all.promote_secondaries": Invalid argument +net.ipv4.ping_group_range = 0 2147483647 +net.core.default_qdisc = fq_codel +fs.protected_regular = 1 +fs.protected_fifos = 1 +* Applying /usr/lib/sysctl.d/50-pid-max.conf ... +kernel.pid_max = 4194304 +* Applying /etc/sysctl.d/60-lxd-production.conf ... 
+fs.inotify.max_queued_events = 1048576 +fs.inotify.max_user_instances = 1048576 +fs.inotify.max_user_watches = 1048576 +vm.max_map_count = 262144 +kernel.dmesg_restrict = 1 +net.ipv4.neigh.default.gc_thresh3 = 8192 +net.ipv6.neigh.default.gc_thresh3 = 8192 +sysctl: setting key "net.core.bpf_jit_limit": Invalid argument +kernel.keys.maxkeys = 2000 +kernel.keys.maxbytes = 2000000 +* Applying /etc/sysctl.d/99-cloudimg-ipv6.conf ... +net.ipv6.conf.all.use_tempaddr = 0 +net.ipv6.conf.default.use_tempaddr = 0 +* Applying /etc/sysctl.d/99-sysctl.conf ... +* Applying /usr/lib/sysctl.d/protect-links.conf ... +fs.protected_fifos = 1 +fs.protected_hardlinks = 1 +fs.protected_regular = 2 +fs.protected_symlinks = 1 +* Applying /etc/sysctl.conf ... +Reading package lists... +Building dependency tree... +Reading state information... +Package 'lxcfs' is not installed, so not removed +Package 'liblxc1' is not installed, so not removed +Package 'lxd' is not installed, so not removed +Package 'lxd-client' is not installed, so not removed +0 upgraded, 0 newly installed, 0 to remove and 18 not upgraded. +snap "lxd" is already installed, see 'snap help refresh' +To start your first container, try: lxc launch ubuntu:20.04 +Or for a virtual machine: lxc launch ubuntu:20.04 --vm + +Track prereq prereqok_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514180&event=prereq&operation=prereqok_ok&value=&comment=&tags= +DEBUG_INSTALL= +DOCKER_PROXY_URL= +USER=ubuntu +Installing Docker CE ... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +ca-certificates is already the newest version (20211016~20.04.1). +ca-certificates set to manually installed. +software-properties-common is already the newest version (0.99.9.8). +software-properties-common set to manually installed. 
+The following NEW packages will be installed: + apt-transport-https +0 upgraded, 1 newly installed, 0 to remove and 18 not upgraded. +Need to get 1704 B of archives. +After this operation, 162 kB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu focal-updates/universe amd64 apt-transport-https all 2.0.9 [1704 B] +Fetched 1704 B in 0s (27.7 kB/s) +Selecting previously unselected package apt-transport-https. +(Reading database ... 64950 files and directories currently installed.) +Preparing to unpack .../apt-transport-https_2.0.9_all.deb ... +Unpacking apt-transport-https (2.0.9) ... +Setting up apt-transport-https (2.0.9) ... +OK +Get:1 https://download.docker.com/linux/ubuntu focal InRelease [57.7 kB] +Hit:2 http://security.ubuntu.com/ubuntu focal-security InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:6 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Get:7 https://download.docker.com/linux/ubuntu focal/stable amd64 Packages [17.6 kB] +Fetched 75.2 kB in 1s (103 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... 
+The following additional packages will be installed: + containerd.io docker-ce-cli docker-ce-rootless-extras docker-scan-plugin + pigz slirp4netns +Suggested packages: + aufs-tools cgroupfs-mount | cgroup-lite +The following NEW packages will be installed: + containerd.io docker-ce docker-ce-cli docker-ce-rootless-extras + docker-scan-plugin pigz slirp4netns +0 upgraded, 7 newly installed, 0 to remove and 18 not upgraded. +Need to get 102 MB of archives. +After this operation, 422 MB of additional disk space will be used. +Get:1 https://download.docker.com/linux/ubuntu focal/stable amd64 containerd.io amd64 1.6.6-1 [28.1 MB] +Get:2 http://archive.ubuntu.com/ubuntu focal/universe amd64 pigz amd64 2.4-1 [57.4 kB] +Get:3 http://archive.ubuntu.com/ubuntu focal/universe amd64 slirp4netns amd64 0.4.3-1 [74.3 kB] +Get:4 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-ce-cli amd64 5:20.10.17~3-0~ubuntu-focal [40.6 MB] +Get:5 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-ce amd64 5:20.10.17~3-0~ubuntu-focal [21.0 MB] +Get:6 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-ce-rootless-extras amd64 5:20.10.17~3-0~ubuntu-focal [8171 kB] +Get:7 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-scan-plugin amd64 0.17.0~ubuntu-focal [3521 kB] +Fetched 102 MB in 2s (43.3 MB/s) +Selecting previously unselected package pigz. +(Reading database ... 64954 files and directories currently installed.) +Preparing to unpack .../0-pigz_2.4-1_amd64.deb ... +Unpacking pigz (2.4-1) ... +Selecting previously unselected package containerd.io. +Preparing to unpack .../1-containerd.io_1.6.6-1_amd64.deb ... +Unpacking containerd.io (1.6.6-1) ... +Selecting previously unselected package docker-ce-cli. +Preparing to unpack .../2-docker-ce-cli_5%3a20.10.17~3-0~ubuntu-focal_amd64.deb ... +Unpacking docker-ce-cli (5:20.10.17~3-0~ubuntu-focal) ... +Selecting previously unselected package docker-ce. 
+Preparing to unpack .../3-docker-ce_5%3a20.10.17~3-0~ubuntu-focal_amd64.deb ... +Unpacking docker-ce (5:20.10.17~3-0~ubuntu-focal) ... +Selecting previously unselected package docker-ce-rootless-extras. +Preparing to unpack .../4-docker-ce-rootless-extras_5%3a20.10.17~3-0~ubuntu-focal_amd64.deb ... +Unpacking docker-ce-rootless-extras (5:20.10.17~3-0~ubuntu-focal) ... +Selecting previously unselected package docker-scan-plugin. +Preparing to unpack .../5-docker-scan-plugin_0.17.0~ubuntu-focal_amd64.deb ... +Unpacking docker-scan-plugin (0.17.0~ubuntu-focal) ... +Selecting previously unselected package slirp4netns. +Preparing to unpack .../6-slirp4netns_0.4.3-1_amd64.deb ... +Unpacking slirp4netns (0.4.3-1) ... +Setting up slirp4netns (0.4.3-1) ... +Setting up docker-scan-plugin (0.17.0~ubuntu-focal) ... +Setting up containerd.io (1.6.6-1) ... +Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /lib/systemd/system/containerd.service. +Setting up docker-ce-cli (5:20.10.17~3-0~ubuntu-focal) ... +Setting up pigz (2.4-1) ... +Setting up docker-ce-rootless-extras (5:20.10.17~3-0~ubuntu-focal) ... +Setting up docker-ce (5:20.10.17~3-0~ubuntu-focal) ... +Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /lib/systemd/system/docker.service. +Created symlink /etc/systemd/system/sockets.target.wants/docker.socket → /lib/systemd/system/docker.socket. +Processing triggers for man-db (2.9.1-1) ... +Processing triggers for systemd (245.4-4ubuntu3.17) ... +Adding user to group 'docker' +... 
restarted Docker service +Client: Docker Engine - Community + Version: 20.10.17 + API version: 1.41 + Go version: go1.17.11 + Git commit: 100c701 + Built: Mon Jun 6 23:02:57 2022 + OS/Arch: linux/amd64 + Context: default + Experimental: true + +Server: Docker Engine - Community + Engine: + Version: 20.10.17 + API version: 1.41 (minimum version 1.12) + Go version: go1.17.11 + Git commit: a89b842 + Built: Mon Jun 6 23:01:03 2022 + OS/Arch: linux/amd64 + Experimental: false + containerd: + Version: 1.6.6 + GitCommit: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1 + runc: + Version: 1.1.2 + GitCommit: v1.1.2-0-ga916309 + docker-init: + Version: 0.19.0 + GitCommit: de40ad0 +... Docker CE installation done +Track docker_ce docker_ce_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514228&event=docker_ce&operation=docker_ce_ok&value=&comment=&tags= +Creating folders for installation +DEBUG_INSTALL= +DEFAULT_IP=192.168.64.23 +OSM_DEVOPS=/usr/share/osm-devops +OSM_DOCKER_WORK_DIR=/etc/osm/docker +INSTALL_K8S_MONITOR= +HOME=/home/ubuntu +Hit:1 https://download.docker.com/linux/ubuntu focal InRelease +Hit:2 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:4 http://security.ubuntu.com/ubuntu focal-security InRelease +Hit:5 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:6 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +apt-transport-https is already the newest version (2.0.9). +0 upgraded, 0 newly installed, 0 to remove and 18 not upgraded. 
+Hit:1 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:2 http://security.ubuntu.com/ubuntu focal-security InRelease +Hit:3 https://download.docker.com/linux/ubuntu focal InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:6 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +apt-transport-https is already the newest version (2.0.9). +0 upgraded, 0 newly installed, 0 to remove and 18 not upgraded. +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:2 http://security.ubuntu.com/ubuntu focal-security InRelease +Hit:3 https://download.docker.com/linux/ubuntu focal InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:6 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Get:7 https://packages.cloud.google.com/apt kubernetes-xenial InRelease [9383 B] +Get:8 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 Packages [57.2 kB] +Fetched 66.6 kB in 1s (50.0 kB/s) +Reading package lists... 
+W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Hit:1 https://download.docker.com/linux/ubuntu focal InRelease +Hit:2 http://security.ubuntu.com/ubuntu focal-security InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:6 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Hit:7 https://packages.cloud.google.com/apt kubernetes-xenial InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Installing Kubernetes Packages ... +Reading package lists... +Building dependency tree... +Reading state information... +The following additional packages will be installed: + conntrack cri-tools ebtables kubernetes-cni socat +Suggested packages: + nftables +The following NEW packages will be installed: + conntrack cri-tools ebtables kubeadm kubectl kubelet kubernetes-cni socat +0 upgraded, 8 newly installed, 0 to remove and 18 not upgraded. +Need to get 74.7 MB of archives. +After this operation, 324 MB of additional disk space will be used. 
+Get:1 http://archive.ubuntu.com/ubuntu focal/main amd64 conntrack amd64 1:1.4.5-2 [30.3 kB] +Get:2 http://archive.ubuntu.com/ubuntu focal/main amd64 ebtables amd64 2.0.11-3build1 [80.3 kB] +Get:4 http://archive.ubuntu.com/ubuntu focal/main amd64 socat amd64 1.7.3.3-2 [323 kB] +Get:3 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 cri-tools amd64 1.24.2-00 [12.3 MB] +Get:5 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubernetes-cni amd64 0.8.7-00 [25.0 MB] +Get:6 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubelet amd64 1.23.3-00 [19.5 MB] +Get:7 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubectl amd64 1.23.3-00 [8929 kB] +Get:8 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubeadm amd64 1.23.3-00 [8580 kB] +Fetched 74.7 MB in 3s (25.2 MB/s) +Selecting previously unselected package conntrack. +(Reading database ... 65205 files and directories currently installed.) +Preparing to unpack .../0-conntrack_1%3a1.4.5-2_amd64.deb ... +Unpacking conntrack (1:1.4.5-2) ... +Selecting previously unselected package cri-tools. +Preparing to unpack .../1-cri-tools_1.24.2-00_amd64.deb ... +Unpacking cri-tools (1.24.2-00) ... +Selecting previously unselected package ebtables. +Preparing to unpack .../2-ebtables_2.0.11-3build1_amd64.deb ... +Unpacking ebtables (2.0.11-3build1) ... +Selecting previously unselected package kubernetes-cni. +Preparing to unpack .../3-kubernetes-cni_0.8.7-00_amd64.deb ... +Unpacking kubernetes-cni (0.8.7-00) ... +Selecting previously unselected package socat. +Preparing to unpack .../4-socat_1.7.3.3-2_amd64.deb ... +Unpacking socat (1.7.3.3-2) ... +Selecting previously unselected package kubelet. +Preparing to unpack .../5-kubelet_1.23.3-00_amd64.deb ... +Unpacking kubelet (1.23.3-00) ... +Selecting previously unselected package kubectl. +Preparing to unpack .../6-kubectl_1.23.3-00_amd64.deb ... +Unpacking kubectl (1.23.3-00) ... 
+Selecting previously unselected package kubeadm. +Preparing to unpack .../7-kubeadm_1.23.3-00_amd64.deb ... +Unpacking kubeadm (1.23.3-00) ... +Setting up conntrack (1:1.4.5-2) ... +Setting up kubectl (1.23.3-00) ... +Setting up ebtables (2.0.11-3build1) ... +Setting up socat (1.7.3.3-2) ... +Setting up cri-tools (1.24.2-00) ... +Setting up kubernetes-cni (0.8.7-00) ... +Setting up kubelet (1.23.3-00) ... +Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /lib/systemd/system/kubelet.service. +Setting up kubeadm (1.23.3-00) ... +Processing triggers for man-db (2.9.1-1) ... + KUBELET_EXTRA_ARGS="--cgroup-driver=cgroupfs" +kubelet set on hold. +kubeadm set on hold. +kubectl set on hold. +Track k8scluster install_k8s_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514269&event=k8scluster&operation=install_k8s_ok&value=&comment=&tags= +I0629 16:51:09.776257 10096 version.go:255] remote version is much newer: v1.24.2; falling back to: stable-1.23 +[init] Using Kubernetes version: v1.23.8 +[preflight] Running pre-flight checks +[preflight] Pulling images required for setting up a Kubernetes cluster +[preflight] This might take a minute or two, depending on the speed of your internet connection +[preflight] You can also perform this action in beforehand using 'kubeadm config images pull' +[certs] Using certificateDir folder "/etc/kubernetes/pki" +[certs] Generating "ca" certificate and key +[certs] Generating "apiserver" certificate and key +[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local osm11] and IPs [10.96.0.1 192.168.64.23] +[certs] Generating "apiserver-kubelet-client" certificate and key +[certs] Generating "front-proxy-ca" certificate and key +[certs] Generating "front-proxy-client" certificate and key +[certs] Generating "etcd/ca" certificate and key +[certs] Generating "etcd/server" 
certificate and key +[certs] etcd/server serving cert is signed for DNS names [localhost osm11] and IPs [192.168.64.23 127.0.0.1 ::1] +[certs] Generating "etcd/peer" certificate and key +[certs] etcd/peer serving cert is signed for DNS names [localhost osm11] and IPs [192.168.64.23 127.0.0.1 ::1] +[certs] Generating "etcd/healthcheck-client" certificate and key +[certs] Generating "apiserver-etcd-client" certificate and key +[certs] Generating "sa" key and public key +[kubeconfig] Using kubeconfig folder "/etc/kubernetes" +[kubeconfig] Writing "admin.conf" kubeconfig file +[kubeconfig] Writing "kubelet.conf" kubeconfig file +[kubeconfig] Writing "controller-manager.conf" kubeconfig file +[kubeconfig] Writing "scheduler.conf" kubeconfig file +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Starting the kubelet +[control-plane] Using manifest folder "/etc/kubernetes/manifests" +[control-plane] Creating static Pod manifest for "kube-apiserver" +[control-plane] Creating static Pod manifest for "kube-controller-manager" +[control-plane] Creating static Pod manifest for "kube-scheduler" +[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" +[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s +[apiclient] All control plane components are healthy after 18.551122 seconds +[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace +[kubelet] Creating a ConfigMap "kubelet-config-1.23" in namespace kube-system with the configuration for the kubelets in the cluster +NOTE: The "kubelet-config-1.23" naming of the kubelet ConfigMap is deprecated. 
Once the UnversionedKubeletConfigMap feature gate graduates to Beta the default name will become just "kubelet-config". Kubeadm upgrade will handle this transition transparently. +[upload-certs] Skipping phase. Please see --upload-certs +[mark-control-plane] Marking the node osm11 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers] +[mark-control-plane] Marking the node osm11 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] +[bootstrap-token] Using token: d2rz8j.istr2iz8tw0cgsnl +[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles +[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes +[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials +[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token +[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster +[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace +[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key +[addons] Applied essential addon: CoreDNS +[addons] Applied essential addon: kube-proxy + +Your Kubernetes control-plane has initialized successfully! + +To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + +Alternatively, if you are the root user, you can run: + + export KUBECONFIG=/etc/kubernetes/admin.conf + +You should now deploy a pod network to the cluster. 
+Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + https://kubernetes.io/docs/concepts/cluster-administration/addons/ + +Then you can join any number of worker nodes by running the following on each as root: + +kubeadm join 192.168.64.23:6443 --token d2rz8j.istr2iz8tw0cgsnl \ + --discovery-token-ca-cert-hash sha256:49c0f16049c97cc76debdf07bfd3226fec7dae52f45a3a37a2dad24f2b07fca9 +Track k8scluster init_k8s_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514349&event=k8scluster&operation=init_k8s_ok&value=&comment=&tags= +Error from server (NotFound): namespaces "osm" not found +--2022-06-29 16:52:29-- https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml +Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ... +Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected. +HTTP request sent, awaiting response... 200 OK +Length: 5750 (5.6K) [text/plain] +Saving to: ‘/tmp/flannel.56U8m6/kube-flannel.yml’ + + 0K ..... 100% 30.6M=0s + +2022-06-29 16:52:42 (30.6 MB/s) - ‘/tmp/flannel.56U8m6/kube-flannel.yml’ saved [5750/5750] + +Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ +podsecuritypolicy.policy/psp.flannel.unprivileged created +clusterrole.rbac.authorization.k8s.io/flannel created +clusterrolebinding.rbac.authorization.k8s.io/flannel created +serviceaccount/flannel created +configmap/kube-flannel-cfg created +daemonset.apps/kube-flannel-ds created +node/osm11 untainted +Helm3 is not installed, installing ... 
+ % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 13.2M 100 13.2M 0 0 31.1M 0 --:--:-- --:--:-- --:--:-- 31.1M +linux-amd64/ +linux-amd64/helm +linux-amd64/LICENSE +linux-amd64/README.md +"stable" has been added to your repositories +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "stable" chart repository +Update Complete. ⎈Happy Helming!⎈ +Track k8scluster install_helm_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514382&event=k8scluster&operation=install_helm_ok&value=&comment=&tags= +Installing open-iscsi +Hit:1 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:2 https://download.docker.com/linux/ubuntu focal InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Hit:7 http://security.ubuntu.com/ubuntu focal-security InRelease +Hit:6 https://packages.cloud.google.com/apt kubernetes-xenial InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +open-iscsi is already the newest version (2.0.874-7.1ubuntu6.2). +open-iscsi set to manually installed. +0 upgraded, 0 newly installed, 0 to remove and 21 not upgraded. +Synchronizing state of iscsid.service with SysV service script with /lib/systemd/systemd-sysv-install. +Executing: /lib/systemd/systemd-sysv-install enable iscsid +Created symlink /etc/systemd/system/sysinit.target.wants/iscsid.service → /lib/systemd/system/iscsid.service. +Installing OpenEBS +"openebs" has been added to your repositories +Hang tight while we grab the latest from your chart repositories... 
+...Successfully got an update from the "openebs" chart repository +...Successfully got an update from the "stable" chart repository +Update Complete. ⎈Happy Helming!⎈ +NAME: openebs +LAST DEPLOYED: Wed Jun 29 16:53:24 2022 +NAMESPACE: openebs +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +Successfully installed OpenEBS. + +Check the status by running: kubectl get pods -n openebs + +The default values will install NDM and enable OpenEBS hostpath and device +storage engines along with their default StorageClasses. Use `kubectl get sc` +to see the list of installed OpenEBS StorageClasses. + +**Note**: If you are upgrading from the older helm chart that was using cStor +and Jiva (non-csi) volumes, you will have to run the following command to include +the older provisioners: + +helm upgrade openebs openebs/openebs \ + --namespace openebs \ + --set legacy.enabled=true \ + --reuse-values + +For other engines, you will need to perform a few more additional steps to +enable the engine, configure the engines (e.g. creating pools) and create +StorageClasses. + +For example, cStor can be enabled using commands like: + +helm upgrade openebs openebs/openebs \ + --namespace openebs \ + --set cstor.enabled=true \ + --reuse-values + +For more information, +- view the online documentation at https://openebs.io/docs or +- connect with an active community on Kubernetes slack #openebs channel. 
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +openebs openebs 1 2022-06-29 16:53:24.471803904 +0200 CEST deployed openebs-3.1.0 3.1.0 +Waiting for storageclass +Storageclass available +storageclass.storage.k8s.io/openebs-hostpath patched +Track k8scluster k8s_storageclass_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514408&event=k8scluster&operation=k8s_storageclass_ok&value=&comment=&tags= +Installing MetalLB +configInline: + address-pools: + - name: default + protocol: layer2 + addresses: + - 192.168.64.23/32 +"metallb" has been added to your repositories +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "metallb" chart repository +...Successfully got an update from the "openebs" chart repository +...Successfully got an update from the "stable" chart repository +Update Complete. ⎈Happy Helming!⎈ +W0629 16:53:37.852952 13892 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ +W0629 16:53:37.863932 13892 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ +W0629 16:53:38.117988 13892 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ +W0629 16:53:38.120086 13892 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ +NAME: metallb +LAST DEPLOYED: Wed Jun 29 16:53:35 2022 +NAMESPACE: metallb-system +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +MetalLB is now running in the cluster. +LoadBalancer Services in your cluster are now available on the IPs you +defined in MetalLB's configuration: + +config: + address-pools: + - addresses: + - 192.168.64.23/32 + name: default + protocol: layer2 + +To see IP assignments, try `kubectl get services`. 
+Track k8scluster k8s_metallb_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514418&event=k8scluster&operation=k8s_metallb_ok&value=&comment=&tags= + +Bootstraping... 1 checks of 100 +OpenEBS: Waiting for 3 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-4xjpm 0/1 +openebs-ndm-7dzc5 0/1 +openebs-ndm-operator-687cf9466c-ffqb7 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 2 checks of 100 +OpenEBS: Waiting for 3 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-4xjpm 0/1 +openebs-ndm-7dzc5 0/1 +openebs-ndm-operator-687cf9466c-ffqb7 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 3 checks of 100 +OpenEBS: Waiting for 3 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-4xjpm 0/1 +openebs-ndm-7dzc5 0/1 +openebs-ndm-operator-687cf9466c-ffqb7 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 4 checks of 100 +OpenEBS: Waiting for 3 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-4xjpm 0/1 +openebs-ndm-7dzc5 0/1 +openebs-ndm-operator-687cf9466c-ffqb7 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 5 checks of 100 +OpenEBS: Waiting for 2 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-4xjpm 0/1 +openebs-ndm-operator-687cf9466c-ffqb7 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 
6 checks of 100 +OpenEBS: Waiting for 2 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-4xjpm 0/1 +openebs-ndm-operator-687cf9466c-ffqb7 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 7 checks of 100 +OpenEBS: Waiting for 2 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-4xjpm 0/1 +openebs-ndm-operator-687cf9466c-ffqb7 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 8 checks of 100 +OpenEBS: Waiting for 2 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-4xjpm 0/1 +openebs-ndm-operator-687cf9466c-ffqb7 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 9 checks of 100 +OpenEBS: Waiting for 1 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-4xjpm 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 10 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 11 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 12 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 13 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 14 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 
15 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 16 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 17 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 18 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 19 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 20 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + + +Bootstraping... 21 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-c55c89d-mrpht 0/1 +metallb-speaker-czbtn 0/1 + +===> Successful checks: 10/10 +K8S CLUSTER IS READY +Track k8scluster k8s_ready_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514493&event=k8scluster&operation=k8s_ready_ok&value=&comment=&tags= +Track k8scluster k8scluster_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514494&event=k8scluster&operation=k8scluster_ok&value=&comment=&tags= +DEBUG_INSTALL= +DEFAULT_IP=192.168.64.23 +OSM_DEVOPS=/usr/share/osm-devops +HOME=/home/ubuntu +Installing juju +juju (2.9/stable) 2.9.32 from Canonical** installed +Finished installation of juju +Track juju juju_install_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656514895&event=juju&operation=juju_install_ok&value=&comment=&tags= +Since Juju 2 is being run for the first time, it has downloaded the latest public cloud information. 
+ +k8s substrate added as cloud "k8scloud" with storage provisioned +by the existing "openebs-hostpath" storage class. +You can now bootstrap to this cloud by running 'juju bootstrap k8scloud'. +17:01:40 INFO juju.cmd supercommand.go:56 running juju [2.9.32 917a8f1033561ce28a73ff81d71da75aec6e0785 gc go1.18.3] +17:01:40 DEBUG juju.cmd supercommand.go:57 args: []string{"/snap/juju/19681/bin/juju", "bootstrap", "-v", "--debug", "k8scloud", "osm", "--config", "controller-service-type=loadbalancer", "--agent-version=2.9.29"} +17:01:40 DEBUG juju.cmd.juju.commands bootstrap.go:1307 authenticating with region "" and credential "k8scloud" () +17:01:40 DEBUG juju.cmd.juju.commands bootstrap.go:1455 provider attrs: map[operator-storage: workload-storage:] +17:01:41 INFO cmd authkeys.go:114 Adding contents of "/home/ubuntu/.local/share/juju/ssh/juju_id_rsa.pub" to authorized-keys +17:01:41 DEBUG juju.cmd.juju.commands bootstrap.go:1530 preparing controller with config: map[agent-metadata-url: agent-stream:released apt-ftp-proxy: apt-http-proxy: apt-https-proxy: apt-mirror: apt-no-proxy: authorized-keys:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDzgJ4FXAKQAbe2yzd3Tg31wSmxU9RNM75WymY8T89vm01Ms4JtJEEcgJ+aGNisdxPEWK8OFQQ1ovfjGiN8GvZSoHuqR7QvHeKtN4jKhP8yoNEYgDecVXK93HyEky9t0TY+XT42crEHVqCtDWfSKd4ZqwHrVmgnTYJEJ+8tjO9jJGTvbtoD+FxQRg5B3SHm+u0mhtnaLnOrlBIgxCMWBDK7Zsv1ESNKt2WUSAWfDpfbMhYKAUab6HreXvsBq9dNGmXd1nSan+9HW/auziPoL42mT6PTZSAgWQsd2SpowmeLsSDw7mpmgZkLUSBtNB5WEufdfoECTpw3amt1rAHAJKDb juju-client-key + automatically-retry-hooks:true backup-dir: charmhub-url:https://api.charmhub.io cloudinit-userdata: container-image-metadata-url: container-image-stream:released container-inherit-properties: container-networking-method: default-series:focal default-space: development:false disable-network-management:false disable-telemetry:false egress-subnets: enable-os-refresh-update:true enable-os-upgrade:true fan-config: firewall-mode:instance ftp-proxy: http-proxy: https-proxy: 
ignore-machine-addresses:false image-metadata-url: image-stream:released juju-ftp-proxy: juju-http-proxy: juju-https-proxy: juju-no-proxy:127.0.0.1,localhost,::1 logforward-enabled:false logging-config: logging-output: lxd-snap-channel:latest/stable max-action-results-age:336h max-action-results-size:5G max-status-history-age:336h max-status-history-size:5G name:controller net-bond-reconfigure-delay:17 no-proxy:127.0.0.1,localhost,::1 num-container-provision-workers:4 num-provision-workers:16 operator-storage:openebs-hostpath provisioner-harvest-mode:destroyed proxy-ssh:false resource-tags: snap-http-proxy: snap-https-proxy: snap-store-assertions: snap-store-proxy: snap-store-proxy-url: ssl-hostname-verification:true test-mode:false transmit-vendor-metrics:true type:kubernetes update-status-hook-interval:5m uuid:caca7a31-d59f-405c-844f-ef84e708f3b5 workload-storage:openebs-hostpath] +17:01:41 DEBUG juju.kubernetes.provider provider.go:140 opening model "controller". +17:01:41 INFO cmd bootstrap.go:855 Creating Juju controller "osm" on k8scloud +17:01:41 INFO juju.cmd.juju.commands bootstrap.go:921 combined bootstrap constraints: +17:01:41 INFO cmd bootstrap.go:969 Bootstrap to generic Kubernetes cluster +17:01:42 DEBUG juju.environs.simplestreams simplestreams.go:423 searching for signed metadata in datasource "gui simplestreams" +17:01:42 DEBUG juju.environs.simplestreams simplestreams.go:458 looking for data index using path streams/v1/index2.sjson +17:01:42 DEBUG juju.environs.simplestreams simplestreams.go:470 looking for data index using URL https://streams.canonical.com/juju/gui/streams/v1/index2.sjson +17:01:42 DEBUG juju.environs.simplestreams simplestreams.go:473 streams/v1/index2.sjson not accessed, actual error: [{/build/snapcraft-juju-a284566302ade03f36071a6fe755224b/parts/juju/src/environs/simplestreams/datasource.go:192: "https://streams.canonical.com/juju/gui/streams/v1/index2.sjson" not found}] +17:01:42 DEBUG juju.environs.simplestreams 
simplestreams.go:474 streams/v1/index2.sjson not accessed, trying legacy index path: streams/v1/index.sjson +17:01:42 DEBUG juju.environs.simplestreams simplestreams.go:493 read metadata index at "https://streams.canonical.com/juju/gui/streams/v1/index.sjson" +17:01:42 DEBUG juju.environs.simplestreams simplestreams.go:1025 finding products at path "streams/v1/com.canonical.streams-released-dashboard.sjson" +17:01:42 INFO cmd bootstrap.go:864 Fetching Juju Dashboard 0.8.1 +17:01:42 DEBUG juju.kubernetes.provider k8s.go:470 controller pod config: +&{Tags:map[] Bootstrap:0xc000365880 DisableSSLHostnameVerification:false ProxySettings:{Http: Https: Ftp: NoProxy:127.0.0.1,localhost,::1 AutoNoProxy:} Controller:map[agent-logfile-max-backups:2 agent-logfile-max-size:100M api-port:17070 api-port-open-delay:2s audit-log-capture-args:false audit-log-exclude-methods:[ReadOnlyMethods] audit-log-max-backups:10 audit-log-max-size:300M auditing-enabled:true batch-raft-fsm:false ca-cert:-----BEGIN CERTIFICATE----- +MIIEEzCCAnugAwIBAgIVAKUNNMoHI67FvYlpb18lbyE1BJyDMA0GCSqGSIb3DQEB +CwUAMCExDTALBgNVBAoTBEp1anUxEDAOBgNVBAMTB2p1anUtY2EwHhcNMjIwNjI5 +MTQ1NjQxWhcNMzIwNjI5MTUwMTQxWjAhMQ0wCwYDVQQKEwRKdWp1MRAwDgYDVQQD +EwdqdWp1LWNhMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEA5A/tDo09 +9QCo/xEo3d41Rq0sHP/7OR8Qaynfo7/95+S/roFiOf99vENd3l/eA82K9glR/Td0 +bFEm2l/SwX63fan/Y7PVxSXr5d/G0nhDORfQSzn61fLJwli1vDHFGordIKBPWBhX +vTdfETqiTZGT4IaXjT4+aKfAyWPhI8KDAmacuJ80aANO/7jML7wTgdq0aSfQKfmK +OGJqngi2vxifHrNpCfMBRl9/L5Mbuw9ytXsBhSrPBINrk81hq2bAHVj7NvfPfPue +a2UBb788kMP+xonGhN+8pUZooA3HFMp3JCVd9jbNDh/M3u2tRZbrFMsER3xV+Aqg +i6I3y8xLFNSb11hPbme3YpuoSyH/+ZDCojTcXhmU6ufpyAACWeCMgVBsFJK76CC6 +vdNAsL1ueVKB3DWlyh4phoWmMPb0vJUIuG3h1QDM2bmSD6JTPbA0SrApJpiqMbPy +zA9Fgfla2R1R35+r8EMNVpdVLG3dS29tx5S07E2Xu1ZrHhaNMd76VjBHAgMBAAGj +QjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSY +5jTQ56RTXNqF89m4Q8R2hSfzUTANBgkqhkiG9w0BAQsFAAOCAYEA3iS5qNRamawK 
+p5wLPxJ9wQdkSozjQGqlvzuDrujDgt3RZ1Kiu1neCTLczLwINMbahfZ79NQoamqJ +3juk+GPorrgDJ3N3xdXhm23O3zPMH6owH2pHc8ev0+RkqLyNgnFav3NKRK53A7Zf +nGM0g39aTTAv6SBuVGFowXfM7LR/eS8WUyvQFTbqHlmpRVYKCW7Yo2FDxvRa9eC+ +LlRuwt1oTJ1PwjQIhbiE+5tfB944z8sfXjLSWlWuqnTpi9O2nL3ERVblp5kC8uAT +SSyEygDOxtI4eNChKkwkNsNFRCyOCNVHv/oBW/LpwplbGZbuht8M1ygBR19bGIVK +XSkZMKTIrrw6V6JJ+RsPOxWM2ehtKGt3pLIH1XjVprxNB1zwGvG0VDLkmAlD63bP +94mRhrs3vWDimIkIPZPKinvuimPstlujq4soQqkwc+LBiHKC8V0sBde2grdu4aEJ +dWiB6Qfc6rfRqzWY8EaeWlg/G7D5qscwc/RlEHO/3TS7ob+PEAFh +-----END CERTIFICATE----- + charmstore-url:https://api.jujucharms.com/charmstore controller-name:osm controller-uuid:15f06f99-4843-41ca-8bef-04a4af311293 juju-db-snap-channel:4.4/stable max-agent-state-size:524288 max-charm-state-size:2097152 max-debug-log-duration:24h0m0s max-prune-txn-batch-size:1000000 max-prune-txn-passes:100 max-txn-log-size:10M metering-url:https://api.jujucharms.com/omnibus/v3 migration-agent-wait-time:15m model-logfile-max-backups:2 model-logfile-max-size:10M model-logs-size:20M mongo-memory-profile:default non-synced-writes-to-raft-log:false prune-txn-query-count:1000 prune-txn-sleep-time:10ms set-numa-control-policy:false state-port:37017] APIInfo:0xc000211740 ControllerTag:controller-15f06f99-4843-41ca-8bef-04a4af311293 ControllerName:osm JujuVersion:2.9.29 DataDir:/var/lib/juju LogDir:/var/log/juju MetricsSpoolDir:/var/lib/juju/metricspool ControllerId:0 AgentEnvironment:map[PROVIDER_TYPE:kubernetes]} +17:01:42 INFO cmd bootstrap.go:394 Creating k8s resources for controller "controller-osm" +17:01:42 DEBUG juju.kubernetes.provider bootstrap.go:627 creating controller service: +&Service{ObjectMeta:{controller-service controller-osm 0 0001-01-01 00:00:00 +0000 UTC map[app.kubernetes.io/managed-by:juju app.kubernetes.io/name:controller] map[controller.juju.is/id:15f06f99-4843-41ca-8bef-04a4af311293] [] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:api-server,Protocol:,Port:17070,TargetPort:{0 17070 
},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app.kubernetes.io/name: controller,},ClusterIP:,Type:LoadBalancer,ExternalIPs:[],SessionAffinity:,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:nil,ClusterIPs:[],IPFamilies:[],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},} +17:01:43 DEBUG juju.kubernetes.provider bootstrap.go:660 polling k8s controller svc DNS, in 1 attempt, controller service address not provisioned +17:01:46 DEBUG juju.kubernetes.provider configmap.go:84 updating configmap "controller-configmap" +17:01:47 DEBUG juju.kubernetes.provider configmap.go:84 updating configmap "controller-configmap" +17:01:47 DEBUG juju.kubernetes.provider bootstrap.go:1207 mongodb container args: +printf 'args="--dbpath=/var/lib/juju/db --sslPEMKeyFile=/var/lib/juju/server.pem --sslPEMKeyPassword=ignored --sslMode=requireSSL --port=37017 --journal --replSet=juju --quiet --oplogSize=1024 --auth --keyFile=/var/lib/juju/shared-secret --storageEngine=wiredTiger --bind_ip_all"\nipv6Disabled=$(sysctl net.ipv6.conf.all.disable_ipv6 -n)\nif [ $ipv6Disabled -eq 0 ]; then\n args="${args} --ipv6"\nfi\n$(mongod ${args})\n'>/root/mongo.sh && chmod a+x /root/mongo.sh && /root/mongo.sh +17:01:47 DEBUG juju.kubernetes.provider k8s.go:2252 selecting units "app.kubernetes.io/name=controller" to watch +17:01:48 DEBUG juju.kubernetes.provider.watcher k8swatcher.go:114 fire notify watcher for controller-0 +17:01:48 DEBUG juju.kubernetes.provider.watcher k8swatcher.go:114 fire notify watcher for controller +17:02:00 DEBUG juju.kubernetes.provider.watcher k8swatcher.go:114 fire notify watcher for controller +17:02:00 DEBUG juju.kubernetes.provider bootstrap.go:957 Successfully assigned 
controller-osm/controller-0 to osm11 +17:02:00 DEBUG juju.kubernetes.provider.watcher k8swatcher.go:114 fire notify watcher for controller-0 +17:02:02 DEBUG juju.kubernetes.provider.watcher k8swatcher.go:114 fire notify watcher for controller-0 +17:02:02 DEBUG juju.kubernetes.provider bootstrap.go:957 Downloading images +17:02:02 INFO cmd bootstrap.go:959 Downloading images +17:02:18 DEBUG juju.kubernetes.provider.watcher k8swatcher.go:114 fire notify watcher for controller-0 +17:02:19 DEBUG juju.kubernetes.provider bootstrap.go:957 Pulled images +17:02:20 DEBUG juju.kubernetes.provider.watcher k8swatcher.go:114 fire notify watcher for controller-0 +17:02:20 DEBUG juju.kubernetes.provider bootstrap.go:957 Created container mongodb +17:02:20 DEBUG juju.kubernetes.provider bootstrap.go:957 Started mongodb container +17:02:52 DEBUG juju.kubernetes.provider.watcher k8swatcher.go:114 fire notify watcher for controller-0 +17:02:55 DEBUG juju.kubernetes.provider.watcher k8swatcher.go:114 fire notify watcher for controller-0 +17:02:55 DEBUG juju.kubernetes.provider bootstrap.go:957 Created container api-server +17:02:55 DEBUG juju.kubernetes.provider bootstrap.go:957 Started controller container +17:02:56 DEBUG juju.kubernetes.provider.watcher k8swatcher.go:114 fire notify watcher for controller +17:02:57 INFO cmd bootstrap.go:1045 Starting controller pod +17:02:57 INFO cmd bootstrap.go:708 Bootstrap agent now started +17:02:57 INFO juju.juju api.go:330 API endpoints changed from [] to [192.168.64.23:17070] +17:02:57 INFO cmd controller.go:88 Contacting Juju controller at 192.168.64.23 to verify accessibility... 
+17:02:57 INFO juju.juju api.go:78 connecting to API addresses: [192.168.64.23:17070] +17:03:02 INFO cmd controller.go:141 Still waiting for API to become available: unable to connect to API: dial tcp 192.168.64.23:17070: connect: connection refused +17:03:05 INFO juju.juju api.go:78 connecting to API addresses: [192.168.64.23:17070] +17:03:10 INFO cmd controller.go:141 Still waiting for API to become available: unable to connect to API: dial tcp 192.168.64.23:17070: connect: connection refused +17:03:13 INFO juju.juju api.go:78 connecting to API addresses: [192.168.64.23:17070] +17:03:13 DEBUG juju.api apiclient.go:1153 successfully dialed "wss://192.168.64.23:17070/model/caca7a31-d59f-405c-844f-ef84e708f3b5/api" +17:03:13 INFO juju.api apiclient.go:688 connection established to "wss://192.168.64.23:17070/model/caca7a31-d59f-405c-844f-ef84e708f3b5/api" +17:03:13 DEBUG juju.api monitor.go:35 RPC connection died +17:03:13 INFO cmd controller.go:108 +Bootstrap complete, controller "osm" is now available in namespace "controller-osm" +17:03:13 INFO cmd bootstrap.go:596 +Now you can run + juju add-model +to create a new model to deploy k8s workloads. +17:03:13 INFO cmd supercommand.go:544 command finished +Generating a RSA private key +..........................+++++ +...........................................+++++ +writing new private key to '/tmp/.osm/client.key' +----- +Cloud "lxd-cloud" added to controller "osm". +WARNING loading credentials: credentials for cloud lxd-cloud not found +To upload a credential to the controller for cloud "lxd-cloud", use +* 'add-model' with --credential option or +* 'add-credential -c lxd-cloud'. +Using cloud "lxd-cloud" from the controller to verify credentials. +Controller credential "lxd-cloud" for user "admin" for cloud "lxd-cloud" on controller "osm" added. +For more information, see ‘juju show-credential lxd-cloud lxd-cloud’. + +Checking required packages: iptables-persistent + Not installed. 
+Installing iptables-persistent requires root privileges +Reading package lists... +Building dependency tree... +Reading state information... +The following additional packages will be installed: + netfilter-persistent +The following NEW packages will be installed: + iptables-persistent netfilter-persistent +0 upgraded, 2 newly installed, 0 to remove and 21 not upgraded. +Need to get 13.8 kB of archives. +After this operation, 89.1 kB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu focal-updates/universe amd64 netfilter-persistent all 1.0.14ubuntu1 [7268 B] +Get:2 http://archive.ubuntu.com/ubuntu focal-updates/universe amd64 iptables-persistent all 1.0.14ubuntu1 [6552 B] +Preconfiguring packages ... +Fetched 13.8 kB in 0s (38.8 kB/s) +Selecting previously unselected package netfilter-persistent. +(Reading database ... 65298 files and directories currently installed.) +Preparing to unpack .../netfilter-persistent_1.0.14ubuntu1_all.deb ... +Unpacking netfilter-persistent (1.0.14ubuntu1) ... +Selecting previously unselected package iptables-persistent. +Preparing to unpack .../iptables-persistent_1.0.14ubuntu1_all.deb ... +Unpacking iptables-persistent (1.0.14ubuntu1) ... +Setting up netfilter-persistent (1.0.14ubuntu1) ... +Created symlink /etc/systemd/system/multi-user.target.wants/netfilter-persistent.service → /lib/systemd/system/netfilter-persistent.service. +Setting up iptables-persistent (1.0.14ubuntu1) ... +update-alternatives: using /lib/systemd/system/netfilter-persistent.service to provide /lib/systemd/system/iptables.service (iptables.service) in auto mode +Processing triggers for man-db (2.9.1-1) ... +Processing triggers for systemd (245.4-4ubuntu3.17) ... +iptables v1.8.4 (legacy): option "--to-destination" requires an argument +Try `iptables -h' or 'iptables --help' for more information. +iptables v1.8.4 (legacy): option "--to-destination" requires an argument +Try `iptables -h' or 'iptables --help' for more information. 
+run-parts: executing /usr/share/netfilter-persistent/plugins.d/15-ip4tables save +run-parts: executing /usr/share/netfilter-persistent/plugins.d/25-ip6tables save +Track juju juju_controller_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656515019&event=juju&operation=juju_controller_ok&value=&comment=&tags= +Track juju juju_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656515021&event=juju&operation=juju_ok&value=&comment=&tags= +Pulling and generating docker images +Pulling docker images +Using default tag: latest +latest: Pulling from wurstmeister/zookeeper +a3ed95caeb02: Pulling fs layer +ef38b711a50f: Pulling fs layer +e057c74597c7: Pulling fs layer +666c214f6385: Pulling fs layer +c3d6a96f1ffc: Pulling fs layer +3fe26a83e0ca: Pulling fs layer +3d3a7dd3a3b1: Pulling fs layer +f8cc938abe5f: Pulling fs layer +9978b75f7a58: Pulling fs layer +4d4dbcc8f8cc: Pulling fs layer +8b130a9baa49: Pulling fs layer +6b9611650a73: Pulling fs layer +5df5aac51927: Pulling fs layer +76eea4448d9b: Pulling fs layer +8b66990876c6: Pulling fs layer +f0dd38204b6f: Pulling fs layer +666c214f6385: Waiting +c3d6a96f1ffc: Waiting +3fe26a83e0ca: Waiting +3d3a7dd3a3b1: Waiting +f8cc938abe5f: Waiting +9978b75f7a58: Waiting +4d4dbcc8f8cc: Waiting +8b130a9baa49: Waiting +6b9611650a73: Waiting +5df5aac51927: Waiting +76eea4448d9b: Waiting +8b66990876c6: Waiting +f0dd38204b6f: Waiting +a3ed95caeb02: Download complete +e057c74597c7: Verifying Checksum +e057c74597c7: Download complete +a3ed95caeb02: Pull complete +666c214f6385: Verifying Checksum +666c214f6385: Download complete +c3d6a96f1ffc: Verifying Checksum +c3d6a96f1ffc: Download complete +3fe26a83e0ca: Verifying Checksum +3fe26a83e0ca: Download complete +f8cc938abe5f: Verifying Checksum +f8cc938abe5f: Download complete +9978b75f7a58: Verifying Checksum +9978b75f7a58: Download complete +4d4dbcc8f8cc: Verifying Checksum +4d4dbcc8f8cc: Download 
complete +ef38b711a50f: Verifying Checksum +ef38b711a50f: Download complete +6b9611650a73: Verifying Checksum +6b9611650a73: Download complete +8b130a9baa49: Verifying Checksum +8b130a9baa49: Download complete +76eea4448d9b: Verifying Checksum +76eea4448d9b: Download complete +5df5aac51927: Verifying Checksum +5df5aac51927: Download complete +8b66990876c6: Download complete +f0dd38204b6f: Verifying Checksum +f0dd38204b6f: Download complete +3d3a7dd3a3b1: Verifying Checksum +3d3a7dd3a3b1: Download complete +ef38b711a50f: Pull complete +e057c74597c7: Pull complete +666c214f6385: Pull complete +c3d6a96f1ffc: Pull complete +3fe26a83e0ca: Pull complete +3d3a7dd3a3b1: Pull complete +f8cc938abe5f: Pull complete +9978b75f7a58: Pull complete +4d4dbcc8f8cc: Pull complete +8b130a9baa49: Pull complete +6b9611650a73: Pull complete +5df5aac51927: Pull complete +76eea4448d9b: Pull complete +8b66990876c6: Pull complete +f0dd38204b6f: Pull complete +Digest: sha256:7a7fd44a72104bfbd24a77844bad5fabc86485b036f988ea927d1780782a6680 +Status: Downloaded newer image for wurstmeister/zookeeper:latest +docker.io/wurstmeister/zookeeper:latest +2.11-1.0.2: Pulling from wurstmeister/kafka +540db60ca938: Pulling fs layer +f0698009749d: Pulling fs layer +6f908e2198d8: Pulling fs layer +03d1b1f23ba0: Pulling fs layer +7e646c44bafc: Pulling fs layer +7e646c44bafc: Waiting +6f908e2198d8: Verifying Checksum +6f908e2198d8: Download complete +540db60ca938: Verifying Checksum +540db60ca938: Download complete +7e646c44bafc: Verifying Checksum +7e646c44bafc: Download complete +540db60ca938: Pull complete +f0698009749d: Verifying Checksum +f0698009749d: Download complete +03d1b1f23ba0: Verifying Checksum +03d1b1f23ba0: Download complete +f0698009749d: Pull complete +6f908e2198d8: Pull complete +03d1b1f23ba0: Pull complete +7e646c44bafc: Pull complete +Digest: sha256:ec1098369b4ccea77489b233172789c8ac29b545b9243545386549c52d07785b +Status: Downloaded newer image for wurstmeister/kafka:2.11-1.0.2 
+docker.io/wurstmeister/kafka:2.11-1.0.2 +v2.4.3: Pulling from prom/prometheus +8c5a7da1afbc: Pulling fs layer +cab0dd93492f: Pulling fs layer +21399993eeff: Pulling fs layer +50fe4c0f18ae: Pulling fs layer +bab8b4ed1fef: Pulling fs layer +5bd3aaf93e52: Pulling fs layer +ccb08d41de26: Pulling fs layer +51047cce385d: Pulling fs layer +51c32e598e5c: Pulling fs layer +50fe4c0f18ae: Waiting +bab8b4ed1fef: Waiting +5bd3aaf93e52: Waiting +ccb08d41de26: Waiting +51047cce385d: Waiting +51c32e598e5c: Waiting +8c5a7da1afbc: Verifying Checksum +8c5a7da1afbc: Download complete +cab0dd93492f: Verifying Checksum +cab0dd93492f: Download complete +8c5a7da1afbc: Pull complete +bab8b4ed1fef: Verifying Checksum +bab8b4ed1fef: Download complete +21399993eeff: Verifying Checksum +21399993eeff: Download complete +50fe4c0f18ae: Verifying Checksum +50fe4c0f18ae: Download complete +5bd3aaf93e52: Download complete +ccb08d41de26: Verifying Checksum +ccb08d41de26: Download complete +cab0dd93492f: Pull complete +51047cce385d: Download complete +51c32e598e5c: Verifying Checksum +51c32e598e5c: Download complete +21399993eeff: Pull complete +50fe4c0f18ae: Pull complete +bab8b4ed1fef: Pull complete +5bd3aaf93e52: Pull complete +ccb08d41de26: Pull complete +51047cce385d: Pull complete +51c32e598e5c: Pull complete +Digest: sha256:2d79525389d68a309db843c1888f364823afbbef32ffea4741024d2ab9994dd6 +Status: Downloaded newer image for prom/prometheus:v2.4.3 +docker.io/prom/prometheus:v2.4.3 +latest: Pulling from google/cadvisor +ff3a5c916c92: Pulling fs layer +44a45bb65cdf: Pulling fs layer +0bbe1a2fe2a6: Pulling fs layer +ff3a5c916c92: Download complete +44a45bb65cdf: Verifying Checksum +44a45bb65cdf: Download complete +0bbe1a2fe2a6: Verifying Checksum +0bbe1a2fe2a6: Download complete +ff3a5c916c92: Pull complete +44a45bb65cdf: Pull complete +0bbe1a2fe2a6: Pull complete +Digest: sha256:815386ebbe9a3490f38785ab11bda34ec8dacf4634af77b8912832d4f85dca04 +Status: Downloaded newer image for 
google/cadvisor:latest +docker.io/google/cadvisor:latest +latest: Pulling from grafana/grafana +df9b9388f04a: Pulling fs layer +484d0388b206: Pulling fs layer +276fa003c54d: Pulling fs layer +bcdfa28a7179: Pulling fs layer +1025160bed9b: Pulling fs layer +c459cd126d0c: Pulling fs layer +8ff228114e1e: Pulling fs layer +9c8142accf14: Pulling fs layer +582e6642725d: Pulling fs layer +bcdfa28a7179: Waiting +1025160bed9b: Waiting +c459cd126d0c: Waiting +8ff228114e1e: Waiting +9c8142accf14: Waiting +582e6642725d: Waiting +484d0388b206: Verifying Checksum +484d0388b206: Download complete +276fa003c54d: Verifying Checksum +276fa003c54d: Download complete +df9b9388f04a: Verifying Checksum +bcdfa28a7179: Verifying Checksum +bcdfa28a7179: Download complete +df9b9388f04a: Pull complete +1025160bed9b: Verifying Checksum +1025160bed9b: Download complete +484d0388b206: Pull complete +8ff228114e1e: Verifying Checksum +8ff228114e1e: Download complete +9c8142accf14: Verifying Checksum +9c8142accf14: Download complete +582e6642725d: Verifying Checksum +582e6642725d: Download complete +276fa003c54d: Pull complete +bcdfa28a7179: Pull complete +c459cd126d0c: Verifying Checksum +c459cd126d0c: Download complete +1025160bed9b: Pull complete +c459cd126d0c: Pull complete +8ff228114e1e: Pull complete +9c8142accf14: Pull complete +582e6642725d: Pull complete +Digest: sha256:49f5db3e80621196cb1d40bca90d51dc8f521734231ad5fa99e8561525df5ea9 +Status: Downloaded newer image for grafana/grafana:latest +docker.io/grafana/grafana:latest +10: Pulling from library/mariadb +405f018f9d1d: Pulling fs layer +7a85079b8234: Pulling fs layer +579c7ff691b1: Pulling fs layer +4976663b5d6d: Pulling fs layer +169024b1fb13: Pulling fs layer +c0ffe8ce897f: Pulling fs layer +b583c09d23c3: Pulling fs layer +9b9f0c08d08f: Pulling fs layer +9cd51f984586: Pulling fs layer +d9f506bb8aca: Pulling fs layer +24d689f79ba4: Pulling fs layer +4976663b5d6d: Waiting +169024b1fb13: Waiting +c0ffe8ce897f: Waiting +b583c09d23c3: 
Waiting +9b9f0c08d08f: Waiting +9cd51f984586: Waiting +d9f506bb8aca: Waiting +24d689f79ba4: Waiting +7a85079b8234: Verifying Checksum +7a85079b8234: Download complete +579c7ff691b1: Verifying Checksum +579c7ff691b1: Download complete +4976663b5d6d: Verifying Checksum +4976663b5d6d: Download complete +169024b1fb13: Verifying Checksum +169024b1fb13: Download complete +b583c09d23c3: Verifying Checksum +b583c09d23c3: Download complete +c0ffe8ce897f: Verifying Checksum +c0ffe8ce897f: Download complete +405f018f9d1d: Verifying Checksum +405f018f9d1d: Download complete +9b9f0c08d08f: Verifying Checksum +9b9f0c08d08f: Download complete +d9f506bb8aca: Verifying Checksum +d9f506bb8aca: Download complete +24d689f79ba4: Verifying Checksum +24d689f79ba4: Download complete +9cd51f984586: Verifying Checksum +9cd51f984586: Download complete +405f018f9d1d: Pull complete +7a85079b8234: Pull complete +579c7ff691b1: Pull complete +4976663b5d6d: Pull complete +169024b1fb13: Pull complete +c0ffe8ce897f: Pull complete +b583c09d23c3: Pull complete +9b9f0c08d08f: Pull complete +9cd51f984586: Pull complete +d9f506bb8aca: Pull complete +24d689f79ba4: Pull complete +Digest: sha256:88fcb7d92c7f61cd885c4d309c98461f3607aa6dbd57a2474be86e1956b36d13 +Status: Downloaded newer image for mariadb:10 +docker.io/library/mariadb:10 +5: Pulling from library/mysql +824b15f81d65: Pulling fs layer +c559dd1913db: Pulling fs layer +e201c19614e6: Pulling fs layer +f4247e8f6125: Pulling fs layer +dc9fefd8cfb5: Pulling fs layer +af3787edd16d: Pulling fs layer +b6bb40f875d3: Pulling fs layer +09914736f6f7: Pulling fs layer +32c835958ed8: Pulling fs layer +faa6834c9208: Pulling fs layer +ecf3b0798493: Pulling fs layer +af3787edd16d: Waiting +b6bb40f875d3: Waiting +09914736f6f7: Waiting +32c835958ed8: Waiting +faa6834c9208: Waiting +f4247e8f6125: Waiting +ecf3b0798493: Waiting +dc9fefd8cfb5: Waiting +c559dd1913db: Download complete +e201c19614e6: Verifying Checksum +e201c19614e6: Download complete +f4247e8f6125: 
Verifying Checksum +f4247e8f6125: Download complete +dc9fefd8cfb5: Verifying Checksum +dc9fefd8cfb5: Download complete +824b15f81d65: Verifying Checksum +824b15f81d65: Download complete +b6bb40f875d3: Verifying Checksum +b6bb40f875d3: Download complete +09914736f6f7: Verifying Checksum +09914736f6f7: Download complete +faa6834c9208: Verifying Checksum +faa6834c9208: Download complete +af3787edd16d: Verifying Checksum +af3787edd16d: Download complete +ecf3b0798493: Verifying Checksum +ecf3b0798493: Download complete +32c835958ed8: Verifying Checksum +32c835958ed8: Download complete +824b15f81d65: Pull complete +c559dd1913db: Pull complete +e201c19614e6: Pull complete +f4247e8f6125: Pull complete +dc9fefd8cfb5: Pull complete +af3787edd16d: Pull complete +b6bb40f875d3: Pull complete +09914736f6f7: Pull complete +32c835958ed8: Pull complete +faa6834c9208: Pull complete +ecf3b0798493: Pull complete +Digest: sha256:8b4b41d530c40d77a3205c53f7ecf1026d735648d9a09777845f305953e5eff5 +Status: Downloaded newer image for mysql:5 +docker.io/library/mysql:5 +Pulling OSM docker images +Pulling opensourcemano/mon:11 docker image +11: Pulling from opensourcemano/mon +d7bfe07ed847: Pulling fs layer +628bcfb499cb: Pulling fs layer +f5d59c25a781: Pulling fs layer +65c18c0e9bc7: Pulling fs layer +95d35ecb307c: Pulling fs layer +1e6d883dc6c0: Pulling fs layer +31169b28e3a1: Pulling fs layer +1df42af22e58: Pulling fs layer +051f40475366: Pulling fs layer +9fc84f0eb0db: Pulling fs layer +96b4c0ab0b16: Pulling fs layer +e0bbc20f517a: Pulling fs layer +cf3a201b670a: Pulling fs layer +d4d1d239a147: Pulling fs layer +65c18c0e9bc7: Waiting +95d35ecb307c: Waiting +1e6d883dc6c0: Waiting +31169b28e3a1: Waiting +1df42af22e58: Waiting +051f40475366: Waiting +9fc84f0eb0db: Waiting +96b4c0ab0b16: Waiting +e0bbc20f517a: Waiting +cf3a201b670a: Waiting +d4d1d239a147: Waiting +628bcfb499cb: Verifying Checksum +628bcfb499cb: Download complete +65c18c0e9bc7: Verifying Checksum +65c18c0e9bc7: Download 
complete +f5d59c25a781: Verifying Checksum +f5d59c25a781: Download complete +d7bfe07ed847: Verifying Checksum +d7bfe07ed847: Download complete +95d35ecb307c: Verifying Checksum +95d35ecb307c: Download complete +31169b28e3a1: Verifying Checksum +31169b28e3a1: Download complete +1df42af22e58: Verifying Checksum +1df42af22e58: Download complete +051f40475366: Verifying Checksum +051f40475366: Download complete +9fc84f0eb0db: Verifying Checksum +9fc84f0eb0db: Download complete +1e6d883dc6c0: Verifying Checksum +1e6d883dc6c0: Download complete +cf3a201b670a: Verifying Checksum +cf3a201b670a: Download complete +d4d1d239a147: Verifying Checksum +d4d1d239a147: Download complete +96b4c0ab0b16: Verifying Checksum +96b4c0ab0b16: Download complete +e0bbc20f517a: Verifying Checksum +e0bbc20f517a: Download complete +d7bfe07ed847: Pull complete +628bcfb499cb: Pull complete +f5d59c25a781: Pull complete +65c18c0e9bc7: Pull complete +95d35ecb307c: Pull complete +1e6d883dc6c0: Pull complete +31169b28e3a1: Pull complete +1df42af22e58: Pull complete +051f40475366: Pull complete +9fc84f0eb0db: Pull complete +96b4c0ab0b16: Pull complete +e0bbc20f517a: Pull complete +cf3a201b670a: Pull complete +d4d1d239a147: Pull complete +Digest: sha256:6b962cb796a6aa354acf41a1c7690cc679116ce81bcaa1c038d18bf3e205d5ab +Status: Downloaded newer image for opensourcemano/mon:11 +docker.io/opensourcemano/mon:11 +Pulling opensourcemano/pol:11 docker image +11: Pulling from opensourcemano/pol +d7bfe07ed847: Already exists +628bcfb499cb: Already exists +f5d59c25a781: Already exists +023bfe08cfe4: Pulling fs layer +3d92603162cd: Pulling fs layer +1f7bef05651a: Pulling fs layer +42f92e41a472: Pulling fs layer +4f40fec3a1a8: Pulling fs layer +14ac7edfbdfb: Pulling fs layer +d32615c73813: Pulling fs layer +fe785bd21ccf: Pulling fs layer +9880f1bb2989: Pulling fs layer +0d1094142c2c: Pulling fs layer +14ac7edfbdfb: Waiting +d32615c73813: Waiting +fe785bd21ccf: Waiting +9880f1bb2989: Waiting +0d1094142c2c: Waiting 
+42f92e41a472: Waiting +4f40fec3a1a8: Waiting +1f7bef05651a: Verifying Checksum +1f7bef05651a: Download complete +023bfe08cfe4: Verifying Checksum +023bfe08cfe4: Download complete +3d92603162cd: Verifying Checksum +3d92603162cd: Download complete +42f92e41a472: Verifying Checksum +42f92e41a472: Download complete +14ac7edfbdfb: Verifying Checksum +14ac7edfbdfb: Download complete +023bfe08cfe4: Pull complete +4f40fec3a1a8: Download complete +d32615c73813: Verifying Checksum +d32615c73813: Download complete +fe785bd21ccf: Verifying Checksum +fe785bd21ccf: Download complete +9880f1bb2989: Verifying Checksum +9880f1bb2989: Download complete +0d1094142c2c: Verifying Checksum +0d1094142c2c: Download complete +3d92603162cd: Pull complete +1f7bef05651a: Pull complete +42f92e41a472: Pull complete +4f40fec3a1a8: Pull complete +14ac7edfbdfb: Pull complete +d32615c73813: Pull complete +fe785bd21ccf: Pull complete +9880f1bb2989: Pull complete +0d1094142c2c: Pull complete +Digest: sha256:1829ff2616d76063b7041c83b7f941bb80b2a7f1b441d0dc3d34bb82518214fc +Status: Downloaded newer image for opensourcemano/pol:11 +docker.io/opensourcemano/pol:11 +Pulling opensourcemano/nbi:11 docker image +11: Pulling from opensourcemano/nbi +d7bfe07ed847: Already exists +628bcfb499cb: Already exists +f5d59c25a781: Already exists +7e7ae4d30d66: Pulling fs layer +32de122b1682: Pulling fs layer +de22c6c9c40e: Pulling fs layer +e955c360e79e: Pulling fs layer +85c8e90871ea: Pulling fs layer +238bf71d7e56: Pulling fs layer +e955c360e79e: Waiting +85c8e90871ea: Waiting +238bf71d7e56: Waiting +de22c6c9c40e: Verifying Checksum +de22c6c9c40e: Download complete +7e7ae4d30d66: Verifying Checksum +7e7ae4d30d66: Download complete +e955c360e79e: Verifying Checksum +e955c360e79e: Download complete +32de122b1682: Verifying Checksum +32de122b1682: Download complete +85c8e90871ea: Verifying Checksum +85c8e90871ea: Download complete +238bf71d7e56: Verifying Checksum +238bf71d7e56: Download complete +7e7ae4d30d66: Pull 
complete +32de122b1682: Pull complete +de22c6c9c40e: Pull complete +e955c360e79e: Pull complete +85c8e90871ea: Pull complete +238bf71d7e56: Pull complete +Digest: sha256:a7634ab1694d16937c4f9e912358f7a89aa25f6ca31eee45358e5b96b26e7e61 +Status: Downloaded newer image for opensourcemano/nbi:11 +docker.io/opensourcemano/nbi:11 +Pulling opensourcemano/keystone:11 docker image +11: Pulling from opensourcemano/keystone +d7bfe07ed847: Already exists +cb4119dd599e: Pulling fs layer +0dafd027e807: Pulling fs layer +17fcf4f59750: Pulling fs layer +b1be52d2d47d: Pulling fs layer +6792f7885d6f: Pulling fs layer +b1be52d2d47d: Waiting +6792f7885d6f: Waiting +cb4119dd599e: Verifying Checksum +cb4119dd599e: Download complete +0dafd027e807: Verifying Checksum +0dafd027e807: Download complete +cb4119dd599e: Pull complete +6792f7885d6f: Verifying Checksum +6792f7885d6f: Download complete +0dafd027e807: Pull complete +b1be52d2d47d: Verifying Checksum +b1be52d2d47d: Download complete +17fcf4f59750: Verifying Checksum +17fcf4f59750: Download complete +17fcf4f59750: Pull complete +b1be52d2d47d: Pull complete +6792f7885d6f: Pull complete +Digest: sha256:055c5ae8aab60cee82cec6e2bf7c507738d4b3224aba44628bd42bc446af5480 +Status: Downloaded newer image for opensourcemano/keystone:11 +docker.io/opensourcemano/keystone:11 +Pulling opensourcemano/ro:11 docker image +11: Pulling from opensourcemano/ro +d7bfe07ed847: Already exists +628bcfb499cb: Already exists +f5d59c25a781: Already exists +d37e2f98afe2: Pulling fs layer +ffa7f5fe465c: Pulling fs layer +d973cbca1a37: Pulling fs layer +a2b7cfaf29d3: Pulling fs layer +51518ef4214f: Pulling fs layer +a2b7cfaf29d3: Waiting +51518ef4214f: Waiting +d973cbca1a37: Verifying Checksum +d973cbca1a37: Download complete +a2b7cfaf29d3: Verifying Checksum +a2b7cfaf29d3: Download complete +d37e2f98afe2: Verifying Checksum +d37e2f98afe2: Download complete +51518ef4214f: Verifying Checksum +51518ef4214f: Download complete +ffa7f5fe465c: Verifying Checksum 
+ffa7f5fe465c: Download complete +d37e2f98afe2: Pull complete +ffa7f5fe465c: Pull complete +d973cbca1a37: Pull complete +a2b7cfaf29d3: Pull complete +51518ef4214f: Pull complete +Digest: sha256:18869b2a5a7f274807c6c9a002d33aafad2396345025edd6b89b04ad40b9d586 +Status: Downloaded newer image for opensourcemano/ro:11 +docker.io/opensourcemano/ro:11 +Pulling opensourcemano/lcm:11 docker image +11: Pulling from opensourcemano/lcm +d7bfe07ed847: Already exists +628bcfb499cb: Already exists +f5d59c25a781: Already exists +65c18c0e9bc7: Already exists +4a97f09a569c: Pulling fs layer +b2c57e7486df: Pulling fs layer +0a3d4f00fbc7: Pulling fs layer +8459dbade3e8: Pulling fs layer +a7f68d549cd5: Pulling fs layer +1df42af22e58: Pulling fs layer +051f40475366: Pulling fs layer +9fc84f0eb0db: Pulling fs layer +96b4c0ab0b16: Pulling fs layer +e0bbc20f517a: Pulling fs layer +31c668e1443a: Pulling fs layer +b61f559f3157: Pulling fs layer +8459dbade3e8: Waiting +a7f68d549cd5: Waiting +1df42af22e58: Waiting +051f40475366: Waiting +9fc84f0eb0db: Waiting +96b4c0ab0b16: Waiting +e0bbc20f517a: Waiting +31c668e1443a: Waiting +b61f559f3157: Waiting +4a97f09a569c: Download complete +0a3d4f00fbc7: Verifying Checksum +0a3d4f00fbc7: Download complete +4a97f09a569c: Pull complete +8459dbade3e8: Verifying Checksum +8459dbade3e8: Download complete +b2c57e7486df: Verifying Checksum +b2c57e7486df: Download complete +a7f68d549cd5: Verifying Checksum +a7f68d549cd5: Download complete +1df42af22e58: Verifying Checksum +1df42af22e58: Download complete +051f40475366: Verifying Checksum +051f40475366: Download complete +9fc84f0eb0db: Verifying Checksum +9fc84f0eb0db: Download complete +31c668e1443a: Verifying Checksum +31c668e1443a: Download complete +b61f559f3157: Verifying Checksum +b61f559f3157: Download complete +96b4c0ab0b16: Verifying Checksum +96b4c0ab0b16: Download complete +e0bbc20f517a: Verifying Checksum +e0bbc20f517a: Download complete +b2c57e7486df: Pull complete +0a3d4f00fbc7: Pull complete 
+8459dbade3e8: Pull complete +a7f68d549cd5: Pull complete +1df42af22e58: Pull complete +051f40475366: Pull complete +9fc84f0eb0db: Pull complete +96b4c0ab0b16: Pull complete +e0bbc20f517a: Pull complete +31c668e1443a: Pull complete +b61f559f3157: Pull complete +Digest: sha256:2c56a509c8e461f3714115612a25f0dd90f5d01823815b9bebfc6b8c5b1aefa0 +Status: Downloaded newer image for opensourcemano/lcm:11 +docker.io/opensourcemano/lcm:11 +Pulling opensourcemano/ng-ui:11 docker image +11: Pulling from opensourcemano/ng-ui +d7bfe07ed847: Already exists +628bcfb499cb: Already exists +04166b4508f4: Pulling fs layer +e561bebf8747: Pulling fs layer +c337aceaae52: Pulling fs layer +96c77785d1c7: Pulling fs layer +96c77785d1c7: Waiting +c337aceaae52: Verifying Checksum +c337aceaae52: Download complete +e561bebf8747: Download complete +04166b4508f4: Verifying Checksum +04166b4508f4: Download complete +96c77785d1c7: Verifying Checksum +96c77785d1c7: Download complete +04166b4508f4: Pull complete +e561bebf8747: Pull complete +c337aceaae52: Pull complete +96c77785d1c7: Pull complete +Digest: sha256:0ed13935f40bd9c3b79bcb38bbbc6765bd6942bb61cae02565e610783c69ac1e +Status: Downloaded newer image for opensourcemano/ng-ui:11 +docker.io/opensourcemano/ng-ui:11 +Pulling opensourcemano/osmclient:11 docker image +11: Pulling from opensourcemano/osmclient +d7bfe07ed847: Already exists +628bcfb499cb: Already exists +f5d59c25a781: Already exists +5e9bc90fc06f: Pulling fs layer +fa4ca325e2a3: Pulling fs layer +5e9bc90fc06f: Verifying Checksum +5e9bc90fc06f: Download complete +fa4ca325e2a3: Verifying Checksum +fa4ca325e2a3: Download complete +5e9bc90fc06f: Pull complete +fa4ca325e2a3: Pull complete +Digest: sha256:1de552a72b3c15d11c3941be95bcd0dc89effb5700ddad73bf64aa7cc96c1416 +Status: Downloaded newer image for opensourcemano/osmclient:11 +docker.io/opensourcemano/osmclient:11 +Finished pulling and generating docker images +Track docker_images docker_images_ok: 
https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656515489&event=docker_images&operation=docker_images_ok&value=&comment=&tags= +Track osm_files manifest_files_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656515489&event=osm_files&operation=manifest_files_ok&value=&comment=&tags= +Doing a backup of existing env files +cp: cannot stat '/etc/osm/docker/keystone-db.env': No such file or directory +cp: cannot stat '/etc/osm/docker/keystone.env': No such file or directory +cp: cannot stat '/etc/osm/docker/lcm.env': No such file or directory +cp: cannot stat '/etc/osm/docker/mon.env': No such file or directory +cp: cannot stat '/etc/osm/docker/nbi.env': No such file or directory +cp: cannot stat '/etc/osm/docker/pol.env': No such file or directory +cp: cannot stat '/etc/osm/docker/ro-db.env': No such file or directory +cp: cannot stat '/etc/osm/docker/ro.env': No such file or directory +Generating docker env files +OSMLCM_DATABASE_COMMONKEY=Qk49hPlciWmc020vxXBWn5xiCyOAiDRO +OSMLCM_VCA_HOST=192.168.64.23 +OSMLCM_VCA_SECRET=9dc5decf47cbf27da5c6059fde0606b1 +OSMLCM_VCA_PUBKEY=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDzgJ4FXAKQAbe2yzd3Tg31wSmxU9RNM75WymY8T89vm01Ms4JtJEEcgJ+aGNisdxPEWK8OFQQ1ovfjGiN8GvZSoHuqR7QvHeKtN4jKhP8yoNEYgDecVXK93HyEky9t0TY+XT42crEHVqCtDWfSKd4ZqwHrVmgnTYJEJ+8tjO9jJGTvbtoD+FxQRg5B3SHm+u0mhtnaLnOrlBIgxCMWBDK7Zsv1ESNKt2WUSAWfDpfbMhYKAUab6HreXvsBq9dNGmXd1nSan+9HW/auziPoL42mT6PTZSAgWQsd2SpowmeLsSDw7mpmgZkLUSBtNB5WEufdfoECTpw3amt1rAHAJKDb juju-client-key 
+OSMLCM_VCA_CACERT=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQUtVTk5Nb0hJNjdGdllscGIxOGxieUUxQkp5RE1BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TmpJNQpNVFExTmpReFdoY05Nekl3TmpJNU1UVXdNVFF4V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBNUEvdERvMDkKOVFDby94RW8zZDQxUnEwc0hQLzdPUjhRYXluZm83Lzk1K1Mvcm9GaU9mOTl2RU5kM2wvZUE4Mks5Z2xSL1RkMApiRkVtMmwvU3dYNjNmYW4vWTdQVnhTWHI1ZC9HMG5oRE9SZlFTem42MWZMSndsaTF2REhGR29yZElLQlBXQmhYCnZUZGZFVHFpVFpHVDRJYVhqVDQrYUtmQXlXUGhJOEtEQW1hY3VKODBhQU5PLzdqTUw3d1RnZHEwYVNmUUtmbUsKT0dKcW5naTJ2eGlmSHJOcENmTUJSbDkvTDVNYnV3OXl0WHNCaFNyUEJJTnJrODFocTJiQUhWajdOdmZQZlB1ZQphMlVCYjc4OGtNUCt4b25HaE4rOHBVWm9vQTNIRk1wM0pDVmQ5amJORGgvTTN1MnRSWmJyRk1zRVIzeFYrQXFnCmk2STN5OHhMRk5TYjExaFBibWUzWXB1b1N5SC8rWkRDb2pUY1hobVU2dWZweUFBQ1dlQ01nVkJzRkpLNzZDQzYKdmROQXNMMXVlVktCM0RXbHloNHBob1dtTVBiMHZKVUl1RzNoMVFETTJibVNENkpUUGJBMFNyQXBKcGlxTWJQeQp6QTlGZ2ZsYTJSMVIzNStyOEVNTlZwZFZMRzNkUzI5dHg1UzA3RTJYdTFackhoYU5NZDc2VmpCSEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU1kKNWpUUTU2UlRYTnFGODltNFE4UjJoU2Z6VVRBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQTNpUzVxTlJhbWF3SwpwNXdMUHhKOXdRZGtTb3pqUUdxbHZ6dURydWpEZ3QzUloxS2l1MW5lQ1RMY3pMd0lOTWJhaGZaNzlOUW9hbXFKCjNqdWsrR1BvcnJnREozTjN4ZFhobTIzTzN6UE1INm93SDJwSGM4ZXYwK1JrcUx5TmduRmF2M05LUks1M0E3WmYKbkdNMGczOWFUVEF2NlNCdVZHRm93WGZNN0xSL2VTOFdVeXZRRlRicUhsbXBSVllLQ1c3WW8yRkR4dlJhOWVDKwpMbFJ1d3Qxb1RKMVB3alFJaGJpRSs1dGZCOTQ0ejhzZlhqTFNXbFd1cW5UcGk5TzJuTDNFUlZibHA1a0M4dUFUClNTeUV5Z0RPeHRJNGVOQ2hLa3drTnNORlJDeU9DTlZIdi9vQlcvTHB3cGxiR1pidWh0OE0xeWdCUjE5YkdJVksKWFNrWk1LVElycnc2VjZKSitSc1BPeFdNMmVodEtHdDNwTElIMVhqVnByeE5CMXp3R3ZHMFZETGttQWxENjNiUAo5NG1SaHJzM3ZXRGltSWtJUFpQS2ludnVpbVBzdGx1anE0c29RcWt3YytMQmlIS0M4VjBzQmRlMmdyZHU0YUVKCmRXaUI2UWZjNnJmUnF6V1k4RWFlV2xnL0c3RDVxc2N3Yy9SbEVITy8zVFM3b2IrUEVBRmgKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK +# 
OSMLCM_VCA_ENABLEOSUPGRADE=false +# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/ +OSMLCM_VCA_CLOUD=lxd-cloud +OSMLCM_VCA_K8S_CLOUD=k8scloud +MYSQL_ROOT_PASSWORD=BgS1rJ2kIjaKkHQDirhsHUODjNRLqXkz +RO_DB_ROOT_PASSWORD=BgS1rJ2kIjaKkHQDirhsHUODjNRLqXkz +OSMRO_DATABASE_COMMONKEY=Qk49hPlciWmc020vxXBWn5xiCyOAiDRO +MYSQL_ROOT_PASSWORD=BgS1rJ2kIjaKkHQDirhsHUODjNRLqXkz +ROOT_DB_PASSWORD=BgS1rJ2kIjaKkHQDirhsHUODjNRLqXkz +KEYSTONE_DB_PASSWORD=KwXanzVPNxrQq0cBj3bfACz74KHwoIKJ +SERVICE_PASSWORD=qEZQSi5c9CWc8WucnpmuQzavHzi1sQnu +OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=qEZQSi5c9CWc8WucnpmuQzavHzi1sQnu +OSMNBI_DATABASE_COMMONKEY=Qk49hPlciWmc020vxXBWn5xiCyOAiDRO +OSMMON_KEYSTONE_SERVICE_PASSWORD=qEZQSi5c9CWc8WucnpmuQzavHzi1sQnu +OSMMON_DATABASE_COMMONKEY=Qk49hPlciWmc020vxXBWn5xiCyOAiDRO +OSMMON_SQL_DATABASE_URI=mysql://root:BgS1rJ2kIjaKkHQDirhsHUODjNRLqXkz@mysql:3306/mon +OS_NOTIFIER_URI=http://192.168.64.23:8662 +OSMMON_VCA_HOST=192.168.64.23 +OSMMON_VCA_SECRET=9dc5decf47cbf27da5c6059fde0606b1 +OSMMON_VCA_CACERT=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQUtVTk5Nb0hJNjdGdllscGIxOGxieUUxQkp5RE1BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TmpJNQpNVFExTmpReFdoY05Nekl3TmpJNU1UVXdNVFF4V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBNUEvdERvMDkKOVFDby94RW8zZDQxUnEwc0hQLzdPUjhRYXluZm83Lzk1K1Mvcm9GaU9mOTl2RU5kM2wvZUE4Mks5Z2xSL1RkMApiRkVtMmwvU3dYNjNmYW4vWTdQVnhTWHI1ZC9HMG5oRE9SZlFTem42MWZMSndsaTF2REhGR29yZElLQlBXQmhYCnZUZGZFVHFpVFpHVDRJYVhqVDQrYUtmQXlXUGhJOEtEQW1hY3VKODBhQU5PLzdqTUw3d1RnZHEwYVNmUUtmbUsKT0dKcW5naTJ2eGlmSHJOcENmTUJSbDkvTDVNYnV3OXl0WHNCaFNyUEJJTnJrODFocTJiQUhWajdOdmZQZlB1ZQphMlVCYjc4OGtNUCt4b25HaE4rOHBVWm9vQTNIRk1wM0pDVmQ5amJORGgvTTN1MnRSWmJyRk1zRVIzeFYrQXFnCmk2STN5OHhMRk5TYjExaFBibWUzWXB1b1N5SC8rWkRDb2pUY1hobVU2dWZweUFBQ1dlQ01nVkJzRkpLNzZDQzYKdmROQXNMMXVlVktCM0RXbHloNHBob1dtTVBiMHZKVUl1RzNoMVFETTJibVNENkpUUGJBMFNyQXBKc
GlxTWJQeQp6QTlGZ2ZsYTJSMVIzNStyOEVNTlZwZFZMRzNkUzI5dHg1UzA3RTJYdTFackhoYU5NZDc2VmpCSEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU1kKNWpUUTU2UlRYTnFGODltNFE4UjJoU2Z6VVRBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQTNpUzVxTlJhbWF3SwpwNXdMUHhKOXdRZGtTb3pqUUdxbHZ6dURydWpEZ3QzUloxS2l1MW5lQ1RMY3pMd0lOTWJhaGZaNzlOUW9hbXFKCjNqdWsrR1BvcnJnREozTjN4ZFhobTIzTzN6UE1INm93SDJwSGM4ZXYwK1JrcUx5TmduRmF2M05LUks1M0E3WmYKbkdNMGczOWFUVEF2NlNCdVZHRm93WGZNN0xSL2VTOFdVeXZRRlRicUhsbXBSVllLQ1c3WW8yRkR4dlJhOWVDKwpMbFJ1d3Qxb1RKMVB3alFJaGJpRSs1dGZCOTQ0ejhzZlhqTFNXbFd1cW5UcGk5TzJuTDNFUlZibHA1a0M4dUFUClNTeUV5Z0RPeHRJNGVOQ2hLa3drTnNORlJDeU9DTlZIdi9vQlcvTHB3cGxiR1pidWh0OE0xeWdCUjE5YkdJVksKWFNrWk1LVElycnc2VjZKSitSc1BPeFdNMmVodEtHdDNwTElIMVhqVnByeE5CMXp3R3ZHMFZETGttQWxENjNiUAo5NG1SaHJzM3ZXRGltSWtJUFpQS2ludnVpbVBzdGx1anE0c29RcWt3YytMQmlIS0M4VjBzQmRlMmdyZHU0YUVKCmRXaUI2UWZjNnJmUnF6V1k4RWFlV2xnL0c3RDVxc2N3Yy9SbEVITy8zVFM3b2IrUEVBRmgKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK +OSMPOL_SQL_DATABASE_URI=mysql://root:BgS1rJ2kIjaKkHQDirhsHUODjNRLqXkz@mysql:3306/pol +Finished generation of docker env files +Track osm_files env_files_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656515490&event=osm_files&operation=env_files_ok&value=&comment=&tags= +Added 'osm' model on k8scloud with credential 'k8scloud' for user 'admin' +Located charm "mongodb-k8s" in charm-hub, revision 1 +Deploying "mongodb-k8s" from charm-hub charm "mongodb-k8s", revision 1 in channel stable on focal +Track deploy_osm deploy_charmed_services_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656515507&event=deploy_osm&operation=deploy_charmed_services_ok&value=&comment=&tags= +Error from server (AlreadyExists): namespaces "osm" already exists +secret/lcm-secret created +secret/mon-secret created +secret/nbi-secret created +secret/ro-db-secret created +secret/ro-secret created +secret/keystone-secret created 
+secret/pol-secret created +Track deploy_osm kube_secrets_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656515509&event=deploy_osm&operation=kube_secrets_ok&value=&comment=&tags= +Track deploy_osm update_manifest_files_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656515509&event=deploy_osm&operation=update_manifest_files_ok&value=&comment=&tags= +Track deploy_osm namespace_vol_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656515511&event=deploy_osm&operation=namespace_vol_ok&value=&comment=&tags= +clusterrole.rbac.authorization.k8s.io/grafana-clusterrole created +clusterrolebinding.rbac.authorization.k8s.io/grafana-clusterrolebinding created +secret/grafana created +serviceaccount/grafana created +configmap/grafana-dashboard-provider created +configmap/grafana-datasource created +configmap/grafana created +deployment.apps/grafana created +service/grafana created +service/kafka created +statefulset.apps/kafka created +service/keystone created +deployment.apps/keystone created +deployment.apps/lcm created +service/mon created +deployment.apps/mon created +service/mysql created +statefulset.apps/mysql created +service/nbi created +deployment.apps/nbi created +service/ng-ui created +deployment.apps/ng-ui created +deployment.apps/pol created +service/prometheus created +configmap/prom created +statefulset.apps/prometheus created +service/ro created +deployment.apps/ro created +service/zookeeper created +statefulset.apps/zookeeper created +Track deploy_osm deploy_osm_services_k8s_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656515518&event=deploy_osm&operation=deploy_osm_services_k8s_ok&value=&comment=&tags= +sed: can't read /etc/osm/docker/osm_pla/pla.yaml: No such file or directory +error: the path "/etc/osm/docker/osm_pla" does not exist +Track deploy_osm deploy_osm_pla_ok: 
https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656515520&event=deploy_osm&operation=deploy_osm_pla_ok&value=&comment=&tags= + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 3120 100 3120 0 0 9043 0 --:--:-- --:--:-- --:--:-- 9043 +OK +Hit:1 http://security.ubuntu.com/ubuntu focal-security InRelease +Hit:2 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:5 https://download.docker.com/linux/ubuntu focal InRelease +Hit:6 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Hit:7 https://packages.cloud.google.com/apt kubernetes-xenial InRelease +Get:8 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable/osmclient amd64 Packages [474 B] +Get:9 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable/IM amd64 Packages [901 B] +Fetched 1375 B in 5s (254 B/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Hit:1 https://download.docker.com/linux/ubuntu focal InRelease +Hit:2 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:4 http://security.ubuntu.com/ubuntu focal-security InRelease +Hit:6 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:7 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Get:5 https://packages.cloud.google.com/apt kubernetes-xenial InRelease [9383 B] +Fetched 9383 B in 4s (2197 B/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... 
+Reading state information... +The following additional packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu build-essential cpp cpp-9 + dpkg-dev fakeroot g++ g++-9 gcc gcc-9 gcc-9-base libalgorithm-diff-perl + libalgorithm-diff-xs-perl libalgorithm-merge-perl libasan5 libatomic1 + libbinutils libc-dev-bin libc6-dev libcc1-0 libcrypt-dev libctf-nobfd0 + libctf0 libdpkg-perl libexpat1-dev libfakeroot libfile-fcntllock-perl + libgcc-9-dev libgomp1 libisl22 libitm1 liblsan0 libmpc3 libpython3-dev + libpython3.8-dev libquadmath0 libstdc++-9-dev libtsan0 libubsan1 + linux-libc-dev make manpages-dev python-pip-whl python3-dev python3-wheel + python3.8-dev zlib1g-dev +Suggested packages: + binutils-doc cpp-doc gcc-9-locales debian-keyring g++-multilib + g++-9-multilib gcc-9-doc gcc-multilib autoconf automake libtool flex bison + gdb gcc-doc gcc-9-multilib glibc-doc bzr libstdc++-9-doc make-doc +The following NEW packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu build-essential cpp cpp-9 + dpkg-dev fakeroot g++ g++-9 gcc gcc-9 gcc-9-base libalgorithm-diff-perl + libalgorithm-diff-xs-perl libalgorithm-merge-perl libasan5 libatomic1 + libbinutils libc-dev-bin libc6-dev libcc1-0 libcrypt-dev libctf-nobfd0 + libctf0 libdpkg-perl libexpat1-dev libfakeroot libfile-fcntllock-perl + libgcc-9-dev libgomp1 libisl22 libitm1 liblsan0 libmpc3 libpython3-dev + libpython3.8-dev libquadmath0 libstdc++-9-dev libtsan0 libubsan1 + linux-libc-dev make manpages-dev python-pip-whl python3-dev python3-pip + python3-wheel python3.8-dev zlib1g-dev +0 upgraded, 50 newly installed, 0 to remove and 21 not upgraded. +Need to get 52.2 MB of archives. +After this operation, 228 MB of additional disk space will be used. 
+Get:1 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 binutils-common amd64 2.34-6ubuntu1.3 [207 kB] +Get:2 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libbinutils amd64 2.34-6ubuntu1.3 [474 kB] +Get:3 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libctf-nobfd0 amd64 2.34-6ubuntu1.3 [47.4 kB] +Get:4 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libctf0 amd64 2.34-6ubuntu1.3 [46.6 kB] +Get:5 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 binutils-x86-64-linux-gnu amd64 2.34-6ubuntu1.3 [1613 kB] +Get:6 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 binutils amd64 2.34-6ubuntu1.3 [3380 B] +Get:7 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libc-dev-bin amd64 2.31-0ubuntu9.9 [71.8 kB] +Get:8 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 linux-libc-dev amd64 5.4.0-121.137 [1099 kB] +Get:9 http://archive.ubuntu.com/ubuntu focal/main amd64 libcrypt-dev amd64 1:4.4.10-10ubuntu4 [104 kB] +Get:10 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libc6-dev amd64 2.31-0ubuntu9.9 [2519 kB] +Get:11 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 gcc-9-base amd64 9.4.0-1ubuntu1~20.04.1 [19.4 kB] +Get:12 http://archive.ubuntu.com/ubuntu focal/main amd64 libisl22 amd64 0.22.1-1 [592 kB] +Get:13 http://archive.ubuntu.com/ubuntu focal/main amd64 libmpc3 amd64 1.1.0-1 [40.8 kB] +Get:14 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 cpp-9 amd64 9.4.0-1ubuntu1~20.04.1 [7500 kB] +Get:15 http://archive.ubuntu.com/ubuntu focal/main amd64 cpp amd64 4:9.3.0-1ubuntu2 [27.6 kB] +Get:16 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libcc1-0 amd64 10.3.0-1ubuntu1~20.04 [48.8 kB] +Get:17 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libgomp1 amd64 10.3.0-1ubuntu1~20.04 [102 kB] +Get:18 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libitm1 amd64 10.3.0-1ubuntu1~20.04 [26.2 kB] +Get:19 http://archive.ubuntu.com/ubuntu focal-updates/main 
amd64 libatomic1 amd64 10.3.0-1ubuntu1~20.04 [9284 B] +Get:20 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libasan5 amd64 9.4.0-1ubuntu1~20.04.1 [2751 kB] +Get:21 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 liblsan0 amd64 10.3.0-1ubuntu1~20.04 [835 kB] +Get:22 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libtsan0 amd64 10.3.0-1ubuntu1~20.04 [2009 kB] +Get:23 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libubsan1 amd64 10.3.0-1ubuntu1~20.04 [784 kB] +Get:24 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libquadmath0 amd64 10.3.0-1ubuntu1~20.04 [146 kB] +Get:25 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libgcc-9-dev amd64 9.4.0-1ubuntu1~20.04.1 [2359 kB] +Get:26 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 gcc-9 amd64 9.4.0-1ubuntu1~20.04.1 [8274 kB] +Get:27 http://archive.ubuntu.com/ubuntu focal/main amd64 gcc amd64 4:9.3.0-1ubuntu2 [5208 B] +Get:28 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libstdc++-9-dev amd64 9.4.0-1ubuntu1~20.04.1 [1722 kB] +Get:29 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 g++-9 amd64 9.4.0-1ubuntu1~20.04.1 [8420 kB] +Get:30 http://archive.ubuntu.com/ubuntu focal/main amd64 g++ amd64 4:9.3.0-1ubuntu2 [1604 B] +Get:31 http://archive.ubuntu.com/ubuntu focal/main amd64 make amd64 4.2.1-1.2 [162 kB] +Get:32 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libdpkg-perl all 1.19.7ubuntu3.2 [231 kB] +Get:33 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 dpkg-dev all 1.19.7ubuntu3.2 [679 kB] +Get:34 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 build-essential amd64 12.8ubuntu1.1 [4664 B] +Get:35 http://archive.ubuntu.com/ubuntu focal/main amd64 libfakeroot amd64 1.24-1 [25.7 kB] +Get:36 http://archive.ubuntu.com/ubuntu focal/main amd64 fakeroot amd64 1.24-1 [62.6 kB] +Get:37 http://archive.ubuntu.com/ubuntu focal/main amd64 libalgorithm-diff-perl all 1.19.03-2 [46.6 kB] +Get:38 
http://archive.ubuntu.com/ubuntu focal/main amd64 libalgorithm-diff-xs-perl amd64 0.04-6 [11.3 kB] +Get:39 http://archive.ubuntu.com/ubuntu focal/main amd64 libalgorithm-merge-perl all 0.08-3 [12.0 kB] +Get:40 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libexpat1-dev amd64 2.2.9-1ubuntu0.4 [117 kB] +Get:41 http://archive.ubuntu.com/ubuntu focal/main amd64 libfile-fcntllock-perl amd64 0.22-3build4 [33.1 kB] +Get:42 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libpython3.8-dev amd64 3.8.10-0ubuntu1~20.04.4 [3952 kB] +Get:43 http://archive.ubuntu.com/ubuntu focal/main amd64 libpython3-dev amd64 3.8.2-0ubuntu2 [7236 B] +Get:44 http://archive.ubuntu.com/ubuntu focal/main amd64 manpages-dev all 5.05-1 [2266 kB] +Get:45 http://archive.ubuntu.com/ubuntu focal-updates/universe amd64 python-pip-whl all 20.0.2-5ubuntu1.6 [1805 kB] +Get:46 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 zlib1g-dev amd64 1:1.2.11.dfsg-2ubuntu1.3 [155 kB] +Get:47 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 python3.8-dev amd64 3.8.10-0ubuntu1~20.04.4 [514 kB] +Get:48 http://archive.ubuntu.com/ubuntu focal/main amd64 python3-dev amd64 3.8.2-0ubuntu2 [1212 B] +Get:49 http://archive.ubuntu.com/ubuntu focal/universe amd64 python3-wheel all 0.34.2-1 [23.8 kB] +Get:50 http://archive.ubuntu.com/ubuntu focal-updates/universe amd64 python3-pip all 20.0.2-5ubuntu1.6 [231 kB] +Fetched 52.2 MB in 4s (13.8 MB/s) +Selecting previously unselected package binutils-common:amd64. +(Reading database ... 65318 files and directories currently installed.) +Preparing to unpack .../00-binutils-common_2.34-6ubuntu1.3_amd64.deb ... +Unpacking binutils-common:amd64 (2.34-6ubuntu1.3) ... +Selecting previously unselected package libbinutils:amd64. +Preparing to unpack .../01-libbinutils_2.34-6ubuntu1.3_amd64.deb ... +Unpacking libbinutils:amd64 (2.34-6ubuntu1.3) ... +Selecting previously unselected package libctf-nobfd0:amd64. 
+Preparing to unpack .../02-libctf-nobfd0_2.34-6ubuntu1.3_amd64.deb ... +Unpacking libctf-nobfd0:amd64 (2.34-6ubuntu1.3) ... +Selecting previously unselected package libctf0:amd64. +Preparing to unpack .../03-libctf0_2.34-6ubuntu1.3_amd64.deb ... +Unpacking libctf0:amd64 (2.34-6ubuntu1.3) ... +Selecting previously unselected package binutils-x86-64-linux-gnu. +Preparing to unpack .../04-binutils-x86-64-linux-gnu_2.34-6ubuntu1.3_amd64.deb ... +Unpacking binutils-x86-64-linux-gnu (2.34-6ubuntu1.3) ... +Selecting previously unselected package binutils. +Preparing to unpack .../05-binutils_2.34-6ubuntu1.3_amd64.deb ... +Unpacking binutils (2.34-6ubuntu1.3) ... +Selecting previously unselected package libc-dev-bin. +Preparing to unpack .../06-libc-dev-bin_2.31-0ubuntu9.9_amd64.deb ... +Unpacking libc-dev-bin (2.31-0ubuntu9.9) ... +Selecting previously unselected package linux-libc-dev:amd64. +Preparing to unpack .../07-linux-libc-dev_5.4.0-121.137_amd64.deb ... +Unpacking linux-libc-dev:amd64 (5.4.0-121.137) ... +Selecting previously unselected package libcrypt-dev:amd64. +Preparing to unpack .../08-libcrypt-dev_1%3a4.4.10-10ubuntu4_amd64.deb ... +Unpacking libcrypt-dev:amd64 (1:4.4.10-10ubuntu4) ... +Selecting previously unselected package libc6-dev:amd64. +Preparing to unpack .../09-libc6-dev_2.31-0ubuntu9.9_amd64.deb ... +Unpacking libc6-dev:amd64 (2.31-0ubuntu9.9) ... +Selecting previously unselected package gcc-9-base:amd64. +Preparing to unpack .../10-gcc-9-base_9.4.0-1ubuntu1~20.04.1_amd64.deb ... +Unpacking gcc-9-base:amd64 (9.4.0-1ubuntu1~20.04.1) ... +Selecting previously unselected package libisl22:amd64. +Preparing to unpack .../11-libisl22_0.22.1-1_amd64.deb ... +Unpacking libisl22:amd64 (0.22.1-1) ... +Selecting previously unselected package libmpc3:amd64. +Preparing to unpack .../12-libmpc3_1.1.0-1_amd64.deb ... +Unpacking libmpc3:amd64 (1.1.0-1) ... +Selecting previously unselected package cpp-9. 
+Preparing to unpack .../13-cpp-9_9.4.0-1ubuntu1~20.04.1_amd64.deb ... +Unpacking cpp-9 (9.4.0-1ubuntu1~20.04.1) ... +Selecting previously unselected package cpp. +Preparing to unpack .../14-cpp_4%3a9.3.0-1ubuntu2_amd64.deb ... +Unpacking cpp (4:9.3.0-1ubuntu2) ... +Selecting previously unselected package libcc1-0:amd64. +Preparing to unpack .../15-libcc1-0_10.3.0-1ubuntu1~20.04_amd64.deb ... +Unpacking libcc1-0:amd64 (10.3.0-1ubuntu1~20.04) ... +Selecting previously unselected package libgomp1:amd64. +Preparing to unpack .../16-libgomp1_10.3.0-1ubuntu1~20.04_amd64.deb ... +Unpacking libgomp1:amd64 (10.3.0-1ubuntu1~20.04) ... +Selecting previously unselected package libitm1:amd64. +Preparing to unpack .../17-libitm1_10.3.0-1ubuntu1~20.04_amd64.deb ... +Unpacking libitm1:amd64 (10.3.0-1ubuntu1~20.04) ... +Selecting previously unselected package libatomic1:amd64. +Preparing to unpack .../18-libatomic1_10.3.0-1ubuntu1~20.04_amd64.deb ... +Unpacking libatomic1:amd64 (10.3.0-1ubuntu1~20.04) ... +Selecting previously unselected package libasan5:amd64. +Preparing to unpack .../19-libasan5_9.4.0-1ubuntu1~20.04.1_amd64.deb ... +Unpacking libasan5:amd64 (9.4.0-1ubuntu1~20.04.1) ... +Selecting previously unselected package liblsan0:amd64. +Preparing to unpack .../20-liblsan0_10.3.0-1ubuntu1~20.04_amd64.deb ... +Unpacking liblsan0:amd64 (10.3.0-1ubuntu1~20.04) ... +Selecting previously unselected package libtsan0:amd64. +Preparing to unpack .../21-libtsan0_10.3.0-1ubuntu1~20.04_amd64.deb ... +Unpacking libtsan0:amd64 (10.3.0-1ubuntu1~20.04) ... +Selecting previously unselected package libubsan1:amd64. +Preparing to unpack .../22-libubsan1_10.3.0-1ubuntu1~20.04_amd64.deb ... +Unpacking libubsan1:amd64 (10.3.0-1ubuntu1~20.04) ... +Selecting previously unselected package libquadmath0:amd64. +Preparing to unpack .../23-libquadmath0_10.3.0-1ubuntu1~20.04_amd64.deb ... +Unpacking libquadmath0:amd64 (10.3.0-1ubuntu1~20.04) ... 
+Selecting previously unselected package libgcc-9-dev:amd64. +Preparing to unpack .../24-libgcc-9-dev_9.4.0-1ubuntu1~20.04.1_amd64.deb ... +Unpacking libgcc-9-dev:amd64 (9.4.0-1ubuntu1~20.04.1) ... +Selecting previously unselected package gcc-9. +Preparing to unpack .../25-gcc-9_9.4.0-1ubuntu1~20.04.1_amd64.deb ... +Unpacking gcc-9 (9.4.0-1ubuntu1~20.04.1) ... +Selecting previously unselected package gcc. +Preparing to unpack .../26-gcc_4%3a9.3.0-1ubuntu2_amd64.deb ... +Unpacking gcc (4:9.3.0-1ubuntu2) ... +Selecting previously unselected package libstdc++-9-dev:amd64. +Preparing to unpack .../27-libstdc++-9-dev_9.4.0-1ubuntu1~20.04.1_amd64.deb ... +Unpacking libstdc++-9-dev:amd64 (9.4.0-1ubuntu1~20.04.1) ... +Selecting previously unselected package g++-9. +Preparing to unpack .../28-g++-9_9.4.0-1ubuntu1~20.04.1_amd64.deb ... +Unpacking g++-9 (9.4.0-1ubuntu1~20.04.1) ... +Selecting previously unselected package g++. +Preparing to unpack .../29-g++_4%3a9.3.0-1ubuntu2_amd64.deb ... +Unpacking g++ (4:9.3.0-1ubuntu2) ... +Selecting previously unselected package make. +Preparing to unpack .../30-make_4.2.1-1.2_amd64.deb ... +Unpacking make (4.2.1-1.2) ... +Selecting previously unselected package libdpkg-perl. +Preparing to unpack .../31-libdpkg-perl_1.19.7ubuntu3.2_all.deb ... +Unpacking libdpkg-perl (1.19.7ubuntu3.2) ... +Selecting previously unselected package dpkg-dev. +Preparing to unpack .../32-dpkg-dev_1.19.7ubuntu3.2_all.deb ... +Unpacking dpkg-dev (1.19.7ubuntu3.2) ... +Selecting previously unselected package build-essential. +Preparing to unpack .../33-build-essential_12.8ubuntu1.1_amd64.deb ... +Unpacking build-essential (12.8ubuntu1.1) ... +Selecting previously unselected package libfakeroot:amd64. +Preparing to unpack .../34-libfakeroot_1.24-1_amd64.deb ... +Unpacking libfakeroot:amd64 (1.24-1) ... +Selecting previously unselected package fakeroot. +Preparing to unpack .../35-fakeroot_1.24-1_amd64.deb ... +Unpacking fakeroot (1.24-1) ... 
+Selecting previously unselected package libalgorithm-diff-perl. +Preparing to unpack .../36-libalgorithm-diff-perl_1.19.03-2_all.deb ... +Unpacking libalgorithm-diff-perl (1.19.03-2) ... +Selecting previously unselected package libalgorithm-diff-xs-perl. +Preparing to unpack .../37-libalgorithm-diff-xs-perl_0.04-6_amd64.deb ... +Unpacking libalgorithm-diff-xs-perl (0.04-6) ... +Selecting previously unselected package libalgorithm-merge-perl. +Preparing to unpack .../38-libalgorithm-merge-perl_0.08-3_all.deb ... +Unpacking libalgorithm-merge-perl (0.08-3) ... +Selecting previously unselected package libexpat1-dev:amd64. +Preparing to unpack .../39-libexpat1-dev_2.2.9-1ubuntu0.4_amd64.deb ... +Unpacking libexpat1-dev:amd64 (2.2.9-1ubuntu0.4) ... +Selecting previously unselected package libfile-fcntllock-perl. +Preparing to unpack .../40-libfile-fcntllock-perl_0.22-3build4_amd64.deb ... +Unpacking libfile-fcntllock-perl (0.22-3build4) ... +Selecting previously unselected package libpython3.8-dev:amd64. +Preparing to unpack .../41-libpython3.8-dev_3.8.10-0ubuntu1~20.04.4_amd64.deb ... +Unpacking libpython3.8-dev:amd64 (3.8.10-0ubuntu1~20.04.4) ... +Selecting previously unselected package libpython3-dev:amd64. +Preparing to unpack .../42-libpython3-dev_3.8.2-0ubuntu2_amd64.deb ... +Unpacking libpython3-dev:amd64 (3.8.2-0ubuntu2) ... +Selecting previously unselected package manpages-dev. +Preparing to unpack .../43-manpages-dev_5.05-1_all.deb ... +Unpacking manpages-dev (5.05-1) ... +Selecting previously unselected package python-pip-whl. +Preparing to unpack .../44-python-pip-whl_20.0.2-5ubuntu1.6_all.deb ... +Unpacking python-pip-whl (20.0.2-5ubuntu1.6) ... +Selecting previously unselected package zlib1g-dev:amd64. +Preparing to unpack .../45-zlib1g-dev_1%3a1.2.11.dfsg-2ubuntu1.3_amd64.deb ... +Unpacking zlib1g-dev:amd64 (1:1.2.11.dfsg-2ubuntu1.3) ... +Selecting previously unselected package python3.8-dev. 
+Preparing to unpack .../46-python3.8-dev_3.8.10-0ubuntu1~20.04.4_amd64.deb ... +Unpacking python3.8-dev (3.8.10-0ubuntu1~20.04.4) ... +Selecting previously unselected package python3-dev. +Preparing to unpack .../47-python3-dev_3.8.2-0ubuntu2_amd64.deb ... +Unpacking python3-dev (3.8.2-0ubuntu2) ... +Selecting previously unselected package python3-wheel. +Preparing to unpack .../48-python3-wheel_0.34.2-1_all.deb ... +Unpacking python3-wheel (0.34.2-1) ... +Selecting previously unselected package python3-pip. +Preparing to unpack .../49-python3-pip_20.0.2-5ubuntu1.6_all.deb ... +Unpacking python3-pip (20.0.2-5ubuntu1.6) ... +Setting up manpages-dev (5.05-1) ... +Setting up libfile-fcntllock-perl (0.22-3build4) ... +Setting up libalgorithm-diff-perl (1.19.03-2) ... +Setting up binutils-common:amd64 (2.34-6ubuntu1.3) ... +Setting up linux-libc-dev:amd64 (5.4.0-121.137) ... +Setting up libctf-nobfd0:amd64 (2.34-6ubuntu1.3) ... +Setting up libgomp1:amd64 (10.3.0-1ubuntu1~20.04) ... +Setting up python3-wheel (0.34.2-1) ... +Setting up libfakeroot:amd64 (1.24-1) ... +Setting up fakeroot (1.24-1) ... +update-alternatives: using /usr/bin/fakeroot-sysv to provide /usr/bin/fakeroot (fakeroot) in auto mode +Setting up make (4.2.1-1.2) ... +Setting up libquadmath0:amd64 (10.3.0-1ubuntu1~20.04) ... +Setting up libmpc3:amd64 (1.1.0-1) ... +Setting up libatomic1:amd64 (10.3.0-1ubuntu1~20.04) ... +Setting up libdpkg-perl (1.19.7ubuntu3.2) ... +Setting up libubsan1:amd64 (10.3.0-1ubuntu1~20.04) ... +Setting up libcrypt-dev:amd64 (1:4.4.10-10ubuntu4) ... +Setting up libisl22:amd64 (0.22.1-1) ... +Setting up python-pip-whl (20.0.2-5ubuntu1.6) ... +Setting up libbinutils:amd64 (2.34-6ubuntu1.3) ... +Setting up libc-dev-bin (2.31-0ubuntu9.9) ... +Setting up libalgorithm-diff-xs-perl (0.04-6) ... +Setting up libcc1-0:amd64 (10.3.0-1ubuntu1~20.04) ... +Setting up liblsan0:amd64 (10.3.0-1ubuntu1~20.04) ... +Setting up libitm1:amd64 (10.3.0-1ubuntu1~20.04) ... 
+Setting up gcc-9-base:amd64 (9.4.0-1ubuntu1~20.04.1) ... +Setting up libalgorithm-merge-perl (0.08-3) ... +Setting up libtsan0:amd64 (10.3.0-1ubuntu1~20.04) ... +Setting up libctf0:amd64 (2.34-6ubuntu1.3) ... +Setting up libasan5:amd64 (9.4.0-1ubuntu1~20.04.1) ... +Setting up python3-pip (20.0.2-5ubuntu1.6) ... +Setting up cpp-9 (9.4.0-1ubuntu1~20.04.1) ... +Setting up libc6-dev:amd64 (2.31-0ubuntu9.9) ... +Setting up binutils-x86-64-linux-gnu (2.34-6ubuntu1.3) ... +Setting up binutils (2.34-6ubuntu1.3) ... +Setting up dpkg-dev (1.19.7ubuntu3.2) ... +Setting up libgcc-9-dev:amd64 (9.4.0-1ubuntu1~20.04.1) ... +Setting up libexpat1-dev:amd64 (2.2.9-1ubuntu0.4) ... +Setting up libpython3.8-dev:amd64 (3.8.10-0ubuntu1~20.04.4) ... +Setting up zlib1g-dev:amd64 (1:1.2.11.dfsg-2ubuntu1.3) ... +Setting up cpp (4:9.3.0-1ubuntu2) ... +Setting up gcc-9 (9.4.0-1ubuntu1~20.04.1) ... +Setting up libpython3-dev:amd64 (3.8.2-0ubuntu2) ... +Setting up libstdc++-9-dev:amd64 (9.4.0-1ubuntu1~20.04.1) ... +Setting up gcc (4:9.3.0-1ubuntu2) ... +Setting up g++-9 (9.4.0-1ubuntu1~20.04.1) ... +Setting up python3.8-dev (3.8.10-0ubuntu1~20.04.4) ... +Setting up g++ (4:9.3.0-1ubuntu2) ... +update-alternatives: using /usr/bin/g++ to provide /usr/bin/c++ (c++) in auto mode +Setting up build-essential (12.8ubuntu1.1) ... +Setting up python3-dev (3.8.2-0ubuntu2) ... +Processing triggers for man-db (2.9.1-1) ... +Processing triggers for libc-bin (2.31-0ubuntu9.9) ... +Collecting pip + Downloading pip-22.1.2-py3-none-any.whl (2.1 MB) +Installing collected packages: pip + Attempting uninstall: pip + Found existing installation: pip 20.0.2 + Not uninstalling pip at /usr/lib/python3/dist-packages, outside environment /usr + Can't uninstall 'pip'. No files were found to uninstall. 
+Successfully installed pip-22.1.2 +Collecting python-magic + Downloading python_magic-0.4.27-py2.py3-none-any.whl (13 kB) +Collecting pyangbind + Downloading pyangbind-0.8.1.tar.gz (48 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 48.6/48.6 kB 2.5 MB/s eta 0:00:00 + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +Collecting verboselogs + Downloading verboselogs-1.7-py2.py3-none-any.whl (11 kB) +Collecting bitarray + Downloading bitarray-2.5.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (242 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 242.4/242.4 kB 1.6 MB/s eta 0:00:00 +Collecting enum34 + Downloading enum34-1.1.10-py3-none-any.whl (11 kB) +Collecting lxml + Downloading lxml-4.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl (6.9 MB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.9/6.9 MB 7.8 MB/s eta 0:00:00 +Collecting pyang + Downloading pyang-2.5.3-py2.py3-none-any.whl (592 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 592.9/592.9 kB 6.8 MB/s eta 0:00:00 +Collecting regex + Downloading regex-2022.6.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (764 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 764.9/764.9 kB 2.2 MB/s eta 0:00:00 +Requirement already satisfied: six in /usr/lib/python3/dist-packages (from pyangbind) (1.14.0) +Building wheels for collected packages: pyangbind + Building wheel for pyangbind (setup.py): started + Building wheel for pyangbind (setup.py): finished with status 'done' + Created wheel for pyangbind: filename=pyangbind-0.8.1-py3-none-any.whl size=47761 sha256=5f0d027b2830fa98c3e5c263a6ebd47e5a84b3ed70a8dab32f0842f3a84f4d97 + Stored in directory: /root/.cache/pip/wheels/0c/fb/5a/3d09fc1fae9987036f767b8aa0076ac7f91ce4cd952de3f49a +Successfully built pyangbind +Installing collected packages: verboselogs, enum34, bitarray, regex, python-magic, lxml, pyang, pyangbind +Successfully installed bitarray-2.5.1 enum34-1.1.10 
lxml-4.9.0 pyang-2.5.3 pyangbind-0.8.1 python-magic-0.4.27 regex-2022.6.2 verboselogs-1.7 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv +Reading package lists... +Building dependency tree... +Reading state information... +The following NEW packages will be installed: + python3-osm-im python3-osmclient +0 upgraded, 2 newly installed, 0 to remove and 21 not upgraded. +Need to get 256 kB of archives. +After this operation, 8304 kB of additional disk space will be used. +Get:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable/IM amd64 python3-osm-im all 11.0.3+g2aa4f3e-1 [188 kB] +Get:2 https://osm-download.etsi.org/repository/osm/debian/ReleaseELEVEN stable/osmclient amd64 python3-osmclient all 11.0.3+g3152c07-1 [68.6 kB] +Fetched 256 kB in 0s (684 kB/s) +Selecting previously unselected package python3-osm-im. +(Reading database ... 71431 files and directories currently installed.) +Preparing to unpack .../python3-osm-im_11.0.3+g2aa4f3e-1_all.deb ... +Unpacking python3-osm-im (11.0.3+g2aa4f3e-1) ... +Selecting previously unselected package python3-osmclient. +Preparing to unpack .../python3-osmclient_11.0.3+g3152c07-1_all.deb ... +Unpacking python3-osmclient (11.0.3+g3152c07-1) ... +Setting up python3-osmclient (11.0.3+g3152c07-1) ... +Setting up python3-osm-im (11.0.3+g2aa4f3e-1) ... 
+Defaulting to user installation because normal site-packages is not writeable +Collecting bitarray==2.3.5 + Downloading bitarray-2.3.5.tar.gz (88 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 88.4/88.4 kB 2.4 MB/s eta 0:00:00 + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +Requirement already satisfied: enum34==1.1.10 in /usr/local/lib/python3.8/dist-packages (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 19)) (1.1.10) +Collecting lxml==4.7.1 + Downloading lxml-4.7.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl (6.9 MB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.9/6.9 MB 6.0 MB/s eta 0:00:00 +Collecting pyang==2.5.2 + Downloading pyang-2.5.2-py2.py3-none-any.whl (595 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 595.5/595.5 kB 5.6 MB/s eta 0:00:00 +Requirement already satisfied: pyangbind==0.8.1 in /usr/local/lib/python3.8/dist-packages (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 29)) (0.8.1) +Collecting pyyaml==5.4.1 + Downloading PyYAML-5.4.1-cp38-cp38-manylinux1_x86_64.whl (662 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 662.4/662.4 kB 5.5 MB/s eta 0:00:00 +Collecting regex==2021.11.10 + Downloading regex-2021.11.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (764 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 764.7/764.7 kB 7.1 MB/s eta 0:00:00 +Collecting six==1.16.0 + Downloading six-1.16.0-py2.py3-none-any.whl (11 kB) +Building wheels for collected packages: bitarray + Building wheel for bitarray (setup.py): started + Building wheel for bitarray (setup.py): finished with status 'done' + Created wheel for bitarray: filename=bitarray-2.3.5-cp38-cp38-linux_x86_64.whl size=188767 sha256=5fad29de4bee74cfc72ef13d88d800f1746a5520b83443edfafba7b7efcb6879 + Stored in directory: /home/ubuntu/.cache/pip/wheels/94/69/4b/830f24f80f3064bf446eb37eb441d7c1af74eda281bc2d9d03 +Successfully built bitarray +Installing 
collected packages: regex, bitarray, six, pyyaml, lxml, pyang +ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. +launchpadlib 1.10.13 requires testresources, which is not installed. +Successfully installed bitarray-2.3.5 lxml-4.7.1 pyang-2.5.2 pyyaml-5.4.1 regex-2021.11.10 six-1.16.0 +Reading package lists... +Building dependency tree... +Reading state information... +The following additional packages will be installed: + curl libcurl4 libssl1.1 +Suggested packages: + libcurl4-doc libidn11-dev libkrb5-dev libldap2-dev librtmp-dev libssh2-1-dev + pkg-config libssl-doc +The following NEW packages will be installed: + libcurl4-openssl-dev libssl-dev +The following packages will be upgraded: + curl libcurl4 libssl1.1 +3 upgraded, 2 newly installed, 0 to remove and 18 not upgraded. +Need to get 3623 kB of archives. +After this operation, 9552 kB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libssl1.1 amd64 1.1.1f-1ubuntu2.15 [1321 kB] +Get:2 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 curl amd64 7.68.0-1ubuntu2.12 [161 kB] +Get:3 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libcurl4 amd64 7.68.0-1ubuntu2.12 [235 kB] +Get:4 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libcurl4-openssl-dev amd64 7.68.0-1ubuntu2.12 [322 kB] +Get:5 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 libssl-dev amd64 1.1.1f-1ubuntu2.15 [1584 kB] +Preconfiguring packages ... +Fetched 3623 kB in 1s (3001 kB/s) +(Reading database ... 71519 files and directories currently installed.) +Preparing to unpack .../libssl1.1_1.1.1f-1ubuntu2.15_amd64.deb ... +Unpacking libssl1.1:amd64 (1.1.1f-1ubuntu2.15) over (1.1.1f-1ubuntu2.13) ... +Preparing to unpack .../curl_7.68.0-1ubuntu2.12_amd64.deb ... +Unpacking curl (7.68.0-1ubuntu2.12) over (7.68.0-1ubuntu2.11) ... 
+Preparing to unpack .../libcurl4_7.68.0-1ubuntu2.12_amd64.deb ... +Unpacking libcurl4:amd64 (7.68.0-1ubuntu2.12) over (7.68.0-1ubuntu2.11) ... +Selecting previously unselected package libcurl4-openssl-dev:amd64. +Preparing to unpack .../libcurl4-openssl-dev_7.68.0-1ubuntu2.12_amd64.deb ... +Unpacking libcurl4-openssl-dev:amd64 (7.68.0-1ubuntu2.12) ... +Selecting previously unselected package libssl-dev:amd64. +Preparing to unpack .../libssl-dev_1.1.1f-1ubuntu2.15_amd64.deb ... +Unpacking libssl-dev:amd64 (1.1.1f-1ubuntu2.15) ... +Setting up libssl1.1:amd64 (1.1.1f-1ubuntu2.15) ... +Setting up libssl-dev:amd64 (1.1.1f-1ubuntu2.15) ... +Setting up libcurl4:amd64 (7.68.0-1ubuntu2.12) ... +Setting up curl (7.68.0-1ubuntu2.12) ... +Setting up libcurl4-openssl-dev:amd64 (7.68.0-1ubuntu2.12) ... +Processing triggers for man-db (2.9.1-1) ... +Processing triggers for libc-bin (2.31-0ubuntu9.9) ... +Defaulting to user installation because normal site-packages is not writeable +Collecting certifi==2021.10.8 + Downloading certifi-2021.10.8-py2.py3-none-any.whl (149 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 149.2/149.2 kB 3.5 MB/s eta 0:00:00 +Collecting charset-normalizer==2.0.10 + Downloading charset_normalizer-2.0.10-py3-none-any.whl (39 kB) +Collecting click==8.0.3 + Downloading click-8.0.3-py3-none-any.whl (97 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 97.5/97.5 kB 6.8 MB/s eta 0:00:00 +Collecting idna==3.3 + Downloading idna-3.3-py3-none-any.whl (61 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 61.2/61.2 kB 1.9 MB/s eta 0:00:00 +Collecting jinja2==3.0.3 + Downloading Jinja2-3.0.3-py3-none-any.whl (133 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 133.6/133.6 kB 12.1 MB/s eta 0:00:00 +Collecting markupsafe==2.0.1 + Downloading MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (30 kB) +Collecting packaging==21.3 + Downloading packaging-21.3-py3-none-any.whl (40 kB) + 
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 40.8/40.8 kB 1.2 MB/s eta 0:00:00 +Collecting prettytable==3.0.0 + Downloading prettytable-3.0.0-py3-none-any.whl (24 kB) +Collecting pycurl==7.44.1 + Downloading pycurl-7.44.1.tar.gz (227 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 227.6/227.6 kB 9.9 MB/s eta 0:00:00 + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +Collecting pyparsing==3.0.6 + Downloading pyparsing-3.0.6-py3-none-any.whl (97 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 97.6/97.6 kB 7.3 MB/s eta 0:00:00 +Collecting python-magic==0.4.24 + Downloading python_magic-0.4.24-py2.py3-none-any.whl (12 kB) +Requirement already satisfied: pyyaml==5.4.1 in ./.local/lib/python3.8/site-packages (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 39)) (5.4.1) +Collecting requests==2.27.1 + Downloading requests-2.27.1-py2.py3-none-any.whl (63 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 63.1/63.1 kB 3.1 MB/s eta 0:00:00 +Collecting urllib3==1.26.8 + Downloading urllib3-1.26.8-py2.py3-none-any.whl (138 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 138.7/138.7 kB 5.9 MB/s eta 0:00:00 +Requirement already satisfied: verboselogs==1.7 in /usr/local/lib/python3.8/dist-packages (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 45)) (1.7) +Collecting wcwidth==0.2.5 + Downloading wcwidth-0.2.5-py2.py3-none-any.whl (30 kB) +Building wheels for collected packages: pycurl + Building wheel for pycurl (setup.py): started + Building wheel for pycurl (setup.py): finished with status 'done' + Created wheel for pycurl: filename=pycurl-7.44.1-cp38-cp38-linux_x86_64.whl size=322265 sha256=db30820dccde0dd85bb83a1046618f1bb90d5fd8ef9f6569cf550326a59c0204 + Stored in directory: /home/ubuntu/.cache/pip/wheels/a1/7e/55/e76d883b865ed83a76f1a0dc53af308f522db374be56d27909 +Successfully built pycurl +Installing collected packages: wcwidth, certifi, urllib3, python-magic, pyparsing, pycurl, 
prettytable, markupsafe, idna, click, charset-normalizer, requests, packaging, jinja2 + WARNING: The script normalizer is installed in '/home/ubuntu/.local/bin' which is not on PATH. + Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location. +Successfully installed certifi-2021.10.8 charset-normalizer-2.0.10 click-8.0.3 idna-3.3 jinja2-3.0.3 markupsafe-2.0.1 packaging-21.3 prettytable-3.0.0 pycurl-7.44.1 pyparsing-3.0.6 python-magic-0.4.24 requests-2.27.1 urllib3-1.26.8 wcwidth-0.2.5 + +OSM client installed +OSM client assumes that OSM host is running in localhost (127.0.0.1). +In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file: + export OSM_HOSTNAME= +Track osmclient osmclient_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656515891&event=osmclient&operation=osmclient_ok&value=&comment=&tags= +Checking OSM health state... + +Bootstraping... 1 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 2 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 3 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 4 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 
5 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 6 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 7 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 8 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 9 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 10 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 11 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 12 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 13 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 14 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 
15 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +2 of 6 statefulsets starting: + mongodb-k8s 0/1 + prometheus 0/1 + + +Bootstraping... 16 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 17 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 18 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 19 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 20 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 21 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 22 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 23 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 24 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 25 attempts of 84 +2 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + + +Bootstraping... 26 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + + +Bootstraping... 
27 attempts of 84 +2 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + + +Bootstraping... 28 attempts of 84 +2 of 9 deployments starting: + lcm 0/1 0 + ro 0/1 0 + + +Bootstraping... 29 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 30 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 31 attempts of 84 +3 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + + +Bootstraping... 32 attempts of 84 +2 of 9 deployments starting: + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 33 attempts of 84 +3 of 9 deployments starting: + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 34 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 35 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 36 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +===> Successful checks: 1/24 +Bootstraping... 1 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 2 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 3 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 4 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 5 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 6 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 7 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 
8 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 9 attempts of 84 +3 of 9 deployments starting: + lcm 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 10 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 11 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 12 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 13 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 14 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 15 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 16 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 17 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 18 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 19 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 20 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 21 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 22 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 23 attempts of 84 +3 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + ro 0/1 0 + + +Bootstraping... 24 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 
25 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 26 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 27 attempts of 84 +3 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + + +Bootstraping... 28 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 29 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 30 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 31 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 32 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 33 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 34 attempts of 84 +3 of 9 deployments starting: + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 35 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 36 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 37 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 38 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 39 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 40 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 41 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 
42 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 43 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 44 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 45 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 46 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 47 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 48 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 49 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 50 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 51 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 52 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 53 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 54 attempts of 84 +3 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + ro 0/1 0 + + +Bootstraping... 55 attempts of 84 +3 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + ro 0/1 0 + + +Bootstraping... 56 attempts of 84 +2 of 9 deployments starting: + lcm 0/1 0 + ro 0/1 0 + + +Bootstraping... 57 attempts of 84 +2 of 9 deployments starting: + lcm 0/1 0 + ro 0/1 0 + + +Bootstraping... 58 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + + +Bootstraping... 59 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + + +Bootstraping... 
60 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + + +Bootstraping... 61 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + + +Bootstraping... 62 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + + +Bootstraping... 63 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + + +Bootstraping... 64 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + + +Bootstraping... 65 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + +===> Successful checks: 24/24 +SYSTEM IS READY +Track healthchecks after_healthcheck_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656516598&event=healthchecks&operation=after_healthcheck_ok&value=&comment=&tags= +875f163f-abfe-4dee-968f-62c12d2323f6 +3920eeca-eb30-4a60-bd43-552a37b4351d +Track final_ops add_local_k8scluster_ok: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656516604&event=final_ops&operation=add_local_k8scluster_ok&value=&comment=&tags= +Track end end: https://osm.etsi.org/InstallLog.php?&installation_id=1656514104-XydIoxsPFxDU1fLG&local_ts=1656516604&event=end&operation=end&value=&comment=&tags= +/etc/osm +/etc/osm/docker +/etc/osm/docker/ro.env +/etc/osm/docker/lcm.env +/etc/osm/docker/ro-db.env +/etc/osm/docker/pol.env +/etc/osm/docker/mon.env +/etc/osm/docker/nbi.env +/etc/osm/docker/keystone-db.env +/etc/osm/docker/cluster-config.yaml +/etc/osm/docker/metallb-config.yaml +/etc/osm/docker/osm_pods +/etc/osm/docker/osm_pods/keystone.yaml +/etc/osm/docker/osm_pods/lcm.yaml +/etc/osm/docker/osm_pods/ng-ui.yaml +/etc/osm/docker/osm_pods/ro.yaml +/etc/osm/docker/osm_pods/kafka.yaml +/etc/osm/docker/osm_pods/mon.yaml +/etc/osm/docker/osm_pods/prometheus.yaml +/etc/osm/docker/osm_pods/mysql.yaml +/etc/osm/docker/osm_pods/zookeeper.yaml +/etc/osm/docker/osm_pods/nbi.yaml +/etc/osm/docker/osm_pods/grafana.yaml +/etc/osm/docker/osm_pods/pol.yaml +/etc/osm/docker/keystone.env + +DONE diff --git 
a/_tmp/custom-osm-lcm/procedure.md b/_tmp/custom-osm-lcm/procedure.md new file mode 100644 index 0000000..3beb950 --- /dev/null +++ b/_tmp/custom-osm-lcm/procedure.md @@ -0,0 +1,287 @@ +Custom OSM LCM image +-------------------- +> what a schlep! + +Below are the steps to build and deploy a custom OSM LCM Docker image. +Mostly what OSM devs told Gabriele to do, plus some guesswork. Make +sure to keep hydrated b/c the procedure takes a few hours(1) and will +make you sweat a lot :-) + +(1) my hardware: MacBook Pro 13'', 2 GHz Dual-Core Intel Core i5, +8 GB RAM. Make sure to shut down every app since the below procedure +needs a lot of horsepower to run decently. + +Notice at the moment we still can't get all the steps below to work. +Details [over here][fails]. + + +### Build OSM 11 VM + +We'll build and deploy our custom LCM image in an OSM release 11 VM. +Not explicitly mentioned by the OSM devs, but I don't see any other +easy way of doing that given I've got no clue about how they set up +their dev env. + +```console +$ multipass launch --name osm11 --cpus 2 --mem 6G --disk 40G 20.04 +$ multipass shell osm11 +% wget https://osm-download.etsi.org/ftp/osm-11.0-eleven/install_osm.sh +% chmod +x install_osm.sh +% ./install_osm.sh 2>&1 | tee install.log +``` + +Make sure to check you've ended up with a functional OSM cluster before +moving on---this includes *waiting for all the OSM pods to be up and +running*. Then shut down and restart your freshly minted OSM VM. + +See: + +- https://osm.etsi.org/docs/user-guide/latest/03-installing-osm.html + + +### Set up source workspace + +OSM devs say: + +> Clone these repositories in your workspace on OSM host: +> cd workspace + +So that probably means create a `workspace` directory in your home on +the OSM VM you've just built... 
+ + +```console +$ multipass shell osm11 +% mkdir workspace && cd workspace +``` + +Cloning repos + +```console +% git clone https://osm.etsi.org/gerrit/osm/LCM +% git clone https://osm.etsi.org/gerrit/osm/N2VC +% git clone https://osm.etsi.org/gerrit/osm/devops +% git clone https://osm.etsi.org/gerrit/osm/common +% git clone https://osm.etsi.org/gerrit/osm/IM +% git clone https://osm.etsi.org/gerrit/osm/RO +% git clone https://osm.etsi.org/gerrit/osm/NBI +``` + +Notice the RO and NBI repos weren't in the original instructions +they gave us, but then their build command requires them. So we +clone those two as well. + +Setting up OSM's git commit hook in each repo + + +```console +% curl https://osm.etsi.org/gerrit/tools/hooks/commit-msg > commit-msg +% chmod +x commit-msg +% for r in IM LCM N2VC NBI RO common devops; do cp commit-msg $r/.git/hooks/; done +``` + + +### Install additional deps + +OSM devs mentioned you've got to install QHttp too. + +```console +% devops/tools/local-build.sh --install-qhttpd +Attempting to open the browser failed, but the server might still work +This might happen if you're running this with sudo, a none graphical session, are lacking xdg-desktop portal support or have disabled the desktop interface +Attempting to serve files from /home/ubuntu/snap/qhttp/common, press control + c to exit +Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) ... +``` + +Hit `Ctrl+c` to exit. + + +### Build LCM image + +First you've got to build the artifacts that make up the LCM image + +```console +% devops/tools/local-build.sh --module common,IM,N2VC,LCM,NBI stage-2 +``` + +Notice the original build command they gave us included RO too: + +```console +% devops/tools/local-build.sh --module common,IM,N2VC,RO,LCM,NBI stage-2 +``` + +but it looks like trying to build RO is a lost cause. Details +[over here][fails]. So we skip building RO for the moment. + +Then build a Docker image from the above components. 
The image name is +`opensourcemano/lcm:devel`. + +```console +% devops/tools/local-build.sh --module LCM stage-3 +``` + + +### Deploy LCM image + +Finally, patch your OSM deployment to use the dev image you've just +built: + +```console +% kubectl -n osm patch deployment lcm --patch '{"spec": {"template": {"spec": {"containers": [{"name": "lcm", "image": "opensourcemano/lcm:devel"}]}}}}' +deployment.apps/lcm patched +``` + +And as a sanity check: + +```console +% kubectl -n osm get deployment lcm -o yaml | grep 'image: open' + image: opensourcemano/lcm:devel + +% kubectl -n osm get pod | grep lcm +lcm-7cf9644d9b-zthgf 1/1 Running 0 2m33s +``` + + +### NS instance test + +Let's make sure the custom LCM image plays well with the rest of +OSM services in the cluster. We're going to add some repos, upload +packages and then create an NS instance using those packages. + +But first, let's configure the KNF infra for an isolated Kubernetes +cluster + +```console +% osm vim-create --name mylocation1 --user u --password p --tenant p \ +> --account_type dummy --auth_url http://localhost/dummy +c8779fff-ba39-4cbc-9259-48d02e374dc2 + +% osm k8scluster-add cluster --creds .kube/config --vim mylocation1 \ +> --k8s-nets '{k8s_net1: null}' --version "v1.15.12" \ +> --description="Isolated K8s cluster at mylocation1" +f68c5f89-901a-4bb7-bfdd-1c8a2a04bf10 +``` + +Adding K8s repos + +```console +% osm repo-add --type helm-chart --description "Bitnami repo" bitnami https://charts.bitnami.com/bitnami +fe28ae36-4400-4821-bd6a-442cbb379568 + +% osm repo-add --type helm-chart --description "Cetic repo" cetic https://cetic.github.io/helm-charts +26a3b21b-6e73-4029-9a1c-5393777e67ea + +% osm repo-add --type helm-chart --description "Elastic repo" elastic https://helm.elastic.co +19f21297-6988-46b9-933b-1479c0516bdb +``` + +Now upload the OSM OpenLDAP packages we're going to use to create NS +instances. 
To do that, open a terminal in this repo's root dir, then: + +```console +$ cd _tmp/osm-pkgs +$ multipass mount ./ osm11:/mnt/osm-pkgs +$ multipass shell osm11 +% cd /mnt/osm-pkgs + +% osm nfpkg-create openldap_knf.tar.gz +3540cf89-c764-425d-b771-62c9dd155ab8 + +% osm nspkg-create openldap_ns.tar.gz +1c803c06-33d8-40ac-96f7-7d63a647a846 +``` + +Give OSM a couple of minutes to process all that stuff. Then log into +the OSM Web UI. You should be able to see: VIM account, K8s cluster, +the three repos we added as well as the NS and VNF packages. + +After checking everything is hunky-dory, go back to the OSM VM shell. +Time to create that LDAP NS instance we've all been waiting for. + +```console +% osm ns-create --ns_name ldap --nsd_name openldap_ns --vim_account mylocation1 +5d5d5028-0e4a-4a76-906a-cd4a3ad1d210 +``` + +Again wait a few minutes. Eventually the OSM Web UI should reflect +the OpenLDAP NS instance. If you go back to the shell, you can check +there's a new namespace in the K8s cluster with a brand new OpenLDAP +pod in it: + +```console +% kubectl get ns +NAME STATUS AGE +94c3f4fb-304f-4290-ab95-0319d84b48b6 Active 2m12s +... +osm Active 45h + +% kubectl -n 94c3f4fb-304f-4290-ab95-0319d84b48b6 get pod +NAME READY STATUS RESTARTS AGE +stable-openldap-1-2-7-0084381388-6f658b9545-q7k5m 1/1 Running 0 2m15s + +% kubectl -n 94c3f4fb-304f-4290-ab95-0319d84b48b6 logs stable-openldap-1-2-7-0084381388-6f658b9545-q7k5m +... +Start OpenLDAP... +... +First start is done... +... +``` + +Happy days? + + +### Grief down the line? + +Notice we didn't build RO earlier. While we manage to build and deploy +LCM in the end, the LCM image might have some missing components, i.e. +those the build process supposedly fetched from RO. So we've got to +test the custom image thoroughly to make sure it works for our use +case. + + +### From the horse's mouth + +For the record, these are the actual instructions Gabriele got from +the OSM devs. 
Copy-paste from the chat, original text, no edits. + +After installing OSM via the script on the appropriate VM, this is +how to build the LCM image: + +1. Clone these repositories in your workspace on OSM host: + +cd workspace + +git clone "https://osm.etsi.org/gerrit/osm/LCM" && (cd "LCM" && curl https://osm.etsi.org/gerrit/tools/hooks/commit-msg > .git/hooks/commit-msg ; chmod +x .git/hooks/commit-msg) + +git clone "https://osm.etsi.org/gerrit/osm/N2VC" && (cd "N2VC" && curl https://osm.etsi.org/gerrit/tools/hooks/commit-msg > .git/hooks/commit-msg ; chmod +x .git/hooks/commit-msg) + +git clone "https://osm.etsi.org/gerrit/osm/devops" && (cd "devops" && curl https://osm.etsi.org/gerrit/tools/hooks/commit-msg > .git/hooks/commit-msg ; chmod +x .git/hooks/commit-msg) + +git clone "https://osm.etsi.org/gerrit/osm/common" && (cd "common" && curl https://osm.etsi.org/gerrit/tools/hooks/commit-msg > .git/hooks/commit-msg ; chmod +x .git/hooks/commit-msg) + +git clone "https://osm.etsi.org/gerrit/osm/IM" && (cd "IM" && curl https://osm.etsi.org/gerrit/tools/hooks/commit-msg > .git/hooks/commit-msg ; chmod +x .git/hooks/commit-msg) + + +2. Install HTTP server: + +devops/tools/local-build.sh --install-qhttpd + + +3. Build artifacts: + +devops/tools/local-build.sh --module common,IM,N2VC,RO,LCM,NBI stage-2 + + +4. Build image (this generates a “devel” tagged image using previous artifacts): + +devops/tools/local-build.sh --module LCM stage-3 + + +5. Patch deployment to use “devel” image: + +kubectl -n osm patch deployment lcm --patch '{"spec": {"template": {"spec": {"containers": [{"name": "lcm", "image": "opensourcemano/lcm:devel"}]}}}}' + + + + +[fails]: ./failed-steps.md diff --git a/_tmp/osm-install-issues/README.md b/_tmp/osm-install-issues/README.md new file mode 100644 index 0000000..cfcb86f --- /dev/null +++ b/_tmp/osm-install-issues/README.md @@ -0,0 +1,46 @@ +OSM installation issues +----------------------- +> ...documenting the struggle for posterity. 
+ +I've captured some of the OSM install sessions that resulted in broken +installs and parked them in this dir so that, in case I run into the same +issues again, I've got an idea how to fix stuff quickly. One sore point +was the PGP keys---quite a few of them. Here's an example of how to fix +the K8s ones, the others are similar: + +- https://stackoverflow.com/questions/49877401 + +Also notice the install script was broken. See the `multipass*` and +`patched*` scripts in the `osm-install` dir. It looks like the OSM +guys fixed it though: + +- https://osm.etsi.org/gitlab/osm/devops/-/commit/fdbe776e9bb9e43f7d4dc0f8c023b93d258666e2 + + +### June 2022 Update + +So I rebuilt another OSM 10 VM at the beginning of Jun 2022. Here's +how: + +```console +$ multipass launch --name osm --cpus 2 --mem 6G --disk 40G 18.04 +$ multipass shell osm +% wget https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh +% chmod +x install_osm.sh +% ./install_osm.sh 2>&1 | tee install.log +% exit +``` + +The good news is that this time I didn't have to patch the install +script---yay! But...the `osm` client didn't install properly and it +looks like something else may be broken too. In fact, you can't run +the client b/c `pycurl` is missing and when logging into the UI I +couldn't manage to create repos and K8s clusters. 
Install log saved +to: + +- [broken-osm10.install-log.jun2022.log][jun2022-log] + + + + +[jun2022-log]: ./broken-osm10.install-log.jun2022.log \ No newline at end of file diff --git a/_tmp/osm-install-issues/broken-osm10.install-log.jun2022.log b/_tmp/osm-install-issues/broken-osm10.install-log.jun2022.log new file mode 100644 index 0000000..d5d06c2 --- /dev/null +++ b/_tmp/osm-install-issues/broken-osm10.install-log.jun2022.log @@ -0,0 +1,3688 @@ +Checking required packages: software-properties-common apt-transport-https +OK +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Get:2 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB] +Get:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB] +Get:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease [4086 B] +Get:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB] +Get:6 http://archive.ubuntu.com/ubuntu bionic/universe amd64 Packages [8570 kB] +Get:7 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [2304 kB] +Get:8 http://archive.ubuntu.com/ubuntu bionic/universe Translation-en [4941 kB] +Get:9 http://security.ubuntu.com/ubuntu bionic-security/main Translation-en [401 kB] +Get:10 http://security.ubuntu.com/ubuntu bionic-security/restricted amd64 Packages [786 kB] +Get:11 http://security.ubuntu.com/ubuntu bionic-security/restricted Translation-en [108 kB] +Get:12 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [1212 kB] +Get:13 http://archive.ubuntu.com/ubuntu bionic/multiverse amd64 Packages [151 kB] +Get:14 http://archive.ubuntu.com/ubuntu bionic/multiverse Translation-en [108 kB] +Get:15 http://security.ubuntu.com/ubuntu bionic-security/universe Translation-en [279 kB] +Get:16 http://security.ubuntu.com/ubuntu bionic-security/multiverse amd64 Packages [19.0 kB] +Get:17 http://security.ubuntu.com/ubuntu bionic-security/multiverse Translation-en [3836 B] +Get:18 
https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/devops amd64 Packages [482 B] +Get:19 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [2646 kB] +Get:20 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [1826 kB] +Get:21 http://archive.ubuntu.com/ubuntu bionic-updates/universe Translation-en [396 kB] +Get:22 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse amd64 Packages [24.9 kB] +Get:23 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse Translation-en [6012 B] +Get:24 http://archive.ubuntu.com/ubuntu bionic-backports/main amd64 Packages [10.8 kB] +Get:25 http://archive.ubuntu.com/ubuntu bionic-backports/main Translation-en [5016 B] +Get:26 http://archive.ubuntu.com/ubuntu bionic-backports/universe amd64 Packages [11.6 kB] +Get:27 http://archive.ubuntu.com/ubuntu bionic-backports/universe Translation-en [5864 B] +Fetched 24.1 MB in 5s (4497 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:2 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:2 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Reading package lists... 
+W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following NEW packages will be installed: + osm-devops +0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded. +Need to get 724 kB of archives. +After this operation, 5550 kB of additional disk space will be used. +Get:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/devops amd64 osm-devops all 10.1.1-1 [724 kB] +Fetched 724 kB in 0s (2369 kB/s) +Selecting previously unselected package osm-devops. +(Reading database ... 60493 files and directories currently installed.) +Preparing to unpack .../osm-devops_10.1.1-1_all.deb ... +Unpacking osm-devops (10.1.1-1) ... +Setting up osm-devops (10.1.1-1) ... +Checking required packages: git wget curl tar +2022-06-16T11:22:46+02:00 INFO Waiting for automatic snapd restart... +jq 1.5+dfsg-1 from Michael Vogt (mvo) installed +## Thu Jun 16 11:23:01 CEST 2022 source: logging sourced +## Thu Jun 16 11:23:01 CEST 2022 source: config sourced +## Thu Jun 16 11:23:01 CEST 2022 source: container sourced +## Thu Jun 16 11:23:01 CEST 2022 source: git_functions sourced +The installation will do the following + 1. Install and configure LXD + 2. Install juju + 3. Install docker CE + 4. Disable swap space + 5. Install and initialize Kubernetes + as pre-requirements. + Do you want to proceed (Y/n)? y +Installing lightweight build of OSM +Checking required packages: snapd +* Applying /etc/sysctl.d/10-console-messages.conf ... +kernel.printk = 4 4 1 7 +* Applying /etc/sysctl.d/10-ipv6-privacy.conf ... +net.ipv6.conf.all.use_tempaddr = 2 +net.ipv6.conf.default.use_tempaddr = 2 +* Applying /etc/sysctl.d/10-kernel-hardening.conf ... +kernel.kptr_restrict = 1 +* Applying /etc/sysctl.d/10-link-restrictions.conf ... 
+fs.protected_hardlinks = 1 +fs.protected_symlinks = 1 +* Applying /etc/sysctl.d/10-lxd-inotify.conf ... +fs.inotify.max_user_instances = 1024 +* Applying /etc/sysctl.d/10-magic-sysrq.conf ... +kernel.sysrq = 176 +* Applying /etc/sysctl.d/10-network-security.conf ... +net.ipv4.conf.default.rp_filter = 1 +net.ipv4.conf.all.rp_filter = 1 +net.ipv4.tcp_syncookies = 1 +* Applying /etc/sysctl.d/10-ptrace.conf ... +kernel.yama.ptrace_scope = 1 +* Applying /etc/sysctl.d/10-zeropage.conf ... +vm.mmap_min_addr = 65536 +* Applying /usr/lib/sysctl.d/50-default.conf ... +net.ipv4.conf.all.promote_secondaries = 1 +net.core.default_qdisc = fq_codel +* Applying /etc/sysctl.d/60-lxd-production.conf ... +fs.inotify.max_queued_events = 1048576 +fs.inotify.max_user_instances = 1048576 +fs.inotify.max_user_watches = 1048576 +vm.max_map_count = 262144 +kernel.dmesg_restrict = 1 +net.ipv4.neigh.default.gc_thresh3 = 8192 +net.ipv6.neigh.default.gc_thresh3 = 8192 +sysctl: setting key "net.core.bpf_jit_limit": Invalid argument +net.core.bpf_jit_limit = 3000000000 +kernel.keys.maxkeys = 2000 +kernel.keys.maxbytes = 2000000 +* Applying /etc/sysctl.d/99-cloudimg-ipv6.conf ... +net.ipv6.conf.all.use_tempaddr = 0 +net.ipv6.conf.default.use_tempaddr = 0 +* Applying /etc/sysctl.d/99-sysctl.conf ... +* Applying /etc/sysctl.conf ... +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following packages will be REMOVED: + liblxc-common* liblxc1* lxcfs* lxd* lxd-client* +0 upgraded, 0 newly installed, 5 to remove and 0 not upgraded. +After this operation, 34.2 MB disk space will be freed. +(Reading database ... 61629 files and directories currently installed.) +Removing lxd (3.0.3-0ubuntu1~18.04.2) ... +Removing lxd dnsmasq configuration +Removing lxcfs (3.0.3-0ubuntu1~18.04.2) ... 
+Removing lxd-client (3.0.3-0ubuntu1~18.04.2) ... +Removing liblxc-common (3.0.3-0ubuntu1~18.04.1) ... +Removing liblxc1 (3.0.3-0ubuntu1~18.04.1) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +Processing triggers for libc-bin (2.27-3ubuntu1.6) ... +(Reading database ... 61383 files and directories currently installed.) +Purging configuration files for liblxc-common (3.0.3-0ubuntu1~18.04.1) ... +Purging configuration files for lxd (3.0.3-0ubuntu1~18.04.2) ... +Purging configuration files for lxcfs (3.0.3-0ubuntu1~18.04.2) ... +Processing triggers for systemd (237-3ubuntu10.53) ... +Processing triggers for ureadahead (0.100.0-21) ... +lxd (4.0/stable) 4.0.9-8e2046b from Canonical** installed +To start your first container, try: lxc launch ubuntu:20.04 +Or for a virtual machine: lxc launch ubuntu:20.04 --vm + +Installing Docker CE ... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +ca-certificates is already the newest version (20211016~18.04.1). +ca-certificates set to manually installed. +software-properties-common is already the newest version (0.96.24.32.18). +software-properties-common set to manually installed. +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following NEW packages will be installed: + apt-transport-https +0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded. +Need to get 4348 B of archives. +After this operation, 154 kB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 apt-transport-https all 1.6.14 [4348 B] +Fetched 4348 B in 0s (22.6 kB/s) +Selecting previously unselected package apt-transport-https. +(Reading database ... 
61366 files and directories currently installed.) +Preparing to unpack .../apt-transport-https_1.6.14_all.deb ... +Unpacking apt-transport-https (1.6.14) ... +Setting up apt-transport-https (1.6.14) ... +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://security.ubuntu.com/ubuntu bionic-security InRelease +Get:2 https://download.docker.com/linux/ubuntu bionic InRelease [64.4 kB] +Hit:3 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic InRelease +Get:5 https://download.docker.com/linux/ubuntu bionic/stable amd64 Packages [26.4 kB] +Hit:6 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:7 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Fetched 90.9 kB in 1s (138 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following additional packages will be installed: + containerd.io docker-ce-cli docker-ce-rootless-extras docker-scan-plugin + libltdl7 pigz +Suggested packages: + aufs-tools cgroupfs-mount | cgroup-lite +Recommended packages: + slirp4netns +The following NEW packages will be installed: + containerd.io docker-ce docker-ce-cli docker-ce-rootless-extras + docker-scan-plugin libltdl7 pigz +0 upgraded, 7 newly installed, 0 to remove and 0 not upgraded. +Need to get 101 MB of archives. +After this operation, 422 MB of additional disk space will be used. 
+Get:1 https://download.docker.com/linux/ubuntu bionic/stable amd64 containerd.io amd64 1.6.6-1 [28.1 MB] +Get:2 http://archive.ubuntu.com/ubuntu bionic/universe amd64 pigz amd64 2.4-1 [57.4 kB] +Get:3 http://archive.ubuntu.com/ubuntu bionic/main amd64 libltdl7 amd64 2.4.6-2 [38.8 kB] +Get:4 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-ce-cli amd64 5:20.10.17~3-0~ubuntu-bionic [40.6 MB] +Get:5 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-ce amd64 5:20.10.17~3-0~ubuntu-bionic [21.0 MB] +Get:6 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-ce-rootless-extras amd64 5:20.10.17~3-0~ubuntu-bionic [8163 kB] +Get:7 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-scan-plugin amd64 0.17.0~ubuntu-bionic [3521 kB] +Fetched 101 MB in 3s (36.3 MB/s) +Selecting previously unselected package pigz. +(Reading database ... 61370 files and directories currently installed.) +Preparing to unpack .../0-pigz_2.4-1_amd64.deb ... +Unpacking pigz (2.4-1) ... +Selecting previously unselected package containerd.io. +Preparing to unpack .../1-containerd.io_1.6.6-1_amd64.deb ... +Unpacking containerd.io (1.6.6-1) ... +Selecting previously unselected package docker-ce-cli. +Preparing to unpack .../2-docker-ce-cli_5%3a20.10.17~3-0~ubuntu-bionic_amd64.deb ... +Unpacking docker-ce-cli (5:20.10.17~3-0~ubuntu-bionic) ... +Selecting previously unselected package docker-ce. +Preparing to unpack .../3-docker-ce_5%3a20.10.17~3-0~ubuntu-bionic_amd64.deb ... +Unpacking docker-ce (5:20.10.17~3-0~ubuntu-bionic) ... +Selecting previously unselected package docker-ce-rootless-extras. +Preparing to unpack .../4-docker-ce-rootless-extras_5%3a20.10.17~3-0~ubuntu-bionic_amd64.deb ... +Unpacking docker-ce-rootless-extras (5:20.10.17~3-0~ubuntu-bionic) ... +Selecting previously unselected package docker-scan-plugin. +Preparing to unpack .../5-docker-scan-plugin_0.17.0~ubuntu-bionic_amd64.deb ... 
+Unpacking docker-scan-plugin (0.17.0~ubuntu-bionic) ... +Selecting previously unselected package libltdl7:amd64. +Preparing to unpack .../6-libltdl7_2.4.6-2_amd64.deb ... +Unpacking libltdl7:amd64 (2.4.6-2) ... +Setting up containerd.io (1.6.6-1) ... +Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /lib/systemd/system/containerd.service. +Setting up docker-ce-rootless-extras (5:20.10.17~3-0~ubuntu-bionic) ... +Setting up docker-scan-plugin (0.17.0~ubuntu-bionic) ... +Setting up libltdl7:amd64 (2.4.6-2) ... +Setting up docker-ce-cli (5:20.10.17~3-0~ubuntu-bionic) ... +Setting up pigz (2.4-1) ... +Setting up docker-ce (5:20.10.17~3-0~ubuntu-bionic) ... +Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /lib/systemd/system/docker.service. +Created symlink /etc/systemd/system/sockets.target.wants/docker.socket → /lib/systemd/system/docker.socket. +Processing triggers for libc-bin (2.27-3ubuntu1.6) ... +Processing triggers for systemd (237-3ubuntu10.53) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +Processing triggers for ureadahead (0.100.0-21) ... +Adding user to group 'docker' +... restarted Docker service +Client: Docker Engine - Community + Version: 20.10.17 + API version: 1.41 + Go version: go1.17.11 + Git commit: 100c701 + Built: Mon Jun 6 23:02:56 2022 + OS/Arch: linux/amd64 + Context: default + Experimental: true + +Server: Docker Engine - Community + Engine: + Version: 20.10.17 + API version: 1.41 (minimum version 1.12) + Go version: go1.17.11 + Git commit: a89b842 + Built: Mon Jun 6 23:01:02 2022 + OS/Arch: linux/amd64 + Experimental: false + containerd: + Version: 1.6.6 + GitCommit: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1 + runc: + Version: 1.1.2 + GitCommit: v1.1.2-0-ga916309 + docker-init: + Version: 0.19.0 + GitCommit: de40ad0 +... 
Docker CE installation done +Creating folders for installation +Hit:1 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:2 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:6 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +apt-transport-https is already the newest version (1.6.14). +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:2 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:3 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:6 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:7 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Get:5 https://packages.cloud.google.com/apt kubernetes-xenial InRelease [9383 B] +Get:8 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 Packages [56.5 kB] +Fetched 65.9 kB in 1s (76.9 kB/s) +Reading package lists... 
+W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:2 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:3 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:5 http://archive.ubuntu.com/ubuntu bionic InRelease +Get:4 https://packages.cloud.google.com/apt kubernetes-xenial InRelease [9383 B] +Hit:6 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:7 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Fetched 9383 B in 1s (12.4 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Installing Kubernetes Packages ... +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following additional packages will be installed: + conntrack cri-tools kubernetes-cni socat +The following NEW packages will be installed: + conntrack cri-tools kubeadm kubectl kubelet kubernetes-cni socat +0 upgraded, 7 newly installed, 0 to remove and 0 not upgraded. +Need to get 74.6 MB of archives. +After this operation, 323 MB of additional disk space will be used. 
+Get:2 http://archive.ubuntu.com/ubuntu bionic/main amd64 conntrack amd64 1:1.4.4+snapshot20161117-6ubuntu2 [30.6 kB] +Get:1 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 cri-tools amd64 1.24.0-00 [12.2 MB] +Get:4 http://archive.ubuntu.com/ubuntu bionic/main amd64 socat amd64 1.7.3.2-2ubuntu2 [342 kB] +Get:3 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubernetes-cni amd64 0.8.7-00 [25.0 MB] +Get:5 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubelet amd64 1.23.3-00 [19.5 MB] +Get:6 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubectl amd64 1.23.3-00 [8929 kB] +Get:7 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubeadm amd64 1.23.3-00 [8580 kB] +Fetched 74.6 MB in 2s (29.9 MB/s) +Selecting previously unselected package conntrack. +(Reading database ... 61622 files and directories currently installed.) +Preparing to unpack .../0-conntrack_1%3a1.4.4+snapshot20161117-6ubuntu2_amd64.deb ... +Unpacking conntrack (1:1.4.4+snapshot20161117-6ubuntu2) ... +Selecting previously unselected package cri-tools. +Preparing to unpack .../1-cri-tools_1.24.0-00_amd64.deb ... +Unpacking cri-tools (1.24.0-00) ... +Selecting previously unselected package kubernetes-cni. +Preparing to unpack .../2-kubernetes-cni_0.8.7-00_amd64.deb ... +Unpacking kubernetes-cni (0.8.7-00) ... +Selecting previously unselected package socat. +Preparing to unpack .../3-socat_1.7.3.2-2ubuntu2_amd64.deb ... +Unpacking socat (1.7.3.2-2ubuntu2) ... +Selecting previously unselected package kubelet. +Preparing to unpack .../4-kubelet_1.23.3-00_amd64.deb ... +Unpacking kubelet (1.23.3-00) ... +Selecting previously unselected package kubectl. +Preparing to unpack .../5-kubectl_1.23.3-00_amd64.deb ... +Unpacking kubectl (1.23.3-00) ... +Selecting previously unselected package kubeadm. +Preparing to unpack .../6-kubeadm_1.23.3-00_amd64.deb ... +Unpacking kubeadm (1.23.3-00) ... 
+Setting up conntrack (1:1.4.4+snapshot20161117-6ubuntu2) ... +Setting up kubernetes-cni (0.8.7-00) ... +Setting up cri-tools (1.24.0-00) ... +Setting up socat (1.7.3.2-2ubuntu2) ... +Setting up kubelet (1.23.3-00) ... +Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /lib/systemd/system/kubelet.service. +Setting up kubectl (1.23.3-00) ... +Setting up kubeadm (1.23.3-00) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... + KUBELET_EXTRA_ARGS="--cgroup-driver=cgroupfs" +kubelet set on hold. +kubeadm set on hold. +kubectl set on hold. +I0616 11:24:49.882192 10753 version.go:255] remote version is much newer: v1.24.1; falling back to: stable-1.23 +[init] Using Kubernetes version: v1.23.7 +[preflight] Running pre-flight checks +[preflight] Pulling images required for setting up a Kubernetes cluster +[preflight] This might take a minute or two, depending on the speed of your internet connection +[preflight] You can also perform this action in beforehand using 'kubeadm config images pull' +[certs] Using certificateDir folder "/etc/kubernetes/pki" +[certs] Generating "ca" certificate and key +[certs] Generating "apiserver" certificate and key +[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local osm2] and IPs [10.96.0.1 192.168.64.22] +[certs] Generating "apiserver-kubelet-client" certificate and key +[certs] Generating "front-proxy-ca" certificate and key +[certs] Generating "front-proxy-client" certificate and key +[certs] Generating "etcd/ca" certificate and key +[certs] Generating "etcd/server" certificate and key +[certs] etcd/server serving cert is signed for DNS names [localhost osm2] and IPs [192.168.64.22 127.0.0.1 ::1] +[certs] Generating "etcd/peer" certificate and key +[certs] etcd/peer serving cert is signed for DNS names [localhost osm2] and IPs [192.168.64.22 127.0.0.1 ::1] +[certs] Generating "etcd/healthcheck-client" 
certificate and key +[certs] Generating "apiserver-etcd-client" certificate and key +[certs] Generating "sa" key and public key +[kubeconfig] Using kubeconfig folder "/etc/kubernetes" +[kubeconfig] Writing "admin.conf" kubeconfig file +[kubeconfig] Writing "kubelet.conf" kubeconfig file +[kubeconfig] Writing "controller-manager.conf" kubeconfig file +[kubeconfig] Writing "scheduler.conf" kubeconfig file +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Starting the kubelet +[control-plane] Using manifest folder "/etc/kubernetes/manifests" +[control-plane] Creating static Pod manifest for "kube-apiserver" +[control-plane] Creating static Pod manifest for "kube-controller-manager" +[control-plane] Creating static Pod manifest for "kube-scheduler" +[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" +[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s +[apiclient] All control plane components are healthy after 10.521247 seconds +[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace +[kubelet] Creating a ConfigMap "kubelet-config-1.23" in namespace kube-system with the configuration for the kubelets in the cluster +NOTE: The "kubelet-config-1.23" naming of the kubelet ConfigMap is deprecated. Once the UnversionedKubeletConfigMap feature gate graduates to Beta the default name will become just "kubelet-config". Kubeadm upgrade will handle this transition transparently. +[upload-certs] Skipping phase. 
Please see --upload-certs +[mark-control-plane] Marking the node osm2 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers] +[mark-control-plane] Marking the node osm2 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] +[bootstrap-token] Using token: tk7288.rgyc2cwzpwcgpi3n +[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles +[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes +[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials +[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token +[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster +[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace +[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key +[addons] Applied essential addon: CoreDNS +[addons] Applied essential addon: kube-proxy + +Your Kubernetes control-plane has initialized successfully! + +To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + +Alternatively, if you are the root user, you can run: + + export KUBECONFIG=/etc/kubernetes/admin.conf + +You should now deploy a pod network to the cluster. 
+Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + https://kubernetes.io/docs/concepts/cluster-administration/addons/ + +Then you can join any number of worker nodes by running the following on each as root: + +kubeadm join 192.168.64.22:6443 --token tk7288.rgyc2cwzpwcgpi3n \ + --discovery-token-ca-cert-hash sha256:e14f28f2822bd3672329697a551c08df6423e93a1a55a008ffb1042964634abe +Error from server (NotFound): namespaces "osm" not found +Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ +podsecuritypolicy.policy/psp.flannel.unprivileged created +clusterrole.rbac.authorization.k8s.io/flannel created +clusterrolebinding.rbac.authorization.k8s.io/flannel created +serviceaccount/flannel created +configmap/kube-flannel-cfg created +daemonset.apps/kube-flannel-ds created +node/osm2 untainted +Helm3 is not installed, installing ... + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 13.2M 100 13.2M 0 0 32.2M 0 --:--:-- --:--:-- --:--:-- 32.3M +linux-amd64/ +linux-amd64/helm +linux-amd64/LICENSE +linux-amd64/README.md +"stable" has been added to your repositories +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "stable" chart repository +Update Complete. ⎈Happy Helming!⎈ +Installing open-iscsi +Hit:1 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:2 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:4 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:5 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:3 https://packages.cloud.google.com/apt kubernetes-xenial InRelease +Hit:6 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:7 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Reading package lists... 
+W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +open-iscsi is already the newest version (2.0.874-5ubuntu2.11). +open-iscsi set to manually installed. +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +0 upgraded, 0 newly installed, 0 to remove and 3 not upgraded. +Synchronizing state of iscsid.service with SysV service script with /lib/systemd/systemd-sysv-install. +Executing: /lib/systemd/systemd-sysv-install enable iscsid +Installing OpenEBS +"openebs" has been added to your repositories +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "openebs" chart repository +...Successfully got an update from the "stable" chart repository +Update Complete. ⎈Happy Helming!⎈ +NAME: openebs +LAST DEPLOYED: Thu Jun 16 11:26:10 2022 +NAMESPACE: openebs +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +Successfully installed OpenEBS. + +Check the status by running: kubectl get pods -n openebs + +The default values will install NDM and enable OpenEBS hostpath and device +storage engines along with their default StorageClasses. Use `kubectl get sc` +to see the list of installed OpenEBS StorageClasses. + +**Note**: If you are upgrading from the older helm chart that was using cStor +and Jiva (non-csi) volumes, you will have to run the following command to include +the older provisioners: + +helm upgrade openebs openebs/openebs \ + --namespace openebs \ + --set legacy.enabled=true \ + --reuse-values + +For other engines, you will need to perform a few more additional steps to +enable the engine, configure the engines (e.g. creating pools) and create +StorageClasses. 
+ +For example, cStor can be enabled using commands like: + +helm upgrade openebs openebs/openebs \ + --namespace openebs \ + --set cstor.enabled=true \ + --reuse-values + +For more information, +- view the online documentation at https://openebs.io/docs or +- connect with an active community on Kubernetes slack #openebs channel. +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +openebs openebs 1 2022-06-16 11:26:10.637547496 +0200 CEST deployed openebs-3.1.0 3.1.0 +Waiting for storageclass +Storageclass available +storageclass.storage.k8s.io/openebs-hostpath patched +configInline: + address-pools: + - name: default + protocol: layer2 + addresses: + - 192.168.64.22/32 +"metallb" has been added to your repositories +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "metallb" chart repository +...Successfully got an update from the "openebs" chart repository +...Successfully got an update from the "stable" chart repository +Update Complete. ⎈Happy Helming!⎈ +W0616 11:26:16.600656 13801 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ +W0616 11:26:16.603418 13801 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ +W0616 11:26:16.686741 13801 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ +W0616 11:26:16.686778 13801 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ +NAME: metallb +LAST DEPLOYED: Thu Jun 16 11:26:15 2022 +NAMESPACE: metallb-system +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +MetalLB is now running in the cluster. +LoadBalancer Services in your cluster are now available on the IPs you +defined in MetalLB's configuration: + +config: + address-pools: + - addresses: + - 192.168.64.22/32 + name: default + protocol: layer2 + +To see IP assignments, try `kubectl get services`. + +Bootstraping... 
1 checks of 100 +OpenEBS: Waiting for 2 of 2 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-qxghf 0/1 +openebs-ndm-operator-687cf9466c-ghjbd 0/1 + +MetalLB: Waiting for 1 of 1 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 + + +Bootstraping... 2 checks of 100 +OpenEBS: Waiting for 3 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-qxghf 0/1 +openebs-ndm-operator-687cf9466c-ghjbd 0/1 +openebs-ndm-qbzpz 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 3 checks of 100 +OpenEBS: Waiting for 3 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-qxghf 0/1 +openebs-ndm-operator-687cf9466c-ghjbd 0/1 +openebs-ndm-qbzpz 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 4 checks of 100 +OpenEBS: Waiting for 3 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-qxghf 0/1 +openebs-ndm-operator-687cf9466c-ghjbd 0/1 +openebs-ndm-qbzpz 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 5 checks of 100 +OpenEBS: Waiting for 3 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-qxghf 0/1 +openebs-ndm-operator-687cf9466c-ghjbd 0/1 +openebs-ndm-qbzpz 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 6 checks of 100 +OpenEBS: Waiting for 2 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-qxghf 0/1 +openebs-ndm-operator-687cf9466c-ghjbd 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 
7 checks of 100 +OpenEBS: Waiting for 2 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-qxghf 0/1 +openebs-ndm-operator-687cf9466c-ghjbd 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 8 checks of 100 +OpenEBS: Waiting for 2 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-qxghf 0/1 +openebs-ndm-operator-687cf9466c-ghjbd 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 9 checks of 100 +OpenEBS: Waiting for 2 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-qxghf 0/1 +openebs-ndm-operator-687cf9466c-ghjbd 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 10 checks of 100 +OpenEBS: Waiting for 2 of 3 pods to be ready: +openebs-localpv-provisioner-7965f77fb6-qxghf 0/1 +openebs-ndm-operator-687cf9466c-ghjbd 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 11 checks of 100 +OpenEBS: Waiting for 1 of 3 pods to be ready: +openebs-ndm-operator-687cf9466c-ghjbd 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 12 checks of 100 +OpenEBS: Waiting for 1 of 3 pods to be ready: +openebs-ndm-operator-687cf9466c-ghjbd 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 13 checks of 100 +OpenEBS: Waiting for 1 of 3 pods to be ready: +openebs-ndm-operator-687cf9466c-ghjbd 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 
14 checks of 100 +OpenEBS: Waiting for 1 of 3 pods to be ready: +openebs-ndm-operator-687cf9466c-ghjbd 0/1 + +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 15 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 16 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 17 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 18 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 19 checks of 100 +MetalLB: Waiting for 2 of 2 pods to be ready: +metallb-controller-777cbcf64f-k6f55 0/1 +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 20 checks of 100 +MetalLB: Waiting for 1 of 2 pods to be ready: +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 21 checks of 100 +MetalLB: Waiting for 1 of 2 pods to be ready: +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 22 checks of 100 +MetalLB: Waiting for 1 of 2 pods to be ready: +metallb-speaker-7dp2s 0/1 + + +Bootstraping... 
23 checks of 100 +MetalLB: Waiting for 1 of 2 pods to be ready: +metallb-speaker-7dp2s 0/1 + +===> Successful checks: 10/10 +K8S CLUSTER IS READY +Installing juju +juju (2.9/stable) 2.9.31 from Canonical** installed +no crontab for ubuntu ++ LAYER_BASIC='gcc build-essential python3-pip python3-setuptools python3-yaml' ++ TRUSTY_PACKAGES=python-virtualenv ++ XENIAL_PACKAGES=virtualenv ++ BIONIC_PACKAGES=virtualenv ++ DOWNLOAD_PACKAGES= ++ CLOUD_INIT_PACKAGES='curl cpu-checker bridge-utils cloud-utils tmux ubuntu-fan' ++ PACKAGES='gcc build-essential python3-pip python3-setuptools python3-yaml ' +++ juju version +Since Juju 2 is being run for the first time, it has downloaded the latest public cloud information. ++ JUJU_FULL_VERSION=2.9.31-ubuntu-amd64 +++ echo 2.9.31-ubuntu-amd64 +++ awk -F- '{print $1}' ++ JUJU_VERSION=2.9.31 +++ echo 2.9.31-ubuntu-amd64 +++ awk -F- '{print $2}' ++ OS_VERSION=ubuntu +++ echo 2.9.31-ubuntu-amd64 +++ awk -F- '{print $3}' ++ ARCH=amd64 ++ '[' 1 == 0 ']' ++ '[' 0 == 1 ']' ++ '[' 1 == 1 ']' ++ cache xenial virtualenv ++ series=xenial ++ container=juju-xenial-base ++ alias=juju/xenial/amd64 ++ lxc delete juju-xenial-base -f +Error: Not Found ++ true ++ lxc image copy ubuntu:xenial local: --alias clean-xenial +Image copied successfully! 
++ lxc launch ubuntu:xenial juju-xenial-base +Creating juju-xenial-base +Starting juju-xenial-base ++ sleep 15 ++ lxc exec juju-xenial-base -- apt-get update -y +Get:1 http://security.ubuntu.com/ubuntu xenial-security InRelease [99.8 kB] +Hit:2 http://archive.ubuntu.com/ubuntu xenial InRelease +Get:3 http://archive.ubuntu.com/ubuntu xenial-updates InRelease [99.8 kB] +Get:4 http://security.ubuntu.com/ubuntu xenial-security/main Translation-en [360 kB] +Get:5 http://security.ubuntu.com/ubuntu xenial-security/universe amd64 Packages [785 kB] +Get:6 http://security.ubuntu.com/ubuntu xenial-security/universe Translation-en [225 kB] +Get:7 http://security.ubuntu.com/ubuntu xenial-security/multiverse amd64 Packages [7864 B] +Get:8 http://security.ubuntu.com/ubuntu xenial-security/multiverse Translation-en [2672 B] +Get:9 http://archive.ubuntu.com/ubuntu xenial-backports InRelease [97.4 kB] +Get:10 https://esm.ubuntu.com/infra/ubuntu xenial-infra-security InRelease [7518 B] +Get:11 http://archive.ubuntu.com/ubuntu xenial/universe amd64 Packages [7532 kB] +Get:12 https://esm.ubuntu.com/infra/ubuntu xenial-infra-updates InRelease [7475 B] +Get:13 https://esm.ubuntu.com/infra/ubuntu xenial-infra-security/main amd64 Packages [462 kB] +Get:14 http://archive.ubuntu.com/ubuntu xenial/universe Translation-en [4354 kB] +Get:15 http://archive.ubuntu.com/ubuntu xenial/multiverse amd64 Packages [144 kB] +Get:16 http://archive.ubuntu.com/ubuntu xenial/multiverse Translation-en [106 kB] +Get:17 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 Packages [2049 kB] +Get:18 http://archive.ubuntu.com/ubuntu xenial-updates/main Translation-en [461 kB] +Get:19 http://archive.ubuntu.com/ubuntu xenial-updates/universe amd64 Packages [1219 kB] +Get:20 http://archive.ubuntu.com/ubuntu xenial-updates/universe Translation-en [358 kB] +Get:21 http://archive.ubuntu.com/ubuntu xenial-updates/multiverse amd64 Packages [22.6 kB] +Get:22 http://archive.ubuntu.com/ubuntu xenial-updates/multiverse 
Translation-en [8476 B] +Get:23 http://archive.ubuntu.com/ubuntu xenial-backports/main amd64 Packages [9812 B] +Get:24 http://archive.ubuntu.com/ubuntu xenial-backports/main Translation-en [4456 B] +Get:25 http://archive.ubuntu.com/ubuntu xenial-backports/universe amd64 Packages [11.3 kB] +Get:26 http://archive.ubuntu.com/ubuntu xenial-backports/universe Translation-en [4476 B] +Fetched 18.4 MB in 6s (3014 kB/s) +Reading package lists... ++ lxc exec juju-xenial-base -- apt-get upgrade -y +Reading package lists... +Building dependency tree... +Reading state information... +Calculating upgrade... +The following package was automatically installed and is no longer required: + libfreetype6 +Use 'apt autoremove' to remove it. +The following packages will be upgraded: + ubuntu-advantage-tools +1 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. +Need to get 718 kB of archives. +After this operation, 140 kB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 ubuntu-advantage-tools amd64 27.8~16.04.1 [718 kB] +dpkg-preconfigure: unable to re-open stdin: No such file or directory +Fetched 718 kB in 0s (3570 kB/s) +(Reading database ... 25907 files and directories currently installed.) +Preparing to unpack .../ubuntu-advantage-tools_27.8~16.04.1_amd64.deb ... +Unpacking ubuntu-advantage-tools (27.8~16.04.1) over (27.2.2~16.04.1) ... +Processing triggers for man-db (2.7.5-1) ... +Setting up ubuntu-advantage-tools (27.8~16.04.1) ... +Installing new version of config file /etc/logrotate.d/ubuntu-advantage-tools ... +Installing new version of config file /etc/ubuntu-advantage/help_data.yaml ... +Installing new version of config file /etc/ubuntu-advantage/uaclient.conf ... ++ lxc exec juju-xenial-base -- apt-get install -y curl cpu-checker bridge-utils cloud-utils tmux ubuntu-fan gcc build-essential python3-pip python3-setuptools python3-yaml virtualenv +Reading package lists... +Building dependency tree... 
+Reading state information... +python3-yaml is already the newest version (3.11-3build1). +tmux is already the newest version (2.1-3build1). +curl is already the newest version (7.47.0-1ubuntu2.19). +The following package was automatically installed and is no longer required: + libfreetype6 +Use 'apt autoremove' to remove it. +The following additional packages will be installed: + binutils cloud-image-utils cpp cpp-5 dpkg-dev fakeroot g++ g++-5 gcc-5 + genisoimage libaio1 libalgorithm-diff-perl libalgorithm-diff-xs-perl + libalgorithm-merge-perl libasan2 libatomic1 libboost-iostreams1.58.0 + libboost-random1.58.0 libboost-system1.58.0 libboost-thread1.58.0 + libc-dev-bin libc6-dev libcc1-0 libcilkrts5 libdpkg-perl libexpat1-dev + libfakeroot libfile-fcntllock-perl libgcc-5-dev libgomp1 libiscsi2 libisl15 + libitm1 liblsan0 libmpc3 libmpx0 libnspr4 libnss3 libnss3-nssdb + libpython3-dev libpython3.5-dev libquadmath0 librados2 librbd1 + libstdc++-5-dev libtsan0 libubsan0 linux-libc-dev make manpages-dev + msr-tools python-pip-whl python3-dev python3-virtualenv python3-wheel + python3.5-dev qemu-block-extra qemu-utils sharutils +Suggested packages: + binutils-doc cloud-utils-euca cpp-doc gcc-5-locales debian-keyring + g++-multilib g++-5-multilib gcc-5-doc libstdc++6-5-dbg gcc-multilib autoconf + automake libtool flex bison gdb gcc-doc gcc-5-multilib libgcc1-dbg + libgomp1-dbg libitm1-dbg libatomic1-dbg libasan2-dbg liblsan0-dbg + libtsan0-dbg libubsan0-dbg libcilkrts5-dbg libmpx0-dbg libquadmath0-dbg + wodim cdrkit-doc glibc-doc libstdc++-5-doc make-doc python-setuptools-doc + debootstrap bsd-mailx | mailx +The following NEW packages will be installed: + binutils bridge-utils build-essential cloud-image-utils cloud-utils cpp + cpp-5 cpu-checker dpkg-dev fakeroot g++ g++-5 gcc gcc-5 genisoimage libaio1 + libalgorithm-diff-perl libalgorithm-diff-xs-perl libalgorithm-merge-perl + libasan2 libatomic1 libboost-iostreams1.58.0 libboost-random1.58.0 + libboost-system1.58.0 
libboost-thread1.58.0 libc-dev-bin libc6-dev libcc1-0 + libcilkrts5 libdpkg-perl libexpat1-dev libfakeroot libfile-fcntllock-perl + libgcc-5-dev libgomp1 libiscsi2 libisl15 libitm1 liblsan0 libmpc3 libmpx0 + libnspr4 libnss3 libnss3-nssdb libpython3-dev libpython3.5-dev libquadmath0 + librados2 librbd1 libstdc++-5-dev libtsan0 libubsan0 linux-libc-dev make + manpages-dev msr-tools python-pip-whl python3-dev python3-pip + python3-setuptools python3-virtualenv python3-wheel python3.5-dev + qemu-block-extra qemu-utils sharutils ubuntu-fan virtualenv +0 upgraded, 68 newly installed, 0 to remove and 0 not upgraded. +Need to get 84.4 MB of archives. +After this operation, 226 MB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu xenial/main amd64 libiscsi2 amd64 1.12.0-2 [51.5 kB] +Get:2 http://archive.ubuntu.com/ubuntu xenial/main amd64 libmpc3 amd64 1.0.3-1 [39.7 kB] +Get:3 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 binutils amd64 2.26.1-1ubuntu1~16.04.8 [2312 kB] +Get:4 http://archive.ubuntu.com/ubuntu xenial/main amd64 bridge-utils amd64 1.5-9ubuntu1 [28.6 kB] +Get:5 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libc-dev-bin amd64 2.23-0ubuntu11.3 [68.6 kB] +Get:6 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 linux-libc-dev amd64 4.4.0-210.242 [832 kB] +Get:7 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libc6-dev amd64 2.23-0ubuntu11.3 [2083 kB] +Get:8 http://archive.ubuntu.com/ubuntu xenial/main amd64 libisl15 amd64 0.16.1-1 [524 kB] +Get:9 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 cpp-5 amd64 5.4.0-6ubuntu1~16.04.12 [7783 kB] +Get:10 http://archive.ubuntu.com/ubuntu xenial/main amd64 cpp amd64 4:5.3.1-1ubuntu1 [27.7 kB] +Get:11 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libcc1-0 amd64 5.4.0-6ubuntu1~16.04.12 [38.8 kB] +Get:12 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libgomp1 amd64 5.4.0-6ubuntu1~16.04.12 [55.2 kB] +Get:13 
http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libitm1 amd64 5.4.0-6ubuntu1~16.04.12 [27.4 kB] +Get:14 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libatomic1 amd64 5.4.0-6ubuntu1~16.04.12 [8892 B] +Get:15 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libasan2 amd64 5.4.0-6ubuntu1~16.04.12 [265 kB] +Get:16 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 liblsan0 amd64 5.4.0-6ubuntu1~16.04.12 [105 kB] +Get:17 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libtsan0 amd64 5.4.0-6ubuntu1~16.04.12 [244 kB] +Get:18 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libubsan0 amd64 5.4.0-6ubuntu1~16.04.12 [95.3 kB] +Get:19 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libcilkrts5 amd64 5.4.0-6ubuntu1~16.04.12 [40.0 kB] +Get:20 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libmpx0 amd64 5.4.0-6ubuntu1~16.04.12 [9762 B] +Get:21 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libquadmath0 amd64 5.4.0-6ubuntu1~16.04.12 [131 kB] +Get:22 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libgcc-5-dev amd64 5.4.0-6ubuntu1~16.04.12 [2239 kB] +Get:23 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 gcc-5 amd64 5.4.0-6ubuntu1~16.04.12 [8612 kB] +Get:24 http://archive.ubuntu.com/ubuntu xenial/main amd64 gcc amd64 4:5.3.1-1ubuntu1 [5244 B] +Get:25 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libstdc++-5-dev amd64 5.4.0-6ubuntu1~16.04.12 [1428 kB] +Get:26 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 g++-5 amd64 5.4.0-6ubuntu1~16.04.12 [8430 kB] +Get:27 http://archive.ubuntu.com/ubuntu xenial/main amd64 g++ amd64 4:5.3.1-1ubuntu1 [1504 B] +Get:28 http://archive.ubuntu.com/ubuntu xenial/main amd64 make amd64 4.1-6 [151 kB] +Get:29 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libdpkg-perl all 1.18.4ubuntu1.7 [195 kB] +Get:30 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 dpkg-dev all 1.18.4ubuntu1.7 [584 kB] 
+Get:31 http://archive.ubuntu.com/ubuntu xenial/main amd64 build-essential amd64 12.1ubuntu2 [4758 B] +Get:32 http://archive.ubuntu.com/ubuntu xenial/main amd64 msr-tools amd64 1.3-2 [10.6 kB] +Get:33 http://archive.ubuntu.com/ubuntu xenial/main amd64 cpu-checker amd64 0.7-0ubuntu7 [6862 B] +Get:34 http://archive.ubuntu.com/ubuntu xenial/main amd64 libfakeroot amd64 1.20.2-1ubuntu1 [25.5 kB] +Get:35 http://archive.ubuntu.com/ubuntu xenial/main amd64 fakeroot amd64 1.20.2-1ubuntu1 [61.8 kB] +Get:36 http://archive.ubuntu.com/ubuntu xenial/main amd64 genisoimage amd64 9:1.1.11-3ubuntu1 [316 kB] +Get:37 http://archive.ubuntu.com/ubuntu xenial/main amd64 libaio1 amd64 0.3.110-2 [6356 B] +Get:38 http://archive.ubuntu.com/ubuntu xenial/main amd64 libalgorithm-diff-perl all 1.19.03-1 [47.6 kB] +Get:39 http://archive.ubuntu.com/ubuntu xenial/main amd64 libalgorithm-diff-xs-perl amd64 0.04-4build1 [11.0 kB] +Get:40 http://archive.ubuntu.com/ubuntu xenial/main amd64 libalgorithm-merge-perl all 0.08-3 [12.0 kB] +Get:41 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libboost-iostreams1.58.0 amd64 1.58.0+dfsg-5ubuntu3.1 [29.0 kB] +Get:42 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libboost-system1.58.0 amd64 1.58.0+dfsg-5ubuntu3.1 [9146 B] +Get:43 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libboost-random1.58.0 amd64 1.58.0+dfsg-5ubuntu3.1 [11.7 kB] +Get:44 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libboost-thread1.58.0 amd64 1.58.0+dfsg-5ubuntu3.1 [47.0 kB] +Get:45 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libexpat1-dev amd64 2.1.0-7ubuntu0.16.04.5 [115 kB] +Get:46 http://archive.ubuntu.com/ubuntu xenial/main amd64 libfile-fcntllock-perl amd64 0.22-3 [32.0 kB] +Get:47 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libnspr4 amd64 2:4.13.1-0ubuntu0.16.04.1 [112 kB] +Get:48 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libnss3-nssdb all 2:3.28.4-0ubuntu0.16.04.14 [10.6 kB] +Get:49 
http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libnss3 amd64 2:3.28.4-0ubuntu0.16.04.14 [1232 kB] +Get:50 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libpython3.5-dev amd64 3.5.2-2ubuntu0~16.04.13 [37.3 MB] +Get:51 http://archive.ubuntu.com/ubuntu xenial/main amd64 libpython3-dev amd64 3.5.1-3 [6926 B] +Get:52 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 librados2 amd64 10.2.11-0ubuntu0.16.04.3 [1651 kB] +Get:53 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 librbd1 amd64 10.2.11-0ubuntu0.16.04.3 [2198 kB] +Get:54 http://archive.ubuntu.com/ubuntu xenial/main amd64 manpages-dev all 4.04-2 [2048 kB] +Get:55 http://archive.ubuntu.com/ubuntu xenial-updates/universe amd64 python-pip-whl all 8.1.1-2ubuntu0.6 [1112 kB] +Get:56 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 python3.5-dev amd64 3.5.2-2ubuntu0~16.04.13 [413 kB] +Get:57 http://archive.ubuntu.com/ubuntu xenial/main amd64 python3-dev amd64 3.5.1-3 [1186 B] +Get:58 http://archive.ubuntu.com/ubuntu xenial-updates/universe amd64 python3-pip all 8.1.1-2ubuntu0.6 [109 kB] +Get:59 http://archive.ubuntu.com/ubuntu xenial/main amd64 python3-setuptools all 20.7.0-1 [88.0 kB] +Get:60 http://archive.ubuntu.com/ubuntu xenial-updates/universe amd64 python3-virtualenv all 15.0.1+ds-3ubuntu1.1 [43.3 kB] +Get:61 http://archive.ubuntu.com/ubuntu xenial/universe amd64 python3-wheel all 0.29.0-1 [48.1 kB] +Get:62 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 qemu-block-extra amd64 1:2.5+dfsg-5ubuntu10.51 [32.4 kB] +Get:63 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 qemu-utils amd64 1:2.5+dfsg-5ubuntu10.51 [582 kB] +Get:64 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 sharutils amd64 1:4.15.2-1ubuntu0.1 [148 kB] +Get:65 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 ubuntu-fan all 0.12.8~16.04.3 [35.1 kB] +Get:66 http://archive.ubuntu.com/ubuntu xenial-updates/universe amd64 virtualenv all 15.0.1+ds-3ubuntu1.1 
[4346 B] +Get:67 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 cloud-image-utils all 0.27-0ubuntu25.2 [16.2 kB] +Get:68 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 cloud-utils all 0.27-0ubuntu25.2 [1514 B] +dpkg-preconfigure: unable to re-open stdin: No such file or directory +Fetched 84.4 MB in 2s (30.4 MB/s) +Selecting previously unselected package libiscsi2:amd64. +(Reading database ... 25928 files and directories currently installed.) +Preparing to unpack .../libiscsi2_1.12.0-2_amd64.deb ... +Unpacking libiscsi2:amd64 (1.12.0-2) ... +Selecting previously unselected package libmpc3:amd64. +Preparing to unpack .../libmpc3_1.0.3-1_amd64.deb ... +Unpacking libmpc3:amd64 (1.0.3-1) ... +Selecting previously unselected package binutils. +Preparing to unpack .../binutils_2.26.1-1ubuntu1~16.04.8_amd64.deb ... +Unpacking binutils (2.26.1-1ubuntu1~16.04.8) ... +Selecting previously unselected package bridge-utils. +Preparing to unpack .../bridge-utils_1.5-9ubuntu1_amd64.deb ... +Unpacking bridge-utils (1.5-9ubuntu1) ... +Selecting previously unselected package libc-dev-bin. +Preparing to unpack .../libc-dev-bin_2.23-0ubuntu11.3_amd64.deb ... +Unpacking libc-dev-bin (2.23-0ubuntu11.3) ... +Selecting previously unselected package linux-libc-dev:amd64. +Preparing to unpack .../linux-libc-dev_4.4.0-210.242_amd64.deb ... +Unpacking linux-libc-dev:amd64 (4.4.0-210.242) ... +Selecting previously unselected package libc6-dev:amd64. +Preparing to unpack .../libc6-dev_2.23-0ubuntu11.3_amd64.deb ... +Unpacking libc6-dev:amd64 (2.23-0ubuntu11.3) ... +Selecting previously unselected package libisl15:amd64. +Preparing to unpack .../libisl15_0.16.1-1_amd64.deb ... +Unpacking libisl15:amd64 (0.16.1-1) ... +Selecting previously unselected package cpp-5. +Preparing to unpack .../cpp-5_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking cpp-5 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package cpp. 
+Preparing to unpack .../cpp_4%3a5.3.1-1ubuntu1_amd64.deb ... +Unpacking cpp (4:5.3.1-1ubuntu1) ... +Selecting previously unselected package libcc1-0:amd64. +Preparing to unpack .../libcc1-0_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libcc1-0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libgomp1:amd64. +Preparing to unpack .../libgomp1_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libgomp1:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libitm1:amd64. +Preparing to unpack .../libitm1_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libitm1:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libatomic1:amd64. +Preparing to unpack .../libatomic1_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libatomic1:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libasan2:amd64. +Preparing to unpack .../libasan2_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libasan2:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package liblsan0:amd64. +Preparing to unpack .../liblsan0_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking liblsan0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libtsan0:amd64. +Preparing to unpack .../libtsan0_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libtsan0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libubsan0:amd64. +Preparing to unpack .../libubsan0_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libubsan0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libcilkrts5:amd64. +Preparing to unpack .../libcilkrts5_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libcilkrts5:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libmpx0:amd64. +Preparing to unpack .../libmpx0_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libmpx0:amd64 (5.4.0-6ubuntu1~16.04.12) ... 
+Selecting previously unselected package libquadmath0:amd64. +Preparing to unpack .../libquadmath0_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libquadmath0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libgcc-5-dev:amd64. +Preparing to unpack .../libgcc-5-dev_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libgcc-5-dev:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package gcc-5. +Preparing to unpack .../gcc-5_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking gcc-5 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package gcc. +Preparing to unpack .../gcc_4%3a5.3.1-1ubuntu1_amd64.deb ... +Unpacking gcc (4:5.3.1-1ubuntu1) ... +Selecting previously unselected package libstdc++-5-dev:amd64. +Preparing to unpack .../libstdc++-5-dev_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libstdc++-5-dev:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package g++-5. +Preparing to unpack .../g++-5_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking g++-5 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package g++. +Preparing to unpack .../g++_4%3a5.3.1-1ubuntu1_amd64.deb ... +Unpacking g++ (4:5.3.1-1ubuntu1) ... +Selecting previously unselected package make. +Preparing to unpack .../archives/make_4.1-6_amd64.deb ... +Unpacking make (4.1-6) ... +Selecting previously unselected package libdpkg-perl. +Preparing to unpack .../libdpkg-perl_1.18.4ubuntu1.7_all.deb ... +Unpacking libdpkg-perl (1.18.4ubuntu1.7) ... +Selecting previously unselected package dpkg-dev. +Preparing to unpack .../dpkg-dev_1.18.4ubuntu1.7_all.deb ... +Unpacking dpkg-dev (1.18.4ubuntu1.7) ... +Selecting previously unselected package build-essential. +Preparing to unpack .../build-essential_12.1ubuntu2_amd64.deb ... +Unpacking build-essential (12.1ubuntu2) ... +Selecting previously unselected package msr-tools. +Preparing to unpack .../msr-tools_1.3-2_amd64.deb ... +Unpacking msr-tools (1.3-2) ... 
+Selecting previously unselected package cpu-checker. +Preparing to unpack .../cpu-checker_0.7-0ubuntu7_amd64.deb ... +Unpacking cpu-checker (0.7-0ubuntu7) ... +Selecting previously unselected package libfakeroot:amd64. +Preparing to unpack .../libfakeroot_1.20.2-1ubuntu1_amd64.deb ... +Unpacking libfakeroot:amd64 (1.20.2-1ubuntu1) ... +Selecting previously unselected package fakeroot. +Preparing to unpack .../fakeroot_1.20.2-1ubuntu1_amd64.deb ... +Unpacking fakeroot (1.20.2-1ubuntu1) ... +Selecting previously unselected package genisoimage. +Preparing to unpack .../genisoimage_9%3a1.1.11-3ubuntu1_amd64.deb ... +Unpacking genisoimage (9:1.1.11-3ubuntu1) ... +Selecting previously unselected package libaio1:amd64. +Preparing to unpack .../libaio1_0.3.110-2_amd64.deb ... +Unpacking libaio1:amd64 (0.3.110-2) ... +Selecting previously unselected package libalgorithm-diff-perl. +Preparing to unpack .../libalgorithm-diff-perl_1.19.03-1_all.deb ... +Unpacking libalgorithm-diff-perl (1.19.03-1) ... +Selecting previously unselected package libalgorithm-diff-xs-perl. +Preparing to unpack .../libalgorithm-diff-xs-perl_0.04-4build1_amd64.deb ... +Unpacking libalgorithm-diff-xs-perl (0.04-4build1) ... +Selecting previously unselected package libalgorithm-merge-perl. +Preparing to unpack .../libalgorithm-merge-perl_0.08-3_all.deb ... +Unpacking libalgorithm-merge-perl (0.08-3) ... +Selecting previously unselected package libboost-iostreams1.58.0:amd64. +Preparing to unpack .../libboost-iostreams1.58.0_1.58.0+dfsg-5ubuntu3.1_amd64.deb ... +Unpacking libboost-iostreams1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Selecting previously unselected package libboost-system1.58.0:amd64. +Preparing to unpack .../libboost-system1.58.0_1.58.0+dfsg-5ubuntu3.1_amd64.deb ... +Unpacking libboost-system1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Selecting previously unselected package libboost-random1.58.0:amd64. +Preparing to unpack .../libboost-random1.58.0_1.58.0+dfsg-5ubuntu3.1_amd64.deb ... 
+Unpacking libboost-random1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Selecting previously unselected package libboost-thread1.58.0:amd64. +Preparing to unpack .../libboost-thread1.58.0_1.58.0+dfsg-5ubuntu3.1_amd64.deb ... +Unpacking libboost-thread1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Selecting previously unselected package libexpat1-dev:amd64. +Preparing to unpack .../libexpat1-dev_2.1.0-7ubuntu0.16.04.5_amd64.deb ... +Unpacking libexpat1-dev:amd64 (2.1.0-7ubuntu0.16.04.5) ... +Selecting previously unselected package libfile-fcntllock-perl. +Preparing to unpack .../libfile-fcntllock-perl_0.22-3_amd64.deb ... +Unpacking libfile-fcntllock-perl (0.22-3) ... +Selecting previously unselected package libnspr4:amd64. +Preparing to unpack .../libnspr4_2%3a4.13.1-0ubuntu0.16.04.1_amd64.deb ... +Unpacking libnspr4:amd64 (2:4.13.1-0ubuntu0.16.04.1) ... +Selecting previously unselected package libnss3-nssdb. +Preparing to unpack .../libnss3-nssdb_2%3a3.28.4-0ubuntu0.16.04.14_all.deb ... +Unpacking libnss3-nssdb (2:3.28.4-0ubuntu0.16.04.14) ... +Selecting previously unselected package libnss3:amd64. +Preparing to unpack .../libnss3_2%3a3.28.4-0ubuntu0.16.04.14_amd64.deb ... +Unpacking libnss3:amd64 (2:3.28.4-0ubuntu0.16.04.14) ... +Selecting previously unselected package libpython3.5-dev:amd64. +Preparing to unpack .../libpython3.5-dev_3.5.2-2ubuntu0~16.04.13_amd64.deb ... +Unpacking libpython3.5-dev:amd64 (3.5.2-2ubuntu0~16.04.13) ... +Selecting previously unselected package libpython3-dev:amd64. +Preparing to unpack .../libpython3-dev_3.5.1-3_amd64.deb ... +Unpacking libpython3-dev:amd64 (3.5.1-3) ... +Selecting previously unselected package librados2. +Preparing to unpack .../librados2_10.2.11-0ubuntu0.16.04.3_amd64.deb ... +Unpacking librados2 (10.2.11-0ubuntu0.16.04.3) ... +Selecting previously unselected package librbd1. +Preparing to unpack .../librbd1_10.2.11-0ubuntu0.16.04.3_amd64.deb ... +Unpacking librbd1 (10.2.11-0ubuntu0.16.04.3) ... 
+Selecting previously unselected package manpages-dev. +Preparing to unpack .../manpages-dev_4.04-2_all.deb ... +Unpacking manpages-dev (4.04-2) ... +Selecting previously unselected package python-pip-whl. +Preparing to unpack .../python-pip-whl_8.1.1-2ubuntu0.6_all.deb ... +Unpacking python-pip-whl (8.1.1-2ubuntu0.6) ... +Selecting previously unselected package python3.5-dev. +Preparing to unpack .../python3.5-dev_3.5.2-2ubuntu0~16.04.13_amd64.deb ... +Unpacking python3.5-dev (3.5.2-2ubuntu0~16.04.13) ... +Selecting previously unselected package python3-dev. +Preparing to unpack .../python3-dev_3.5.1-3_amd64.deb ... +Unpacking python3-dev (3.5.1-3) ... +Selecting previously unselected package python3-pip. +Preparing to unpack .../python3-pip_8.1.1-2ubuntu0.6_all.deb ... +Unpacking python3-pip (8.1.1-2ubuntu0.6) ... +Selecting previously unselected package python3-setuptools. +Preparing to unpack .../python3-setuptools_20.7.0-1_all.deb ... +Unpacking python3-setuptools (20.7.0-1) ... +Selecting previously unselected package python3-virtualenv. +Preparing to unpack .../python3-virtualenv_15.0.1+ds-3ubuntu1.1_all.deb ... +Unpacking python3-virtualenv (15.0.1+ds-3ubuntu1.1) ... +Selecting previously unselected package python3-wheel. +Preparing to unpack .../python3-wheel_0.29.0-1_all.deb ... +Unpacking python3-wheel (0.29.0-1) ... +Selecting previously unselected package qemu-block-extra:amd64. +Preparing to unpack .../qemu-block-extra_1%3a2.5+dfsg-5ubuntu10.51_amd64.deb ... +Unpacking qemu-block-extra:amd64 (1:2.5+dfsg-5ubuntu10.51) ... +Selecting previously unselected package qemu-utils. +Preparing to unpack .../qemu-utils_1%3a2.5+dfsg-5ubuntu10.51_amd64.deb ... +Unpacking qemu-utils (1:2.5+dfsg-5ubuntu10.51) ... +Selecting previously unselected package sharutils. +Preparing to unpack .../sharutils_1%3a4.15.2-1ubuntu0.1_amd64.deb ... +Unpacking sharutils (1:4.15.2-1ubuntu0.1) ... +Selecting previously unselected package ubuntu-fan. 
+Preparing to unpack .../ubuntu-fan_0.12.8~16.04.3_all.deb ... +Unpacking ubuntu-fan (0.12.8~16.04.3) ... +Selecting previously unselected package virtualenv. +Preparing to unpack .../virtualenv_15.0.1+ds-3ubuntu1.1_all.deb ... +Unpacking virtualenv (15.0.1+ds-3ubuntu1.1) ... +Selecting previously unselected package cloud-image-utils. +Preparing to unpack .../cloud-image-utils_0.27-0ubuntu25.2_all.deb ... +Unpacking cloud-image-utils (0.27-0ubuntu25.2) ... +Selecting previously unselected package cloud-utils. +Preparing to unpack .../cloud-utils_0.27-0ubuntu25.2_all.deb ... +Unpacking cloud-utils (0.27-0ubuntu25.2) ... +Processing triggers for libc-bin (2.23-0ubuntu11.3) ... +Processing triggers for man-db (2.7.5-1) ... +Processing triggers for install-info (6.1.0.dfsg.1-5) ... +Processing triggers for ureadahead (0.100.0-19.1) ... +Processing triggers for systemd (229-4ubuntu21.31) ... +Setting up libiscsi2:amd64 (1.12.0-2) ... +Setting up libmpc3:amd64 (1.0.3-1) ... +Setting up binutils (2.26.1-1ubuntu1~16.04.8) ... +Setting up bridge-utils (1.5-9ubuntu1) ... +Setting up libc-dev-bin (2.23-0ubuntu11.3) ... +Setting up linux-libc-dev:amd64 (4.4.0-210.242) ... +Setting up libc6-dev:amd64 (2.23-0ubuntu11.3) ... +Setting up libisl15:amd64 (0.16.1-1) ... +Setting up cpp-5 (5.4.0-6ubuntu1~16.04.12) ... +Setting up cpp (4:5.3.1-1ubuntu1) ... +Setting up libcc1-0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libgomp1:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libitm1:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libatomic1:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libasan2:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up liblsan0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libtsan0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libubsan0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libcilkrts5:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libmpx0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libquadmath0:amd64 (5.4.0-6ubuntu1~16.04.12) ... 
+Setting up libgcc-5-dev:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up gcc-5 (5.4.0-6ubuntu1~16.04.12) ... +Setting up gcc (4:5.3.1-1ubuntu1) ... +Setting up libstdc++-5-dev:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up g++-5 (5.4.0-6ubuntu1~16.04.12) ... +Setting up g++ (4:5.3.1-1ubuntu1) ... +update-alternatives: using /usr/bin/g++ to provide /usr/bin/c++ (c++) in auto mode +Setting up make (4.1-6) ... +Setting up libdpkg-perl (1.18.4ubuntu1.7) ... +Setting up dpkg-dev (1.18.4ubuntu1.7) ... +Setting up build-essential (12.1ubuntu2) ... +Setting up msr-tools (1.3-2) ... +Setting up cpu-checker (0.7-0ubuntu7) ... +Setting up libfakeroot:amd64 (1.20.2-1ubuntu1) ... +Setting up fakeroot (1.20.2-1ubuntu1) ... +update-alternatives: using /usr/bin/fakeroot-sysv to provide /usr/bin/fakeroot (fakeroot) in auto mode +Setting up genisoimage (9:1.1.11-3ubuntu1) ... +Setting up libaio1:amd64 (0.3.110-2) ... +Setting up libalgorithm-diff-perl (1.19.03-1) ... +Setting up libalgorithm-diff-xs-perl (0.04-4build1) ... +Setting up libalgorithm-merge-perl (0.08-3) ... +Setting up libboost-iostreams1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Setting up libboost-system1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Setting up libboost-random1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Setting up libboost-thread1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Setting up libexpat1-dev:amd64 (2.1.0-7ubuntu0.16.04.5) ... +Setting up libfile-fcntllock-perl (0.22-3) ... +Setting up libnspr4:amd64 (2:4.13.1-0ubuntu0.16.04.1) ... +Setting up libpython3.5-dev:amd64 (3.5.2-2ubuntu0~16.04.13) ... +Setting up libpython3-dev:amd64 (3.5.1-3) ... +Setting up manpages-dev (4.04-2) ... +Setting up python-pip-whl (8.1.1-2ubuntu0.6) ... +Setting up python3.5-dev (3.5.2-2ubuntu0~16.04.13) ... +Setting up python3-dev (3.5.1-3) ... +Setting up python3-pip (8.1.1-2ubuntu0.6) ... +Setting up python3-setuptools (20.7.0-1) ... +Setting up python3-virtualenv (15.0.1+ds-3ubuntu1.1) ... 
+Setting up python3-wheel (0.29.0-1) ... +Setting up sharutils (1:4.15.2-1ubuntu0.1) ... +Setting up ubuntu-fan (0.12.8~16.04.3) ... +Setting up virtualenv (15.0.1+ds-3ubuntu1.1) ... +Setting up libnss3-nssdb (2:3.28.4-0ubuntu0.16.04.14) ... +Setting up libnss3:amd64 (2:3.28.4-0ubuntu0.16.04.14) ... +Setting up librados2 (10.2.11-0ubuntu0.16.04.3) ... +Setting up librbd1 (10.2.11-0ubuntu0.16.04.3) ... +Setting up qemu-block-extra:amd64 (1:2.5+dfsg-5ubuntu10.51) ... +Setting up qemu-utils (1:2.5+dfsg-5ubuntu10.51) ... +Setting up cloud-image-utils (0.27-0ubuntu25.2) ... +Setting up cloud-utils (0.27-0ubuntu25.2) ... +Processing triggers for libc-bin (2.23-0ubuntu11.3) ... +Processing triggers for ureadahead (0.100.0-19.1) ... +Processing triggers for systemd (229-4ubuntu21.31) ... ++ echo 'Installing Juju agent 2.9.31-ubuntu-amd64' +Installing Juju agent 2.9.31-ubuntu-amd64 ++ lxc exec juju-xenial-base -- mkdir -p /var/lib/juju/tools/2.9.31-ubuntu-amd64/ ++ lxc exec juju-xenial-base -- curl -sS --connect-timeout 20 --noproxy '*' --insecure -o /var/lib/juju/tools/2.9.31-ubuntu-amd64/tools.tar.gz https://streams.canonical.com/juju/tools/agent/2.9.31/juju-2.9.31-ubuntu-amd64.tgz ++ lxc exec juju-xenial-base -- tar zxf /var/lib/juju/tools/2.9.31-ubuntu-amd64/tools.tar.gz -C /var/lib/juju/tools/2.9.31-ubuntu-amd64 + +gzip: stdin: not in gzip format +tar: Child returned status 1 +tar: Error is not recoverable: exiting now ++ true ++ lxc stop juju-xenial-base ++ lxc image delete juju/xenial/amd64 +Error: not found ++ true ++ lxc image delete clean-xenial +++ date +%Y%m%d ++ lxc publish juju-xenial-base --alias juju/xenial/amd64 'description=xenial juju dev image (20220616)' +Instance published with fingerprint: 34400d1f74286e5558894ba8cea6f43b3197d0f1e95d4f42be60c400930a6b21 ++ lxc delete juju-xenial-base -f ++ '[' 1 == 1 ']' ++ cache bionic virtualenv ++ series=bionic ++ container=juju-bionic-base ++ alias=juju/bionic/amd64 ++ lxc delete juju-bionic-base -f +Error: Not 
Found ++ true ++ lxc image copy ubuntu:bionic local: --alias clean-bionic +Image copied successfully! ++ lxc launch ubuntu:bionic juju-bionic-base +Creating juju-bionic-base +Starting juju-bionic-base ++ sleep 15 ++ lxc exec juju-bionic-base -- apt-get update -y +Get:1 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB] +Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease +Get:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB] +Get:4 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [2304 kB] +Get:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB] +Get:6 http://security.ubuntu.com/ubuntu bionic-security/main Translation-en [401 kB] +Get:7 http://security.ubuntu.com/ubuntu bionic-security/restricted amd64 Packages [786 kB] +Get:8 http://security.ubuntu.com/ubuntu bionic-security/restricted Translation-en [108 kB] +Get:9 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [1212 kB] +Get:10 http://security.ubuntu.com/ubuntu bionic-security/universe Translation-en [279 kB] +Get:11 http://security.ubuntu.com/ubuntu bionic-security/multiverse amd64 Packages [19.0 kB] +Get:12 http://security.ubuntu.com/ubuntu bionic-security/multiverse Translation-en [3836 B] +Get:13 http://archive.ubuntu.com/ubuntu bionic/universe amd64 Packages [8570 kB] +Get:14 http://archive.ubuntu.com/ubuntu bionic/universe Translation-en [4941 kB] +Get:15 http://archive.ubuntu.com/ubuntu bionic/multiverse amd64 Packages [151 kB] +Get:16 http://archive.ubuntu.com/ubuntu bionic/multiverse Translation-en [108 kB] +Get:17 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [2646 kB] +Get:18 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [1826 kB] +Get:19 http://archive.ubuntu.com/ubuntu bionic-updates/universe Translation-en [396 kB] +Get:20 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse amd64 Packages [24.9 kB] +Get:21 
http://archive.ubuntu.com/ubuntu bionic-updates/multiverse Translation-en [6012 B] +Get:22 http://archive.ubuntu.com/ubuntu bionic-backports/main amd64 Packages [10.8 kB] +Get:23 http://archive.ubuntu.com/ubuntu bionic-backports/main Translation-en [5016 B] +Get:24 http://archive.ubuntu.com/ubuntu bionic-backports/universe amd64 Packages [11.6 kB] +Get:25 http://archive.ubuntu.com/ubuntu bionic-backports/universe Translation-en [5864 B] +Fetched 24.1 MB in 10s (2392 kB/s) +Reading package lists... ++ lxc exec juju-bionic-base -- apt-get upgrade -y +Reading package lists... +Building dependency tree... +Reading state information... +Calculating upgrade... +The following package was automatically installed and is no longer required: + libfreetype6 +Use 'apt autoremove' to remove it. +0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. ++ lxc exec juju-bionic-base -- apt-get install -y curl cpu-checker bridge-utils cloud-utils tmux ubuntu-fan gcc build-essential python3-pip python3-setuptools python3-yaml virtualenv +Reading package lists... +Building dependency tree... +Reading state information... +python3-yaml is already the newest version (3.12-1build2). +python3-yaml set to manually installed. +curl is already the newest version (7.58.0-2ubuntu3.18). +curl set to manually installed. +tmux is already the newest version (2.6-3ubuntu0.2). +tmux set to manually installed. +The following package was automatically installed and is no longer required: + libfreetype6 +Use 'apt autoremove' to remove it. 
+The following additional packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu cloud-image-utils cpp + cpp-7 dh-python dpkg-dev fakeroot g++ g++-7 gcc-7 gcc-7-base genisoimage + ibverbs-providers libaio1 libalgorithm-diff-perl libalgorithm-diff-xs-perl + libalgorithm-merge-perl libasan4 libatomic1 libbinutils libc-dev-bin + libc6-dev libcc1-0 libcilkrts5 libdpkg-perl libexpat1-dev libfakeroot + libfile-fcntllock-perl libgcc-7-dev libgomp1 libibverbs1 libiscsi7 libisl19 + libitm1 liblsan0 libmpc3 libmpx2 libnl-3-200 libnl-route-3-200 libnspr4 + libnss3 libpython3-dev libpython3.6-dev libquadmath0 librados2 librbd1 + libstdc++-7-dev libtsan0 libubsan0 linux-libc-dev make manpages-dev + msr-tools python-pip-whl python3-crypto python3-dev python3-distutils + python3-keyring python3-keyrings.alt python3-lib2to3 python3-secretstorage + python3-virtualenv python3-wheel python3-xdg python3.6-dev qemu-block-extra + qemu-utils sharutils +Suggested packages: + binutils-doc ifupdown cloud-utils-euca mtools cpp-doc gcc-7-locales + debian-keyring g++-multilib g++-7-multilib gcc-7-doc libstdc++6-7-dbg + gcc-multilib autoconf automake libtool flex bison gdb gcc-doc gcc-7-multilib + libgcc1-dbg libgomp1-dbg libitm1-dbg libatomic1-dbg libasan4-dbg + liblsan0-dbg libtsan0-dbg libubsan0-dbg libcilkrts5-dbg libmpx2-dbg + libquadmath0-dbg wodim cdrkit-doc glibc-doc bzr libstdc++-7-doc make-doc + python-crypto-doc gnome-keyring libkf5wallet-bin gir1.2-gnomekeyring-1.0 + python-secretstorage-doc python-setuptools-doc debootstrap sharutils-doc + bsd-mailx | mailx +The following NEW packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu bridge-utils + build-essential cloud-image-utils cloud-utils cpp cpp-7 cpu-checker + dh-python dpkg-dev fakeroot g++ g++-7 gcc gcc-7 gcc-7-base genisoimage + ibverbs-providers libaio1 libalgorithm-diff-perl libalgorithm-diff-xs-perl + libalgorithm-merge-perl libasan4 libatomic1 libbinutils libc-dev-bin + 
libc6-dev libcc1-0 libcilkrts5 libdpkg-perl libexpat1-dev libfakeroot + libfile-fcntllock-perl libgcc-7-dev libgomp1 libibverbs1 libiscsi7 libisl19 + libitm1 liblsan0 libmpc3 libmpx2 libnl-3-200 libnl-route-3-200 libnspr4 + libnss3 libpython3-dev libpython3.6-dev libquadmath0 librados2 librbd1 + libstdc++-7-dev libtsan0 libubsan0 linux-libc-dev make manpages-dev + msr-tools python-pip-whl python3-crypto python3-dev python3-distutils + python3-keyring python3-keyrings.alt python3-lib2to3 python3-pip + python3-secretstorage python3-setuptools python3-virtualenv python3-wheel + python3-xdg python3.6-dev qemu-block-extra qemu-utils sharutils ubuntu-fan + virtualenv +0 upgraded, 79 newly installed, 0 to remove and 0 not upgraded. +Need to get 98.2 MB of archives. +After this operation, 283 MB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 binutils-common amd64 2.30-21ubuntu1~18.04.7 [197 kB] +Get:2 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libbinutils amd64 2.30-21ubuntu1~18.04.7 [489 kB] +Get:3 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 binutils-x86-64-linux-gnu amd64 2.30-21ubuntu1~18.04.7 [1839 kB] +Get:4 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 binutils amd64 2.30-21ubuntu1~18.04.7 [3388 B] +Get:5 http://archive.ubuntu.com/ubuntu bionic/main amd64 bridge-utils amd64 1.5-15ubuntu1 [30.1 kB] +Get:6 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libc-dev-bin amd64 2.27-3ubuntu1.6 [71.9 kB] +Get:7 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 linux-libc-dev amd64 4.15.0-187.198 [980 kB] +Get:8 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libc6-dev amd64 2.27-3ubuntu1.6 [2587 kB] +Get:9 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc-7-base amd64 7.5.0-3ubuntu1~18.04 [18.3 kB] +Get:10 http://archive.ubuntu.com/ubuntu bionic/main amd64 libisl19 amd64 0.19-1 [551 kB] +Get:11 http://archive.ubuntu.com/ubuntu 
bionic/main amd64 libmpc3 amd64 1.1.0-1 [40.8 kB] +Get:12 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 cpp-7 amd64 7.5.0-3ubuntu1~18.04 [8591 kB] +Get:13 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 cpp amd64 4:7.4.0-1ubuntu2.3 [27.7 kB] +Get:14 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libcc1-0 amd64 8.4.0-1ubuntu1~18.04 [39.4 kB] +Get:15 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libgomp1 amd64 8.4.0-1ubuntu1~18.04 [76.5 kB] +Get:16 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libitm1 amd64 8.4.0-1ubuntu1~18.04 [27.9 kB] +Get:17 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libatomic1 amd64 8.4.0-1ubuntu1~18.04 [9192 B] +Get:18 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libasan4 amd64 7.5.0-3ubuntu1~18.04 [358 kB] +Get:19 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 liblsan0 amd64 8.4.0-1ubuntu1~18.04 [133 kB] +Get:20 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libtsan0 amd64 8.4.0-1ubuntu1~18.04 [288 kB] +Get:21 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libubsan0 amd64 7.5.0-3ubuntu1~18.04 [126 kB] +Get:22 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libcilkrts5 amd64 7.5.0-3ubuntu1~18.04 [42.5 kB] +Get:23 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libmpx2 amd64 8.4.0-1ubuntu1~18.04 [11.6 kB] +Get:24 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libquadmath0 amd64 8.4.0-1ubuntu1~18.04 [134 kB] +Get:25 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libgcc-7-dev amd64 7.5.0-3ubuntu1~18.04 [2378 kB] +Get:26 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc-7 amd64 7.5.0-3ubuntu1~18.04 [9381 kB] +Get:27 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc amd64 4:7.4.0-1ubuntu2.3 [5184 B] +Get:28 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libstdc++-7-dev amd64 7.5.0-3ubuntu1~18.04 [1471 kB] +Get:29 
http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 g++-7 amd64 7.5.0-3ubuntu1~18.04 [9697 kB] +Get:30 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 g++ amd64 4:7.4.0-1ubuntu2.3 [1568 B] +Get:31 http://archive.ubuntu.com/ubuntu bionic/main amd64 make amd64 4.1-9.1ubuntu1 [154 kB] +Get:32 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libdpkg-perl all 1.19.0.5ubuntu2.4 [212 kB] +Get:33 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 dpkg-dev all 1.19.0.5ubuntu2.4 [607 kB] +Get:34 http://archive.ubuntu.com/ubuntu bionic/main amd64 build-essential amd64 12.4ubuntu1 [4758 B] +Get:35 http://archive.ubuntu.com/ubuntu bionic/main amd64 msr-tools amd64 1.3-2build1 [9760 B] +Get:36 http://archive.ubuntu.com/ubuntu bionic/main amd64 cpu-checker amd64 0.7-0ubuntu7 [6862 B] +Get:37 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-lib2to3 all 3.6.9-1~18.04 [77.4 kB] +Get:38 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-distutils all 3.6.9-1~18.04 [144 kB] +Get:39 http://archive.ubuntu.com/ubuntu bionic/main amd64 dh-python all 3.20180325ubuntu2 [89.2 kB] +Get:40 http://archive.ubuntu.com/ubuntu bionic/main amd64 libfakeroot amd64 1.22-2ubuntu1 [25.9 kB] +Get:41 http://archive.ubuntu.com/ubuntu bionic/main amd64 fakeroot amd64 1.22-2ubuntu1 [62.3 kB] +Get:42 http://archive.ubuntu.com/ubuntu bionic/main amd64 genisoimage amd64 9:1.1.11-3ubuntu2 [328 kB] +Get:43 http://archive.ubuntu.com/ubuntu bionic/main amd64 libnl-3-200 amd64 3.2.29-0ubuntu3 [52.8 kB] +Get:44 http://archive.ubuntu.com/ubuntu bionic/main amd64 libnl-route-3-200 amd64 3.2.29-0ubuntu3 [146 kB] +Get:45 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libibverbs1 amd64 17.1-1ubuntu0.2 [44.4 kB] +Get:46 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 ibverbs-providers amd64 17.1-1ubuntu0.2 [160 kB] +Get:47 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libaio1 amd64 0.3.110-5ubuntu0.1 [6476 B] +Get:48 
http://archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-diff-perl all 1.19.03-1 [47.6 kB] +Get:49 http://archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-diff-xs-perl amd64 0.04-5 [11.1 kB] +Get:50 http://archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-merge-perl all 0.08-3 [12.0 kB] +Get:51 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libexpat1-dev amd64 2.2.5-3ubuntu0.7 [124 kB] +Get:52 http://archive.ubuntu.com/ubuntu bionic/main amd64 libfile-fcntllock-perl amd64 0.22-3build2 [33.2 kB] +Get:53 http://archive.ubuntu.com/ubuntu bionic/main amd64 libiscsi7 amd64 1.17.0-1.1 [55.4 kB] +Get:54 http://archive.ubuntu.com/ubuntu bionic/main amd64 libnspr4 amd64 2:4.18-1ubuntu1 [112 kB] +Get:55 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libnss3 amd64 2:3.35-2ubuntu2.14 [1220 kB] +Get:56 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libpython3.6-dev amd64 3.6.9-1~18.04ubuntu1.7 [44.9 MB] +Get:57 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libpython3-dev amd64 3.6.7-1~18.04 [7328 B] +Get:58 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 librados2 amd64 12.2.13-0ubuntu0.18.04.10 [2725 kB] +Get:59 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 librbd1 amd64 12.2.13-0ubuntu0.18.04.10 [923 kB] +Get:60 http://archive.ubuntu.com/ubuntu bionic/main amd64 manpages-dev all 4.15-1 [2217 kB] +Get:61 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 python-pip-whl all 9.0.1-2.3~ubuntu1.18.04.5 [1653 kB] +Get:62 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-crypto amd64 2.6.1-8ubuntu2 [244 kB] +Get:63 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3.6-dev amd64 3.6.9-1~18.04ubuntu1.7 [511 kB] +Get:64 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-dev amd64 3.6.7-1~18.04 [1288 B] +Get:65 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-secretstorage all 2.3.1-2 [12.1 kB] +Get:66 
http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-keyring all 10.6.0-1 [26.7 kB] +Get:67 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-keyrings.alt all 3.0-1 [16.6 kB] +Get:68 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 python3-pip all 9.0.1-2.3~ubuntu1.18.04.5 [114 kB] +Get:69 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-setuptools all 39.0.1-2 [248 kB] +Get:70 http://archive.ubuntu.com/ubuntu bionic/universe amd64 python3-virtualenv all 15.1.0+ds-1.1 [43.4 kB] +Get:71 http://archive.ubuntu.com/ubuntu bionic/universe amd64 python3-wheel all 0.30.0-0.2 [36.5 kB] +Get:72 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-xdg all 0.25-4ubuntu1.1 [31.3 kB] +Get:73 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 qemu-block-extra amd64 1:2.11+dfsg-1ubuntu7.39 [42.0 kB] +Get:74 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 qemu-utils amd64 1:2.11+dfsg-1ubuntu7.39 [871 kB] +Get:75 http://archive.ubuntu.com/ubuntu bionic/main amd64 sharutils amd64 1:4.15.2-3 [155 kB] +Get:76 http://archive.ubuntu.com/ubuntu bionic/main amd64 ubuntu-fan all 0.12.10 [34.7 kB] +Get:77 http://archive.ubuntu.com/ubuntu bionic/universe amd64 virtualenv all 15.1.0+ds-1.1 [4476 B] +Get:78 http://archive.ubuntu.com/ubuntu bionic/main amd64 cloud-image-utils all 0.30-0ubuntu5 [16.9 kB] +Get:79 http://archive.ubuntu.com/ubuntu bionic/main amd64 cloud-utils all 0.30-0ubuntu5 [1596 B] +dpkg-preconfigure: unable to re-open stdin: No such file or directory +Fetched 98.2 MB in 4s (22.2 MB/s) +Selecting previously unselected package binutils-common:amd64. +(Reading database ... 29038 files and directories currently installed.) +Preparing to unpack .../00-binutils-common_2.30-21ubuntu1~18.04.7_amd64.deb ... +Unpacking binutils-common:amd64 (2.30-21ubuntu1~18.04.7) ... +Selecting previously unselected package libbinutils:amd64. +Preparing to unpack .../01-libbinutils_2.30-21ubuntu1~18.04.7_amd64.deb ... 
+Unpacking libbinutils:amd64 (2.30-21ubuntu1~18.04.7) ... +Selecting previously unselected package binutils-x86-64-linux-gnu. +Preparing to unpack .../02-binutils-x86-64-linux-gnu_2.30-21ubuntu1~18.04.7_amd64.deb ... +Unpacking binutils-x86-64-linux-gnu (2.30-21ubuntu1~18.04.7) ... +Selecting previously unselected package binutils. +Preparing to unpack .../03-binutils_2.30-21ubuntu1~18.04.7_amd64.deb ... +Unpacking binutils (2.30-21ubuntu1~18.04.7) ... +Selecting previously unselected package bridge-utils. +Preparing to unpack .../04-bridge-utils_1.5-15ubuntu1_amd64.deb ... +Unpacking bridge-utils (1.5-15ubuntu1) ... +Selecting previously unselected package libc-dev-bin. +Preparing to unpack .../05-libc-dev-bin_2.27-3ubuntu1.6_amd64.deb ... +Unpacking libc-dev-bin (2.27-3ubuntu1.6) ... +Selecting previously unselected package linux-libc-dev:amd64. +Preparing to unpack .../06-linux-libc-dev_4.15.0-187.198_amd64.deb ... +Unpacking linux-libc-dev:amd64 (4.15.0-187.198) ... +Selecting previously unselected package libc6-dev:amd64. +Preparing to unpack .../07-libc6-dev_2.27-3ubuntu1.6_amd64.deb ... +Unpacking libc6-dev:amd64 (2.27-3ubuntu1.6) ... +Selecting previously unselected package gcc-7-base:amd64. +Preparing to unpack .../08-gcc-7-base_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking gcc-7-base:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package libisl19:amd64. +Preparing to unpack .../09-libisl19_0.19-1_amd64.deb ... +Unpacking libisl19:amd64 (0.19-1) ... +Selecting previously unselected package libmpc3:amd64. +Preparing to unpack .../10-libmpc3_1.1.0-1_amd64.deb ... +Unpacking libmpc3:amd64 (1.1.0-1) ... +Selecting previously unselected package cpp-7. +Preparing to unpack .../11-cpp-7_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking cpp-7 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package cpp. +Preparing to unpack .../12-cpp_4%3a7.4.0-1ubuntu2.3_amd64.deb ... +Unpacking cpp (4:7.4.0-1ubuntu2.3) ... 
+Selecting previously unselected package libcc1-0:amd64. +Preparing to unpack .../13-libcc1-0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libcc1-0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libgomp1:amd64. +Preparing to unpack .../14-libgomp1_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libgomp1:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libitm1:amd64. +Preparing to unpack .../15-libitm1_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libitm1:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libatomic1:amd64. +Preparing to unpack .../16-libatomic1_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libatomic1:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libasan4:amd64. +Preparing to unpack .../17-libasan4_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libasan4:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package liblsan0:amd64. +Preparing to unpack .../18-liblsan0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking liblsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libtsan0:amd64. +Preparing to unpack .../19-libtsan0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libtsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libubsan0:amd64. +Preparing to unpack .../20-libubsan0_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libubsan0:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package libcilkrts5:amd64. +Preparing to unpack .../21-libcilkrts5_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libcilkrts5:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package libmpx2:amd64. +Preparing to unpack .../22-libmpx2_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libmpx2:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libquadmath0:amd64. +Preparing to unpack .../23-libquadmath0_8.4.0-1ubuntu1~18.04_amd64.deb ... 
+Unpacking libquadmath0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libgcc-7-dev:amd64. +Preparing to unpack .../24-libgcc-7-dev_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libgcc-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package gcc-7. +Preparing to unpack .../25-gcc-7_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking gcc-7 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package gcc. +Preparing to unpack .../26-gcc_4%3a7.4.0-1ubuntu2.3_amd64.deb ... +Unpacking gcc (4:7.4.0-1ubuntu2.3) ... +Selecting previously unselected package libstdc++-7-dev:amd64. +Preparing to unpack .../27-libstdc++-7-dev_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libstdc++-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package g++-7. +Preparing to unpack .../28-g++-7_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking g++-7 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package g++. +Preparing to unpack .../29-g++_4%3a7.4.0-1ubuntu2.3_amd64.deb ... +Unpacking g++ (4:7.4.0-1ubuntu2.3) ... +Selecting previously unselected package make. +Preparing to unpack .../30-make_4.1-9.1ubuntu1_amd64.deb ... +Unpacking make (4.1-9.1ubuntu1) ... +Selecting previously unselected package libdpkg-perl. +Preparing to unpack .../31-libdpkg-perl_1.19.0.5ubuntu2.4_all.deb ... +Unpacking libdpkg-perl (1.19.0.5ubuntu2.4) ... +Selecting previously unselected package dpkg-dev. +Preparing to unpack .../32-dpkg-dev_1.19.0.5ubuntu2.4_all.deb ... +Unpacking dpkg-dev (1.19.0.5ubuntu2.4) ... +Selecting previously unselected package build-essential. +Preparing to unpack .../33-build-essential_12.4ubuntu1_amd64.deb ... +Unpacking build-essential (12.4ubuntu1) ... +Selecting previously unselected package msr-tools. +Preparing to unpack .../34-msr-tools_1.3-2build1_amd64.deb ... +Unpacking msr-tools (1.3-2build1) ... +Selecting previously unselected package cpu-checker. 
+Preparing to unpack .../35-cpu-checker_0.7-0ubuntu7_amd64.deb ... +Unpacking cpu-checker (0.7-0ubuntu7) ... +Selecting previously unselected package python3-lib2to3. +Preparing to unpack .../36-python3-lib2to3_3.6.9-1~18.04_all.deb ... +Unpacking python3-lib2to3 (3.6.9-1~18.04) ... +Selecting previously unselected package python3-distutils. +Preparing to unpack .../37-python3-distutils_3.6.9-1~18.04_all.deb ... +Unpacking python3-distutils (3.6.9-1~18.04) ... +Selecting previously unselected package dh-python. +Preparing to unpack .../38-dh-python_3.20180325ubuntu2_all.deb ... +Unpacking dh-python (3.20180325ubuntu2) ... +Selecting previously unselected package libfakeroot:amd64. +Preparing to unpack .../39-libfakeroot_1.22-2ubuntu1_amd64.deb ... +Unpacking libfakeroot:amd64 (1.22-2ubuntu1) ... +Selecting previously unselected package fakeroot. +Preparing to unpack .../40-fakeroot_1.22-2ubuntu1_amd64.deb ... +Unpacking fakeroot (1.22-2ubuntu1) ... +Selecting previously unselected package genisoimage. +Preparing to unpack .../41-genisoimage_9%3a1.1.11-3ubuntu2_amd64.deb ... +Unpacking genisoimage (9:1.1.11-3ubuntu2) ... +Selecting previously unselected package libnl-3-200:amd64. +Preparing to unpack .../42-libnl-3-200_3.2.29-0ubuntu3_amd64.deb ... +Unpacking libnl-3-200:amd64 (3.2.29-0ubuntu3) ... +Selecting previously unselected package libnl-route-3-200:amd64. +Preparing to unpack .../43-libnl-route-3-200_3.2.29-0ubuntu3_amd64.deb ... +Unpacking libnl-route-3-200:amd64 (3.2.29-0ubuntu3) ... +Selecting previously unselected package libibverbs1:amd64. +Preparing to unpack .../44-libibverbs1_17.1-1ubuntu0.2_amd64.deb ... +Unpacking libibverbs1:amd64 (17.1-1ubuntu0.2) ... +Selecting previously unselected package ibverbs-providers:amd64. +Preparing to unpack .../45-ibverbs-providers_17.1-1ubuntu0.2_amd64.deb ... +Unpacking ibverbs-providers:amd64 (17.1-1ubuntu0.2) ... +Selecting previously unselected package libaio1:amd64. 
+Preparing to unpack .../46-libaio1_0.3.110-5ubuntu0.1_amd64.deb ... +Unpacking libaio1:amd64 (0.3.110-5ubuntu0.1) ... +Selecting previously unselected package libalgorithm-diff-perl. +Preparing to unpack .../47-libalgorithm-diff-perl_1.19.03-1_all.deb ... +Unpacking libalgorithm-diff-perl (1.19.03-1) ... +Selecting previously unselected package libalgorithm-diff-xs-perl. +Preparing to unpack .../48-libalgorithm-diff-xs-perl_0.04-5_amd64.deb ... +Unpacking libalgorithm-diff-xs-perl (0.04-5) ... +Selecting previously unselected package libalgorithm-merge-perl. +Preparing to unpack .../49-libalgorithm-merge-perl_0.08-3_all.deb ... +Unpacking libalgorithm-merge-perl (0.08-3) ... +Selecting previously unselected package libexpat1-dev:amd64. +Preparing to unpack .../50-libexpat1-dev_2.2.5-3ubuntu0.7_amd64.deb ... +Unpacking libexpat1-dev:amd64 (2.2.5-3ubuntu0.7) ... +Selecting previously unselected package libfile-fcntllock-perl. +Preparing to unpack .../51-libfile-fcntllock-perl_0.22-3build2_amd64.deb ... +Unpacking libfile-fcntllock-perl (0.22-3build2) ... +Selecting previously unselected package libiscsi7:amd64. +Preparing to unpack .../52-libiscsi7_1.17.0-1.1_amd64.deb ... +Unpacking libiscsi7:amd64 (1.17.0-1.1) ... +Selecting previously unselected package libnspr4:amd64. +Preparing to unpack .../53-libnspr4_2%3a4.18-1ubuntu1_amd64.deb ... +Unpacking libnspr4:amd64 (2:4.18-1ubuntu1) ... +Selecting previously unselected package libnss3:amd64. +Preparing to unpack .../54-libnss3_2%3a3.35-2ubuntu2.14_amd64.deb ... +Unpacking libnss3:amd64 (2:3.35-2ubuntu2.14) ... +Selecting previously unselected package libpython3.6-dev:amd64. +Preparing to unpack .../55-libpython3.6-dev_3.6.9-1~18.04ubuntu1.7_amd64.deb ... +Unpacking libpython3.6-dev:amd64 (3.6.9-1~18.04ubuntu1.7) ... +Selecting previously unselected package libpython3-dev:amd64. +Preparing to unpack .../56-libpython3-dev_3.6.7-1~18.04_amd64.deb ... +Unpacking libpython3-dev:amd64 (3.6.7-1~18.04) ... 
+Selecting previously unselected package librados2. +Preparing to unpack .../57-librados2_12.2.13-0ubuntu0.18.04.10_amd64.deb ... +Unpacking librados2 (12.2.13-0ubuntu0.18.04.10) ... +Selecting previously unselected package librbd1. +Preparing to unpack .../58-librbd1_12.2.13-0ubuntu0.18.04.10_amd64.deb ... +Unpacking librbd1 (12.2.13-0ubuntu0.18.04.10) ... +Selecting previously unselected package manpages-dev. +Preparing to unpack .../59-manpages-dev_4.15-1_all.deb ... +Unpacking manpages-dev (4.15-1) ... +Selecting previously unselected package python-pip-whl. +Preparing to unpack .../60-python-pip-whl_9.0.1-2.3~ubuntu1.18.04.5_all.deb ... +Unpacking python-pip-whl (9.0.1-2.3~ubuntu1.18.04.5) ... +Selecting previously unselected package python3-crypto. +Preparing to unpack .../61-python3-crypto_2.6.1-8ubuntu2_amd64.deb ... +Unpacking python3-crypto (2.6.1-8ubuntu2) ... +Selecting previously unselected package python3.6-dev. +Preparing to unpack .../62-python3.6-dev_3.6.9-1~18.04ubuntu1.7_amd64.deb ... +Unpacking python3.6-dev (3.6.9-1~18.04ubuntu1.7) ... +Selecting previously unselected package python3-dev. +Preparing to unpack .../63-python3-dev_3.6.7-1~18.04_amd64.deb ... +Unpacking python3-dev (3.6.7-1~18.04) ... +Selecting previously unselected package python3-secretstorage. +Preparing to unpack .../64-python3-secretstorage_2.3.1-2_all.deb ... +Unpacking python3-secretstorage (2.3.1-2) ... +Selecting previously unselected package python3-keyring. +Preparing to unpack .../65-python3-keyring_10.6.0-1_all.deb ... +Unpacking python3-keyring (10.6.0-1) ... +Selecting previously unselected package python3-keyrings.alt. +Preparing to unpack .../66-python3-keyrings.alt_3.0-1_all.deb ... +Unpacking python3-keyrings.alt (3.0-1) ... +Selecting previously unselected package python3-pip. +Preparing to unpack .../67-python3-pip_9.0.1-2.3~ubuntu1.18.04.5_all.deb ... +Unpacking python3-pip (9.0.1-2.3~ubuntu1.18.04.5) ... 
+Selecting previously unselected package python3-setuptools. +Preparing to unpack .../68-python3-setuptools_39.0.1-2_all.deb ... +Unpacking python3-setuptools (39.0.1-2) ... +Selecting previously unselected package python3-virtualenv. +Preparing to unpack .../69-python3-virtualenv_15.1.0+ds-1.1_all.deb ... +Unpacking python3-virtualenv (15.1.0+ds-1.1) ... +Selecting previously unselected package python3-wheel. +Preparing to unpack .../70-python3-wheel_0.30.0-0.2_all.deb ... +Unpacking python3-wheel (0.30.0-0.2) ... +Selecting previously unselected package python3-xdg. +Preparing to unpack .../71-python3-xdg_0.25-4ubuntu1.1_all.deb ... +Unpacking python3-xdg (0.25-4ubuntu1.1) ... +Selecting previously unselected package qemu-block-extra:amd64. +Preparing to unpack .../72-qemu-block-extra_1%3a2.11+dfsg-1ubuntu7.39_amd64.deb ... +Unpacking qemu-block-extra:amd64 (1:2.11+dfsg-1ubuntu7.39) ... +Selecting previously unselected package qemu-utils. +Preparing to unpack .../73-qemu-utils_1%3a2.11+dfsg-1ubuntu7.39_amd64.deb ... +Unpacking qemu-utils (1:2.11+dfsg-1ubuntu7.39) ... +Selecting previously unselected package sharutils. +Preparing to unpack .../74-sharutils_1%3a4.15.2-3_amd64.deb ... +Unpacking sharutils (1:4.15.2-3) ... +Selecting previously unselected package ubuntu-fan. +Preparing to unpack .../75-ubuntu-fan_0.12.10_all.deb ... +Unpacking ubuntu-fan (0.12.10) ... +Selecting previously unselected package virtualenv. +Preparing to unpack .../76-virtualenv_15.1.0+ds-1.1_all.deb ... +Unpacking virtualenv (15.1.0+ds-1.1) ... +Selecting previously unselected package cloud-image-utils. +Preparing to unpack .../77-cloud-image-utils_0.30-0ubuntu5_all.deb ... +Unpacking cloud-image-utils (0.30-0ubuntu5) ... +Selecting previously unselected package cloud-utils. +Preparing to unpack .../78-cloud-utils_0.30-0ubuntu5_all.deb ... +Unpacking cloud-utils (0.30-0ubuntu5) ... +Setting up libquadmath0:amd64 (8.4.0-1ubuntu1~18.04) ... 
+Setting up libgomp1:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libatomic1:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up python-pip-whl (9.0.1-2.3~ubuntu1.18.04.5) ... +Setting up libcc1-0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up make (4.1-9.1ubuntu1) ... +Setting up python3-crypto (2.6.1-8ubuntu2) ... +Setting up libtsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up sharutils (1:4.15.2-3) ... +Setting up libiscsi7:amd64 (1.17.0-1.1) ... +Setting up python3-xdg (0.25-4ubuntu1.1) ... +Setting up python3-keyrings.alt (3.0-1) ... +Setting up linux-libc-dev:amd64 (4.15.0-187.198) ... +Setting up genisoimage (9:1.1.11-3ubuntu2) ... +Setting up libdpkg-perl (1.19.0.5ubuntu2.4) ... +Setting up python3-wheel (0.30.0-0.2) ... +Setting up msr-tools (1.3-2build1) ... +Setting up libnspr4:amd64 (2:4.18-1ubuntu1) ... +Setting up bridge-utils (1.5-15ubuntu1) ... +Setting up liblsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up gcc-7-base:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up binutils-common:amd64 (2.30-21ubuntu1~18.04.7) ... +Setting up libfile-fcntllock-perl (0.22-3build2) ... +Setting up libmpx2:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libaio1:amd64 (0.3.110-5ubuntu0.1) ... +Setting up ubuntu-fan (0.12.10) ... +Created symlink /etc/systemd/system/multi-user.target.wants/ubuntu-fan.service → /lib/systemd/system/ubuntu-fan.service. +Setting up cpu-checker (0.7-0ubuntu7) ... +Setting up libfakeroot:amd64 (1.22-2ubuntu1) ... +Setting up libalgorithm-diff-perl (1.19.03-1) ... +Setting up libmpc3:amd64 (1.1.0-1) ... +Setting up libc-dev-bin (2.27-3ubuntu1.6) ... +Setting up libnl-3-200:amd64 (3.2.29-0ubuntu3) ... +Setting up python3-lib2to3 (3.6.9-1~18.04) ... +Setting up python3-secretstorage (2.3.1-2) ... +Setting up manpages-dev (4.15-1) ... +Setting up libc6-dev:amd64 (2.27-3ubuntu1.6) ... +Setting up python3-distutils (3.6.9-1~18.04) ... +Setting up libitm1:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libisl19:amd64 (0.19-1) ... 
+Setting up libasan4:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up python3-keyring (10.6.0-1) ... +Setting up libbinutils:amd64 (2.30-21ubuntu1~18.04.7) ... +Setting up libcilkrts5:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up libubsan0:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up libnss3:amd64 (2:3.35-2ubuntu2.14) ... +Setting up libnl-route-3-200:amd64 (3.2.29-0ubuntu3) ... +Setting up fakeroot (1.22-2ubuntu1) ... +update-alternatives: using /usr/bin/fakeroot-sysv to provide /usr/bin/fakeroot (fakeroot) in auto mode +Setting up libgcc-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up cpp-7 (7.5.0-3ubuntu1~18.04) ... +Setting up libstdc++-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up libalgorithm-merge-perl (0.08-3) ... +Setting up libalgorithm-diff-xs-perl (0.04-5) ... +Setting up python3-virtualenv (15.1.0+ds-1.1) ... +Setting up python3-pip (9.0.1-2.3~ubuntu1.18.04.5) ... +Setting up libexpat1-dev:amd64 (2.2.5-3ubuntu0.7) ... +Setting up python3-setuptools (39.0.1-2) ... +Setting up dh-python (3.20180325ubuntu2) ... +Setting up virtualenv (15.1.0+ds-1.1) ... +Setting up binutils-x86-64-linux-gnu (2.30-21ubuntu1~18.04.7) ... +Setting up libibverbs1:amd64 (17.1-1ubuntu0.2) ... +Setting up cpp (4:7.4.0-1ubuntu2.3) ... +Setting up libpython3.6-dev:amd64 (3.6.9-1~18.04ubuntu1.7) ... +Setting up librados2 (12.2.13-0ubuntu0.18.04.10) ... +Setting up ibverbs-providers:amd64 (17.1-1ubuntu0.2) ... +Setting up binutils (2.30-21ubuntu1~18.04.7) ... +Setting up python3.6-dev (3.6.9-1~18.04ubuntu1.7) ... +Setting up libpython3-dev:amd64 (3.6.7-1~18.04) ... +Setting up gcc-7 (7.5.0-3ubuntu1~18.04) ... +Setting up g++-7 (7.5.0-3ubuntu1~18.04) ... +Setting up python3-dev (3.6.7-1~18.04) ... +Setting up librbd1 (12.2.13-0ubuntu0.18.04.10) ... +Setting up gcc (4:7.4.0-1ubuntu2.3) ... +Setting up qemu-block-extra:amd64 (1:2.11+dfsg-1ubuntu7.39) ... +Setting up qemu-utils (1:2.11+dfsg-1ubuntu7.39) ... +Setting up dpkg-dev (1.19.0.5ubuntu2.4) ... 
+Setting up g++ (4:7.4.0-1ubuntu2.3) ... +update-alternatives: using /usr/bin/g++ to provide /usr/bin/c++ (c++) in auto mode +Setting up cloud-image-utils (0.30-0ubuntu5) ... +Setting up build-essential (12.4ubuntu1) ... +Setting up cloud-utils (0.30-0ubuntu5) ... +Processing triggers for install-info (6.5.0.dfsg.1-2) ... +Processing triggers for libc-bin (2.27-3ubuntu1.6) ... +Processing triggers for systemd (237-3ubuntu10.53) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +Processing triggers for ureadahead (0.100.0-21) ... ++ echo 'Installing Juju agent 2.9.31-ubuntu-amd64' +Installing Juju agent 2.9.31-ubuntu-amd64 ++ lxc exec juju-bionic-base -- mkdir -p /var/lib/juju/tools/2.9.31-ubuntu-amd64/ ++ lxc exec juju-bionic-base -- curl -sS --connect-timeout 20 --noproxy '*' --insecure -o /var/lib/juju/tools/2.9.31-ubuntu-amd64/tools.tar.gz https://streams.canonical.com/juju/tools/agent/2.9.31/juju-2.9.31-ubuntu-amd64.tgz ++ lxc exec juju-bionic-base -- tar zxf /var/lib/juju/tools/2.9.31-ubuntu-amd64/tools.tar.gz -C /var/lib/juju/tools/2.9.31-ubuntu-amd64 + +gzip: stdin: not in gzip format +tar: Child returned status 1 +tar: Error is not recoverable: exiting now ++ true ++ lxc stop juju-bionic-base ++ lxc image delete juju/bionic/amd64 +Error: not found ++ true ++ lxc image delete clean-bionic +++ date +%Y%m%d ++ lxc publish juju-bionic-base --alias juju/bionic/amd64 'description=bionic juju dev image (20220616)' +Instance published with fingerprint: cdb957d77b9e558ef961336cf35066c284e773802b595838f92080a9e34b4240 ++ lxc delete juju-bionic-base -f +Finished installation of juju + +k8s substrate added as cloud "k8scloud" with storage provisioned +by the existing "openebs-hostpath" storage class. +You can now bootstrap to this cloud by running 'juju bootstrap k8scloud'. 
+Creating Juju controller "osm" on k8scloud +Bootstrap to generic Kubernetes cluster +Fetching Juju Dashboard 0.8.1 +Creating k8s resources for controller "controller-osm" +Downloading images +Starting controller pod +Bootstrap agent now started +Contacting Juju controller at 192.168.64.22 to verify accessibility... + +Bootstrap complete, controller "osm" is now available in namespace "controller-osm" + +Now you can run + juju add-model +to create a new model to deploy k8s workloads. +Can't load /home/ubuntu/.rnd into RNG +140682814927296:error:2406F079:random number generator:RAND_load_file:Cannot open file:../crypto/rand/randfile.c:88:Filename=/home/ubuntu/.rnd +Generating a RSA private key +.........+++++ +..........+++++ +writing new private key to '/tmp/.osm/client.key' +----- +Cloud "lxd-cloud" added to controller "osm". +WARNING loading credentials: credentials for cloud lxd-cloud not found +To upload a credential to the controller for cloud "lxd-cloud", use +* 'add-model' with --credential option or +* 'add-credential -c lxd-cloud'. +Using cloud "lxd-cloud" from the controller to verify credentials. +Controller credential "lxd-cloud" for user "admin" for cloud "lxd-cloud" on controller "osm" added. +For more information, see ‘juju show-credential lxd-cloud lxd-cloud’. 
+Pulling and generating docker images +Pulling docker images +Using default tag: latest +latest: Pulling from wurstmeister/zookeeper +a3ed95caeb02: Pulling fs layer +ef38b711a50f: Pulling fs layer +e057c74597c7: Pulling fs layer +666c214f6385: Pulling fs layer +c3d6a96f1ffc: Pulling fs layer +3fe26a83e0ca: Pulling fs layer +666c214f6385: Waiting +c3d6a96f1ffc: Waiting +3fe26a83e0ca: Waiting +3d3a7dd3a3b1: Pulling fs layer +f8cc938abe5f: Pulling fs layer +9978b75f7a58: Pulling fs layer +4d4dbcc8f8cc: Pulling fs layer +3d3a7dd3a3b1: Waiting +f8cc938abe5f: Waiting +9978b75f7a58: Waiting +4d4dbcc8f8cc: Waiting +8b130a9baa49: Pulling fs layer +6b9611650a73: Pulling fs layer +5df5aac51927: Pulling fs layer +76eea4448d9b: Pulling fs layer +8b66990876c6: Pulling fs layer +f0dd38204b6f: Pulling fs layer +8b130a9baa49: Waiting +6b9611650a73: Waiting +5df5aac51927: Waiting +8b66990876c6: Waiting +f0dd38204b6f: Waiting +76eea4448d9b: Waiting +a3ed95caeb02: Verifying Checksum +a3ed95caeb02: Download complete +e057c74597c7: Verifying Checksum +e057c74597c7: Download complete +666c214f6385: Verifying Checksum +666c214f6385: Download complete +c3d6a96f1ffc: Verifying Checksum +c3d6a96f1ffc: Download complete +a3ed95caeb02: Pull complete +3fe26a83e0ca: Verifying Checksum +3fe26a83e0ca: Download complete +f8cc938abe5f: Verifying Checksum +f8cc938abe5f: Download complete +9978b75f7a58: Verifying Checksum +9978b75f7a58: Download complete +4d4dbcc8f8cc: Verifying Checksum +4d4dbcc8f8cc: Download complete +ef38b711a50f: Verifying Checksum +ef38b711a50f: Download complete +6b9611650a73: Verifying Checksum +6b9611650a73: Download complete +8b130a9baa49: Verifying Checksum +8b130a9baa49: Download complete +76eea4448d9b: Verifying Checksum +76eea4448d9b: Download complete +8b66990876c6: Verifying Checksum +8b66990876c6: Download complete +5df5aac51927: Verifying Checksum +5df5aac51927: Download complete +f0dd38204b6f: Verifying Checksum +f0dd38204b6f: Download complete +3d3a7dd3a3b1: 
Verifying Checksum +3d3a7dd3a3b1: Download complete +ef38b711a50f: Pull complete +e057c74597c7: Pull complete +666c214f6385: Pull complete +c3d6a96f1ffc: Pull complete +3fe26a83e0ca: Pull complete +3d3a7dd3a3b1: Pull complete +f8cc938abe5f: Pull complete +9978b75f7a58: Pull complete +4d4dbcc8f8cc: Pull complete +8b130a9baa49: Pull complete +6b9611650a73: Pull complete +5df5aac51927: Pull complete +76eea4448d9b: Pull complete +8b66990876c6: Pull complete +f0dd38204b6f: Pull complete +Digest: sha256:7a7fd44a72104bfbd24a77844bad5fabc86485b036f988ea927d1780782a6680 +Status: Downloaded newer image for wurstmeister/zookeeper:latest +docker.io/wurstmeister/zookeeper:latest +2.11-1.0.2: Pulling from wurstmeister/kafka +540db60ca938: Pulling fs layer +f0698009749d: Pulling fs layer +6f908e2198d8: Pulling fs layer +03d1b1f23ba0: Pulling fs layer +7e646c44bafc: Pulling fs layer +03d1b1f23ba0: Waiting +7e646c44bafc: Waiting +540db60ca938: Verifying Checksum +540db60ca938: Download complete +6f908e2198d8: Verifying Checksum +6f908e2198d8: Download complete +7e646c44bafc: Verifying Checksum +7e646c44bafc: Download complete +540db60ca938: Pull complete +f0698009749d: Verifying Checksum +f0698009749d: Download complete +03d1b1f23ba0: Verifying Checksum +03d1b1f23ba0: Download complete +f0698009749d: Pull complete +6f908e2198d8: Pull complete +03d1b1f23ba0: Pull complete +7e646c44bafc: Pull complete +Digest: sha256:ec1098369b4ccea77489b233172789c8ac29b545b9243545386549c52d07785b +Status: Downloaded newer image for wurstmeister/kafka:2.11-1.0.2 +docker.io/wurstmeister/kafka:2.11-1.0.2 +Using default tag: latest +latest: Pulling from library/mongo +d7bfe07ed847: Pulling fs layer +97ef66a8492a: Pulling fs layer +20cec14c8f9e: Pulling fs layer +38c3018eb09a: Pulling fs layer +ccc9e1c2556b: Pulling fs layer +593c62d03532: Pulling fs layer +1a103a446c3f: Pulling fs layer +be887b845d3f: Pulling fs layer +e5543880b183: Pulling fs layer +ccc9e1c2556b: Waiting +593c62d03532: Waiting 
+1a103a446c3f: Waiting +be887b845d3f: Waiting +e5543880b183: Waiting +38c3018eb09a: Waiting +97ef66a8492a: Verifying Checksum +97ef66a8492a: Download complete +20cec14c8f9e: Verifying Checksum +20cec14c8f9e: Download complete +d7bfe07ed847: Verifying Checksum +d7bfe07ed847: Download complete +ccc9e1c2556b: Verifying Checksum +ccc9e1c2556b: Download complete +38c3018eb09a: Verifying Checksum +38c3018eb09a: Download complete +593c62d03532: Verifying Checksum +593c62d03532: Download complete +1a103a446c3f: Verifying Checksum +1a103a446c3f: Download complete +e5543880b183: Verifying Checksum +e5543880b183: Download complete +d7bfe07ed847: Pull complete +97ef66a8492a: Pull complete +20cec14c8f9e: Pull complete +be887b845d3f: Verifying Checksum +be887b845d3f: Download complete +38c3018eb09a: Pull complete +ccc9e1c2556b: Pull complete +593c62d03532: Pull complete +1a103a446c3f: Pull complete +be887b845d3f: Pull complete +e5543880b183: Pull complete +Digest: sha256:37e84d3dd30cdfb5472ec42b8a6b4dc6ca7cacd91ebcfa0410a54528bbc5fa6d +Status: Downloaded newer image for mongo:latest +docker.io/library/mongo:latest +v2.28.1: Pulling from prom/prometheus +aa2a8d90b84c: Pulling fs layer +b45d31ee2d7f: Pulling fs layer +da9de9139824: Pulling fs layer +d04e751b88d5: Pulling fs layer +13f11ea3536c: Pulling fs layer +1d81771985c9: Pulling fs layer +d471c28936c9: Pulling fs layer +827e29e97e58: Pulling fs layer +9a0bd55ef653: Pulling fs layer +16e358518d2f: Pulling fs layer +bfdb42c9d185: Pulling fs layer +d83e6d5e5f1b: Pulling fs layer +d471c28936c9: Waiting +d04e751b88d5: Waiting +13f11ea3536c: Waiting +1d81771985c9: Waiting +827e29e97e58: Waiting +9a0bd55ef653: Waiting +16e358518d2f: Waiting +bfdb42c9d185: Waiting +d83e6d5e5f1b: Waiting +b45d31ee2d7f: Verifying Checksum +b45d31ee2d7f: Download complete +aa2a8d90b84c: Verifying Checksum +aa2a8d90b84c: Download complete +13f11ea3536c: Verifying Checksum +13f11ea3536c: Download complete +aa2a8d90b84c: Pull complete +1d81771985c9: 
Verifying Checksum +1d81771985c9: Download complete +da9de9139824: Verifying Checksum +da9de9139824: Download complete +d471c28936c9: Verifying Checksum +d471c28936c9: Download complete +827e29e97e58: Verifying Checksum +827e29e97e58: Download complete +d04e751b88d5: Verifying Checksum +d04e751b88d5: Download complete +9a0bd55ef653: Verifying Checksum +9a0bd55ef653: Download complete +b45d31ee2d7f: Pull complete +16e358518d2f: Verifying Checksum +16e358518d2f: Download complete +bfdb42c9d185: Verifying Checksum +bfdb42c9d185: Download complete +d83e6d5e5f1b: Verifying Checksum +d83e6d5e5f1b: Download complete +da9de9139824: Pull complete +d04e751b88d5: Pull complete +13f11ea3536c: Pull complete +1d81771985c9: Pull complete +d471c28936c9: Pull complete +827e29e97e58: Pull complete +9a0bd55ef653: Pull complete +16e358518d2f: Pull complete +bfdb42c9d185: Pull complete +d83e6d5e5f1b: Pull complete +Digest: sha256:5c030438c1e4c86bdc7428f24ee1ad18476eefdfa8a7f76a8ccc9b74f1970d81 +Status: Downloaded newer image for prom/prometheus:v2.28.1 +docker.io/prom/prometheus:v2.28.1 +latest: Pulling from google/cadvisor +ff3a5c916c92: Pulling fs layer +44a45bb65cdf: Pulling fs layer +0bbe1a2fe2a6: Pulling fs layer +ff3a5c916c92: Verifying Checksum +ff3a5c916c92: Download complete +0bbe1a2fe2a6: Verifying Checksum +0bbe1a2fe2a6: Download complete +44a45bb65cdf: Verifying Checksum +44a45bb65cdf: Download complete +ff3a5c916c92: Pull complete +44a45bb65cdf: Pull complete +0bbe1a2fe2a6: Pull complete +Digest: sha256:815386ebbe9a3490f38785ab11bda34ec8dacf4634af77b8912832d4f85dca04 +Status: Downloaded newer image for google/cadvisor:latest +docker.io/google/cadvisor:latest +8.1.1: Pulling from grafana/grafana +540db60ca938: Already exists +b91d21fc1834: Pulling fs layer +94b744eb7640: Pulling fs layer +06133c221b55: Pulling fs layer +dc4b422eea7f: Pulling fs layer +4f4fb700ef54: Pulling fs layer +581ba695933f: Pulling fs layer +688726506e9e: Pulling fs layer +dc4b422eea7f: Waiting 
+4f4fb700ef54: Waiting +581ba695933f: Waiting +688726506e9e: Waiting +94b744eb7640: Verifying Checksum +94b744eb7640: Download complete +b91d21fc1834: Verifying Checksum +b91d21fc1834: Download complete +b91d21fc1834: Pull complete +06133c221b55: Download complete +4f4fb700ef54: Verifying Checksum +4f4fb700ef54: Download complete +581ba695933f: Verifying Checksum +581ba695933f: Download complete +688726506e9e: Verifying Checksum +688726506e9e: Download complete +94b744eb7640: Pull complete +dc4b422eea7f: Verifying Checksum +dc4b422eea7f: Download complete +06133c221b55: Pull complete +dc4b422eea7f: Pull complete +4f4fb700ef54: Pull complete +581ba695933f: Pull complete +688726506e9e: Pull complete +Digest: sha256:c328587322a83c38384cb3799387c3eb3c5b96d31ca5579439400694868f5007 +Status: Downloaded newer image for grafana/grafana:8.1.1 +docker.io/grafana/grafana:8.1.1 +1.15.6: Pulling from kiwigrid/k8s-sidecar +59bf1c3509f3: Already exists +07a400e93df3: Pulling fs layer +812206835f4c: Pulling fs layer +73342013dbd4: Pulling fs layer +27450111daa5: Pulling fs layer +b6dc38f52fdb: Pulling fs layer +c36e6a83ef93: Pulling fs layer +7b23ce5cb56a: Pulling fs layer +27450111daa5: Waiting +b6dc38f52fdb: Waiting +c36e6a83ef93: Waiting +7b23ce5cb56a: Waiting +07a400e93df3: Verifying Checksum +07a400e93df3: Download complete +73342013dbd4: Verifying Checksum +73342013dbd4: Download complete +b6dc38f52fdb: Verifying Checksum +b6dc38f52fdb: Download complete +27450111daa5: Verifying Checksum +27450111daa5: Download complete +812206835f4c: Verifying Checksum +812206835f4c: Download complete +c36e6a83ef93: Verifying Checksum +c36e6a83ef93: Download complete +07a400e93df3: Pull complete +7b23ce5cb56a: Verifying Checksum +7b23ce5cb56a: Download complete +812206835f4c: Pull complete +73342013dbd4: Pull complete +27450111daa5: Pull complete +b6dc38f52fdb: Pull complete +c36e6a83ef93: Pull complete +7b23ce5cb56a: Pull complete +Digest: 
sha256:1f025ae37b7b20d63bffd179e5e6f972039dd53d9646388c0a8c456229c7bbcb +Status: Downloaded newer image for kiwigrid/k8s-sidecar:1.15.6 +docker.io/kiwigrid/k8s-sidecar:1.15.6 +10: Pulling from library/mariadb +405f018f9d1d: Pulling fs layer +7a85079b8234: Pulling fs layer +579c7ff691b1: Pulling fs layer +4976663b5d6d: Pulling fs layer +169024b1fb13: Pulling fs layer +c0ffe8ce897f: Pulling fs layer +b583c09d23c3: Pulling fs layer +9b9f0c08d08f: Pulling fs layer +9cd51f984586: Pulling fs layer +d9f506bb8aca: Pulling fs layer +24d689f79ba4: Pulling fs layer +4976663b5d6d: Waiting +c0ffe8ce897f: Waiting +b583c09d23c3: Waiting +9b9f0c08d08f: Waiting +9cd51f984586: Waiting +d9f506bb8aca: Waiting +24d689f79ba4: Waiting +169024b1fb13: Waiting +7a85079b8234: Verifying Checksum +7a85079b8234: Download complete +579c7ff691b1: Verifying Checksum +579c7ff691b1: Download complete +4976663b5d6d: Verifying Checksum +4976663b5d6d: Download complete +169024b1fb13: Verifying Checksum +169024b1fb13: Download complete +405f018f9d1d: Download complete +b583c09d23c3: Verifying Checksum +b583c09d23c3: Download complete +9b9f0c08d08f: Verifying Checksum +9b9f0c08d08f: Download complete +c0ffe8ce897f: Download complete +d9f506bb8aca: Verifying Checksum +d9f506bb8aca: Download complete +24d689f79ba4: Verifying Checksum +24d689f79ba4: Download complete +9cd51f984586: Verifying Checksum +9cd51f984586: Download complete +405f018f9d1d: Pull complete +7a85079b8234: Pull complete +579c7ff691b1: Pull complete +4976663b5d6d: Pull complete +169024b1fb13: Pull complete +c0ffe8ce897f: Pull complete +b583c09d23c3: Pull complete +9b9f0c08d08f: Pull complete +9cd51f984586: Pull complete +d9f506bb8aca: Pull complete +24d689f79ba4: Pull complete +Digest: sha256:88fcb7d92c7f61cd885c4d309c98461f3607aa6dbd57a2474be86e1956b36d13 +Status: Downloaded newer image for mariadb:10 +docker.io/library/mariadb:10 +5: Pulling from library/mysql +c1ad9731b2c7: Pulling fs layer +54f6eb0ee84d: Pulling fs layer 
+cffcf8691bc5: Pulling fs layer +89a783b5ac8a: Pulling fs layer +6a8393c7be5f: Pulling fs layer +af768d0b181e: Pulling fs layer +810d6aaaf54a: Pulling fs layer +81fe6daf2395: Pulling fs layer +5ccf426818fd: Pulling fs layer +68b838b06054: Pulling fs layer +1b606c4f93df: Pulling fs layer +89a783b5ac8a: Waiting +6a8393c7be5f: Waiting +af768d0b181e: Waiting +810d6aaaf54a: Waiting +81fe6daf2395: Waiting +5ccf426818fd: Waiting +68b838b06054: Waiting +1b606c4f93df: Waiting +54f6eb0ee84d: Verifying Checksum +54f6eb0ee84d: Download complete +cffcf8691bc5: Verifying Checksum +cffcf8691bc5: Download complete +89a783b5ac8a: Verifying Checksum +89a783b5ac8a: Download complete +c1ad9731b2c7: Verifying Checksum +c1ad9731b2c7: Download complete +6a8393c7be5f: Verifying Checksum +6a8393c7be5f: Download complete +810d6aaaf54a: Verifying Checksum +810d6aaaf54a: Download complete +81fe6daf2395: Verifying Checksum +81fe6daf2395: Download complete +68b838b06054: Verifying Checksum +68b838b06054: Download complete +af768d0b181e: Verifying Checksum +af768d0b181e: Download complete +1b606c4f93df: Verifying Checksum +1b606c4f93df: Download complete +5ccf426818fd: Verifying Checksum +5ccf426818fd: Download complete +c1ad9731b2c7: Pull complete +54f6eb0ee84d: Pull complete +cffcf8691bc5: Pull complete +89a783b5ac8a: Pull complete +6a8393c7be5f: Pull complete +af768d0b181e: Pull complete +810d6aaaf54a: Pull complete +81fe6daf2395: Pull complete +5ccf426818fd: Pull complete +68b838b06054: Pull complete +1b606c4f93df: Pull complete +Digest: sha256:7e99b2b8d5bca914ef31059858210f57b009c40375d647f0d4d65ecd01d6b1d5 +Status: Downloaded newer image for mysql:5 +docker.io/library/mysql:5 +Pulling OSM docker images +Pulling opensourcemano/mon:10 docker image +10: Pulling from opensourcemano/mon +d7bfe07ed847: Already exists +666000499d5e: Pulling fs layer +84a996cd9152: Pulling fs layer +5ddf9fe3b18e: Pulling fs layer +83d95b5d2abe: Pulling fs layer +e521c6a1c4d9: Pulling fs layer +b1508559ed03: 
Pulling fs layer +6a982fafa1b7: Pulling fs layer +549b7eb92cad: Pulling fs layer +c9de50434a02: Pulling fs layer +c35f1ae73537: Pulling fs layer +31cf9385e1a8: Pulling fs layer +1d47bf441f80: Pulling fs layer +83d95b5d2abe: Waiting +e521c6a1c4d9: Waiting +b1508559ed03: Waiting +6a982fafa1b7: Waiting +549b7eb92cad: Waiting +c9de50434a02: Waiting +c35f1ae73537: Waiting +31cf9385e1a8: Waiting +1d47bf441f80: Waiting +84a996cd9152: Verifying Checksum +84a996cd9152: Download complete +5ddf9fe3b18e: Verifying Checksum +5ddf9fe3b18e: Download complete +e521c6a1c4d9: Verifying Checksum +e521c6a1c4d9: Download complete +666000499d5e: Verifying Checksum +666000499d5e: Download complete +b1508559ed03: Verifying Checksum +b1508559ed03: Download complete +6a982fafa1b7: Verifying Checksum +6a982fafa1b7: Download complete +549b7eb92cad: Verifying Checksum +549b7eb92cad: Download complete +83d95b5d2abe: Verifying Checksum +83d95b5d2abe: Download complete +31cf9385e1a8: Verifying Checksum +31cf9385e1a8: Download complete +1d47bf441f80: Verifying Checksum +1d47bf441f80: Download complete +666000499d5e: Pull complete +84a996cd9152: Pull complete +c35f1ae73537: Verifying Checksum +c35f1ae73537: Download complete +c9de50434a02: Verifying Checksum +c9de50434a02: Download complete +5ddf9fe3b18e: Pull complete +83d95b5d2abe: Pull complete +e521c6a1c4d9: Pull complete +b1508559ed03: Pull complete +6a982fafa1b7: Pull complete +549b7eb92cad: Pull complete +c9de50434a02: Pull complete +c35f1ae73537: Pull complete +31cf9385e1a8: Pull complete +1d47bf441f80: Pull complete +Digest: sha256:f2949ef49ec402a7c958b027caaf6bd7638d6d9a685cfbb7ea0bdb825ecf1c40 +Status: Downloaded newer image for opensourcemano/mon:10 +docker.io/opensourcemano/mon:10 +Pulling opensourcemano/pol:10 docker image +10: Pulling from opensourcemano/pol +d7bfe07ed847: Already exists +666000499d5e: Already exists +331ca7211c2c: Pulling fs layer +b4f0f255e526: Pulling fs layer +6e63054a5e3a: Pulling fs layer +ef79034d4247: Pulling 
fs layer +bd8855c0c0ed: Pulling fs layer +40e7b2a24af3: Pulling fs layer +847e72a2b5c4: Pulling fs layer +f519278aa5a7: Pulling fs layer +cf540be444c5: Pulling fs layer +6dd0660bb340: Pulling fs layer +40e7b2a24af3: Waiting +847e72a2b5c4: Waiting +f519278aa5a7: Waiting +cf540be444c5: Waiting +6dd0660bb340: Waiting +ef79034d4247: Waiting +bd8855c0c0ed: Waiting +6e63054a5e3a: Verifying Checksum +6e63054a5e3a: Download complete +331ca7211c2c: Verifying Checksum +b4f0f255e526: Verifying Checksum +b4f0f255e526: Download complete +40e7b2a24af3: Verifying Checksum +40e7b2a24af3: Download complete +ef79034d4247: Verifying Checksum +ef79034d4247: Download complete +331ca7211c2c: Pull complete +847e72a2b5c4: Verifying Checksum +847e72a2b5c4: Download complete +f519278aa5a7: Verifying Checksum +f519278aa5a7: Download complete +cf540be444c5: Verifying Checksum +cf540be444c5: Download complete +6dd0660bb340: Verifying Checksum +6dd0660bb340: Download complete +b4f0f255e526: Pull complete +6e63054a5e3a: Pull complete +ef79034d4247: Pull complete +bd8855c0c0ed: Download complete +bd8855c0c0ed: Pull complete +40e7b2a24af3: Pull complete +847e72a2b5c4: Pull complete +f519278aa5a7: Pull complete +cf540be444c5: Pull complete +6dd0660bb340: Pull complete +Digest: sha256:05796c1ddc55eb01ec049d4629a7ce6e29ff05467640884c3443a9cb306c9b7a +Status: Downloaded newer image for opensourcemano/pol:10 +docker.io/opensourcemano/pol:10 +Pulling opensourcemano/nbi:10 docker image +10: Pulling from opensourcemano/nbi +d7bfe07ed847: Already exists +666000499d5e: Already exists +a5c90c150150: Pulling fs layer +e4b817d2e65a: Pulling fs layer +eba3ed4bb1de: Pulling fs layer +b1ca26a333d5: Pulling fs layer +e905ec2173fb: Pulling fs layer +355efc6e6c91: Pulling fs layer +b1ca26a333d5: Waiting +e905ec2173fb: Waiting +355efc6e6c91: Waiting +eba3ed4bb1de: Verifying Checksum +eba3ed4bb1de: Download complete +a5c90c150150: Verifying Checksum +b1ca26a333d5: Verifying Checksum +b1ca26a333d5: Download complete 
+e905ec2173fb: Verifying Checksum +e905ec2173fb: Download complete +355efc6e6c91: Verifying Checksum +355efc6e6c91: Download complete +e4b817d2e65a: Verifying Checksum +e4b817d2e65a: Download complete +a5c90c150150: Pull complete +e4b817d2e65a: Pull complete +eba3ed4bb1de: Pull complete +b1ca26a333d5: Pull complete +e905ec2173fb: Pull complete +355efc6e6c91: Pull complete +Digest: sha256:a3d4e3e9ba4ab531d87016a44eb52ce55c690a43ad181181aec31d9890167ae2 +Status: Downloaded newer image for opensourcemano/nbi:10 +docker.io/opensourcemano/nbi:10 +Pulling opensourcemano/keystone:10 docker image +10: Pulling from opensourcemano/keystone +d7bfe07ed847: Already exists +9e9f60ff75cd: Pulling fs layer +5c3b8b0cef84: Pulling fs layer +42380b15e5b5: Pulling fs layer +64f8d91d37db: Pulling fs layer +5faf5e255491: Pulling fs layer +64f8d91d37db: Waiting +5faf5e255491: Waiting +9e9f60ff75cd: Verifying Checksum +5c3b8b0cef84: Download complete +9e9f60ff75cd: Pull complete +5c3b8b0cef84: Pull complete +5faf5e255491: Verifying Checksum +5faf5e255491: Download complete +64f8d91d37db: Verifying Checksum +64f8d91d37db: Download complete +42380b15e5b5: Verifying Checksum +42380b15e5b5: Download complete +42380b15e5b5: Pull complete +64f8d91d37db: Pull complete +5faf5e255491: Pull complete +Digest: sha256:7c928ce8affe6315ef280f3cc9084875760f7ae6bd261165b25cac191317f53b +Status: Downloaded newer image for opensourcemano/keystone:10 +docker.io/opensourcemano/keystone:10 +Pulling opensourcemano/ro:10 docker image +10: Pulling from opensourcemano/ro +d7bfe07ed847: Already exists +666000499d5e: Already exists +38ec5619b74b: Pulling fs layer +2e15239e15a6: Pulling fs layer +25fed18cded9: Pulling fs layer +526f83fab57d: Pulling fs layer +a73d486191f0: Pulling fs layer +526f83fab57d: Waiting +a73d486191f0: Waiting +38ec5619b74b: Verifying Checksum +25fed18cded9: Download complete +526f83fab57d: Verifying Checksum +526f83fab57d: Download complete +a73d486191f0: Verifying Checksum +a73d486191f0: 
Download complete +38ec5619b74b: Pull complete +2e15239e15a6: Verifying Checksum +2e15239e15a6: Download complete +2e15239e15a6: Pull complete +25fed18cded9: Pull complete +526f83fab57d: Pull complete +a73d486191f0: Pull complete +Digest: sha256:c81de050cafaf8a7bfd17f169911e35db7ca399e7cbb44844fc202a18f37b24e +Status: Downloaded newer image for opensourcemano/ro:10 +docker.io/opensourcemano/ro:10 +Pulling opensourcemano/lcm:10 docker image +10: Pulling from opensourcemano/lcm +d7bfe07ed847: Already exists +666000499d5e: Already exists +84a996cd9152: Already exists +c72aaef6aaca: Pulling fs layer +b0b0eca3e362: Pulling fs layer +fc121576b870: Pulling fs layer +f33fd8c262d4: Pulling fs layer +3f88104e5ad5: Pulling fs layer +6182ae1ea9d5: Pulling fs layer +f56144a0caab: Pulling fs layer +d5cbfaf6d151: Pulling fs layer +87377ac48274: Pulling fs layer +e8a04fe4ab11: Pulling fs layer +24faec989806: Pulling fs layer +a668aecb68cc: Pulling fs layer +030993b75ae4: Pulling fs layer +55471db6284d: Pulling fs layer +f33fd8c262d4: Waiting +3f88104e5ad5: Waiting +6182ae1ea9d5: Waiting +f56144a0caab: Waiting +d5cbfaf6d151: Waiting +87377ac48274: Waiting +e8a04fe4ab11: Waiting +24faec989806: Waiting +a668aecb68cc: Waiting +030993b75ae4: Waiting +55471db6284d: Waiting +c72aaef6aaca: Verifying Checksum +c72aaef6aaca: Download complete +fc121576b870: Verifying Checksum +fc121576b870: Download complete +b0b0eca3e362: Verifying Checksum +b0b0eca3e362: Download complete +f33fd8c262d4: Verifying Checksum +f33fd8c262d4: Download complete +c72aaef6aaca: Pull complete +6182ae1ea9d5: Verifying Checksum +6182ae1ea9d5: Download complete +3f88104e5ad5: Verifying Checksum +3f88104e5ad5: Download complete +d5cbfaf6d151: Verifying Checksum +d5cbfaf6d151: Download complete +f56144a0caab: Download complete +24faec989806: Verifying Checksum +24faec989806: Download complete +a668aecb68cc: Verifying Checksum +a668aecb68cc: Download complete +030993b75ae4: Verifying Checksum +030993b75ae4: Download 
complete +55471db6284d: Verifying Checksum +55471db6284d: Download complete +87377ac48274: Verifying Checksum +87377ac48274: Download complete +e8a04fe4ab11: Verifying Checksum +e8a04fe4ab11: Download complete +b0b0eca3e362: Pull complete +fc121576b870: Pull complete +f33fd8c262d4: Pull complete +3f88104e5ad5: Pull complete +6182ae1ea9d5: Pull complete +f56144a0caab: Pull complete +d5cbfaf6d151: Pull complete +87377ac48274: Pull complete +e8a04fe4ab11: Pull complete +24faec989806: Pull complete +a668aecb68cc: Pull complete +030993b75ae4: Pull complete +55471db6284d: Pull complete +Digest: sha256:0ee76c9b9a00bb5c0fc91f4ed1718b5d3996d5754f49bb07186b6324bc106e83 +Status: Downloaded newer image for opensourcemano/lcm:10 +docker.io/opensourcemano/lcm:10 +Pulling opensourcemano/ng-ui:10 docker image +10: Pulling from opensourcemano/ng-ui +d7bfe07ed847: Already exists +9f115457b1a8: Pulling fs layer +efe1d7eba861: Pulling fs layer +5c807a19b6d9: Pulling fs layer +b6ff86d608be: Pulling fs layer +b6ff86d608be: Waiting +5c807a19b6d9: Verifying Checksum +5c807a19b6d9: Download complete +efe1d7eba861: Download complete +b6ff86d608be: Verifying Checksum +b6ff86d608be: Download complete +9f115457b1a8: Verifying Checksum +9f115457b1a8: Download complete +9f115457b1a8: Pull complete +efe1d7eba861: Pull complete +5c807a19b6d9: Pull complete +b6ff86d608be: Pull complete +Digest: sha256:ea3e575bc36748c4863bc03e6cc9c9e42e1b19a271f8f4c280b6b24e366e261f +Status: Downloaded newer image for opensourcemano/ng-ui:10 +docker.io/opensourcemano/ng-ui:10 +Pulling opensourcemano/osmclient:10 docker image +10: Pulling from opensourcemano/osmclient +d7bfe07ed847: Already exists +666000499d5e: Already exists +96d66c72f8e6: Pulling fs layer +2f1097e8c8db: Pulling fs layer +96d66c72f8e6: Verifying Checksum +96d66c72f8e6: Download complete +2f1097e8c8db: Verifying Checksum +2f1097e8c8db: Download complete +96d66c72f8e6: Pull complete +2f1097e8c8db: Pull complete +Digest: 
sha256:6bb8595031c6f4540fa0fd86f78878aa9dbd7b57bb40bcf87d481f500a52af7c +Status: Downloaded newer image for opensourcemano/osmclient:10 +docker.io/opensourcemano/osmclient:10 +Finished pulling and generating docker images +Doing a backup of existing env files +cp: cannot stat '/etc/osm/docker/keystone-db.env': No such file or directory +cp: cannot stat '/etc/osm/docker/keystone.env': No such file or directory +cp: cannot stat '/etc/osm/docker/lcm.env': No such file or directory +cp: cannot stat '/etc/osm/docker/mon.env': No such file or directory +cp: cannot stat '/etc/osm/docker/nbi.env': No such file or directory +cp: cannot stat '/etc/osm/docker/pol.env': No such file or directory +cp: cannot stat '/etc/osm/docker/ro-db.env': No such file or directory +cp: cannot stat '/etc/osm/docker/ro.env': No such file or directory +Generating docker env files +OSMLCM_DATABASE_COMMONKEY=h9ud8JHJl3oR1at0Uft43LwDeWqM2ONV +OSMLCM_VCA_HOST=192.168.64.22 +OSMLCM_VCA_SECRET=193f97a8f2e5d381b1c131c5f2187e3d +OSMLCM_VCA_PUBKEY=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGUIr+ppBgJtJx5vtX1DgPbvB1kmFZaREBEUhQV0iCrmQm8TIvC8weKMcXoIkODCIzc3/8/G9QEeCwVAclbH/gOgBlb2tjpvQVtURmnFn7SeKbrMcw8K9dnI2WZxC/l5ERS3jybLbpCEKGGywTpL+jthLkjwyFdHXECRcyBlRZRVTbhqU8snGyHEGWzLEp2lXxceCrGEm2+mpL5/Ucy5v0m+tztJf8WAAe1UTR1TaW3AGAQofX5K2dCdbLtqaJPipQG2XWb7zwNOA80YFumUgcMcdxIEYjpQ6qDNke9k42EOO6tn/AY161Ma7D9XFmqEYlI3vH9ZHmFPBJsbgZDn/7 juju-client-key 
+OSMLCM_VCA_CACERT=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1jSGhxNmNLK0IrRURCdWd3STlyNTZlYW5BMU1BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TmpFMgpNRGt6TkRRNFdoY05Nekl3TmpFMk1Ea3pPVFE0V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBeTBhMnZXOFEKT05wMU8rYmJyT0VFYTE3S0R0ZHEybXdyZEQ2VVRsOVpZSzVpSUtBdXVhUGwzWnNUNkMxYmljOTVZOGZBVHJwcgpWUFZScDI1WmZEanU5cFE4QUZDcC9IVEZmZjIxK0lCUnRrcHUwVVR0SWNoNk8xZ0N0ZWhtaFl4MWlDVnJ6Tkl2ClNudEYzTjhOK092NGJSTGRYOHI0K0R4MWsvVHZXYTFoYTZjMVAwcWQyaWhpcU9HZExlRU5pWWYxQlc3RXNFMjcKU0xLVHM0Y3lzdWNteW00SWo2SjQwOWNsUmVpa1ZoTE15WktMUjRNRjN5VTNSUEUyK3ozNTdwRjFwUy8wenRTdApldmpmdFNkQ2M1alJ1OGJNYllyMjBpTjc0MWR0Q0FUMXZzQXZhOE1VZHc2NytBQmFsNG9DWi80ajRpOXNaRmM3Cm9Ccmc3K0dJWEt3Yk5Tb1A2TDFBajB0Z3FWQ044bWxjVVphTFkvZFZPaEdScS95SmhGQVZXVjh2MjA5WkhsWWsKYzNNdUg4bDdjZytQVDlVV00zQzE2TzQzaFo5REZxVHF3TXZBWHd0djJUbzZrdW5GTVQzSWQ1RHV3Uk9GL1lPbApmOEp0SWpsQlV3QVZjVUhqbll0MEY3TzlzdjluRjdJekYrUXhraVBYMitEWkIzY2J5TXA5SGhkbEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCUjIKQkVnbU15cUJGNWhzdGNOREFRWEI3MXRVU2pBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWszRjVzSG5DMkJNTApXWmlRN2NNQWtZd1JsdGpGOTBLWU1hSzEzYjhlNzZSNyt1UUZuSm5HN2x1dy9HanB3cS9TNnBwTHc2Tnc4U1JpCktYbjZ1N1BFNWNGN3pGWWNHT2pUOTBwTmtqTDYvVHNsbjF6R3RCWVdQZFVwbXVjNGFKclc4aEZ1TG0yRWFFaW0KQ09sTVZZNEZsZWpLampPR2pzZ09YaVJGdGdKM25rZ0Z3STl0ZE5tTDQ4V1p5TFQ1WlhmRTNneE54aTF5bUpyagpYNGJFTkdHa2JXQXJnMCtZeUZPMU1vZGExSHgxWlRGZXdVZWlKcVVzak5IWmd2VnZGRmlzY1NtQWRPNlloeFFQClljUEE1bnBPWFFyOUR1WVFrVGZ0dk5wSnNHNmhBS2dKNkRpV1doZWNDRm9rTkpnMVBVNE56dlZDVmF3cERyK1IKTEJBaGFtSEtSWWtGamdPM1QrYzRpbTJNaGtKdGJ0YnRUeTNGS1pzWUpCTG11dTUySkhUM0NPZkJEQVVCb3daNApuL2lPOGV1YnkwaHpqd2szaExXYVp1cTZaRzJHdUw0TVZlYXhLQU9JMURGRDJyQ1h1ckZ2OGM3S1pncU0rdStTCkVud29qQUtQelAyKzBuY0Fic2lEUjdScTl4VnJxMitjcE9ZZ3dBL2FQSGxUQnFST3FBcW4KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK +# 
OSMLCM_VCA_ENABLEOSUPGRADE=false +# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/ +OSMLCM_VCA_CLOUD=lxd-cloud +OSMLCM_VCA_K8S_CLOUD=k8scloud +MYSQL_ROOT_PASSWORD=eu3CFlvTDXIFCH4lAMlzHYWC1kRTDJti +RO_DB_ROOT_PASSWORD=eu3CFlvTDXIFCH4lAMlzHYWC1kRTDJti +OSMRO_DATABASE_COMMONKEY=h9ud8JHJl3oR1at0Uft43LwDeWqM2ONV +MYSQL_ROOT_PASSWORD=eu3CFlvTDXIFCH4lAMlzHYWC1kRTDJti +ROOT_DB_PASSWORD=eu3CFlvTDXIFCH4lAMlzHYWC1kRTDJti +KEYSTONE_DB_PASSWORD=FIY66gpXRqWsAik6VBM9DjYmEFZz5oeH +SERVICE_PASSWORD=4e1m8TX2pd75KxpH7Y4f2BwIj8H74d67 +OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=4e1m8TX2pd75KxpH7Y4f2BwIj8H74d67 +OSMNBI_DATABASE_COMMONKEY=h9ud8JHJl3oR1at0Uft43LwDeWqM2ONV +OSMMON_KEYSTONE_SERVICE_PASSWORD=4e1m8TX2pd75KxpH7Y4f2BwIj8H74d67 +OSMMON_DATABASE_COMMONKEY=h9ud8JHJl3oR1at0Uft43LwDeWqM2ONV +OSMMON_SQL_DATABASE_URI=mysql://root:eu3CFlvTDXIFCH4lAMlzHYWC1kRTDJti@mysql:3306/mon +OS_NOTIFIER_URI=http://192.168.64.22:8662 +OSMMON_VCA_HOST=192.168.64.22 +OSMMON_VCA_SECRET=193f97a8f2e5d381b1c131c5f2187e3d +OSMMON_VCA_CACERT=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1jSGhxNmNLK0IrRURCdWd3STlyNTZlYW5BMU1BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TmpFMgpNRGt6TkRRNFdoY05Nekl3TmpFMk1Ea3pPVFE0V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBeTBhMnZXOFEKT05wMU8rYmJyT0VFYTE3S0R0ZHEybXdyZEQ2VVRsOVpZSzVpSUtBdXVhUGwzWnNUNkMxYmljOTVZOGZBVHJwcgpWUFZScDI1WmZEanU5cFE4QUZDcC9IVEZmZjIxK0lCUnRrcHUwVVR0SWNoNk8xZ0N0ZWhtaFl4MWlDVnJ6Tkl2ClNudEYzTjhOK092NGJSTGRYOHI0K0R4MWsvVHZXYTFoYTZjMVAwcWQyaWhpcU9HZExlRU5pWWYxQlc3RXNFMjcKU0xLVHM0Y3lzdWNteW00SWo2SjQwOWNsUmVpa1ZoTE15WktMUjRNRjN5VTNSUEUyK3ozNTdwRjFwUy8wenRTdApldmpmdFNkQ2M1alJ1OGJNYllyMjBpTjc0MWR0Q0FUMXZzQXZhOE1VZHc2NytBQmFsNG9DWi80ajRpOXNaRmM3Cm9Ccmc3K0dJWEt3Yk5Tb1A2TDFBajB0Z3FWQ044bWxjVVphTFkvZFZPaEdScS95SmhGQVZXVjh2MjA5WkhsWWsKYzNNdUg4bDdjZytQVDlVV00zQzE2TzQzaFo5REZxVHF3TXZBWHd0djJUbzZrdW5GTVQzSWQ1RHV3U
k9GL1lPbApmOEp0SWpsQlV3QVZjVUhqbll0MEY3TzlzdjluRjdJekYrUXhraVBYMitEWkIzY2J5TXA5SGhkbEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCUjIKQkVnbU15cUJGNWhzdGNOREFRWEI3MXRVU2pBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWszRjVzSG5DMkJNTApXWmlRN2NNQWtZd1JsdGpGOTBLWU1hSzEzYjhlNzZSNyt1UUZuSm5HN2x1dy9HanB3cS9TNnBwTHc2Tnc4U1JpCktYbjZ1N1BFNWNGN3pGWWNHT2pUOTBwTmtqTDYvVHNsbjF6R3RCWVdQZFVwbXVjNGFKclc4aEZ1TG0yRWFFaW0KQ09sTVZZNEZsZWpLampPR2pzZ09YaVJGdGdKM25rZ0Z3STl0ZE5tTDQ4V1p5TFQ1WlhmRTNneE54aTF5bUpyagpYNGJFTkdHa2JXQXJnMCtZeUZPMU1vZGExSHgxWlRGZXdVZWlKcVVzak5IWmd2VnZGRmlzY1NtQWRPNlloeFFQClljUEE1bnBPWFFyOUR1WVFrVGZ0dk5wSnNHNmhBS2dKNkRpV1doZWNDRm9rTkpnMVBVNE56dlZDVmF3cERyK1IKTEJBaGFtSEtSWWtGamdPM1QrYzRpbTJNaGtKdGJ0YnRUeTNGS1pzWUpCTG11dTUySkhUM0NPZkJEQVVCb3daNApuL2lPOGV1YnkwaHpqd2szaExXYVp1cTZaRzJHdUw0TVZlYXhLQU9JMURGRDJyQ1h1ckZ2OGM3S1pncU0rdStTCkVud29qQUtQelAyKzBuY0Fic2lEUjdScTl4VnJxMitjcE9ZZ3dBL2FQSGxUQnFST3FBcW4KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK +OSMPOL_SQL_DATABASE_URI=mysql://root:eu3CFlvTDXIFCH4lAMlzHYWC1kRTDJti@mysql:3306/pol +Finished generation of docker env files +Added 'osm' model on k8scloud with credential 'k8scloud' for user 'admin' +Located charm "mongodb-k8s" in charm-hub, revision 1 +Deploying "mongodb-k8s" from charm-hub charm "mongodb-k8s", revision 1 in channel stable on focal +Error from server (AlreadyExists): namespaces "osm" already exists +secret/lcm-secret created +secret/mon-secret created +secret/nbi-secret created +secret/ro-db-secret created +secret/ro-secret created +secret/keystone-secret created +secret/pol-secret created +clusterrole.rbac.authorization.k8s.io/grafana-clusterrole created +clusterrolebinding.rbac.authorization.k8s.io/grafana-clusterrolebinding created +secret/grafana created +serviceaccount/grafana created +configmap/grafana-dashboard-provider created +configmap/grafana-datasource created +configmap/grafana created +deployment.apps/grafana created +service/grafana created +service/kafka created 
+statefulset.apps/kafka created +service/keystone created +deployment.apps/keystone created +deployment.apps/lcm created +service/mon created +deployment.apps/mon created +service/mysql created +statefulset.apps/mysql created +service/nbi created +deployment.apps/nbi created +service/ng-ui created +deployment.apps/ng-ui created +deployment.apps/pol created +service/prometheus created +configmap/prom created +statefulset.apps/prometheus created +service/ro created +deployment.apps/ro created +service/zookeeper created +statefulset.apps/zookeeper created +sed: can't read /etc/osm/docker/osm_pla/pla.yaml: No such file or directory +error: the path "/etc/osm/docker/osm_pla" does not exist + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 3120 100 3120 0 0 4321 0 --:--:-- --:--:-- --:--:-- 4315 +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:4 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:7 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:6 https://packages.cloud.google.com/apt kubernetes-xenial InRelease +Get:8 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/IM amd64 Packages [897 B] +Get:9 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/osmclient amd64 Packages [476 B] +Fetched 1373 B in 8s (163 B/s) +Reading package lists... 
+W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:3 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:5 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:6 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:7 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:4 https://packages.cloud.google.com/apt kubernetes-xenial InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. 
+The following additional packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu build-essential cpp cpp-7 + dh-python dpkg-dev fakeroot g++ g++-7 gcc gcc-7 gcc-7-base + libalgorithm-diff-perl libalgorithm-diff-xs-perl libalgorithm-merge-perl + libasan4 libatomic1 libbinutils libc-dev-bin libc6-dev libcc1-0 libcilkrts5 + libdpkg-perl libexpat1-dev libfakeroot libfile-fcntllock-perl libgcc-7-dev + libgomp1 libisl19 libitm1 liblsan0 libmpc3 libmpx2 libpython3-dev + libpython3.6-dev libquadmath0 libstdc++-7-dev libtsan0 libubsan0 + linux-libc-dev make manpages-dev python-pip-whl python3-crypto python3-dev + python3-distutils python3-keyring python3-keyrings.alt python3-lib2to3 + python3-secretstorage python3-setuptools python3-wheel python3-xdg + python3.6-dev +Suggested packages: + binutils-doc cpp-doc gcc-7-locales debian-keyring g++-multilib + g++-7-multilib gcc-7-doc libstdc++6-7-dbg gcc-multilib autoconf automake + libtool flex bison gdb gcc-doc gcc-7-multilib libgcc1-dbg libgomp1-dbg + libitm1-dbg libatomic1-dbg libasan4-dbg liblsan0-dbg libtsan0-dbg + libubsan0-dbg libcilkrts5-dbg libmpx2-dbg libquadmath0-dbg glibc-doc bzr + libstdc++-7-doc make-doc python-crypto-doc gnome-keyring libkf5wallet-bin + gir1.2-gnomekeyring-1.0 python-secretstorage-doc python-setuptools-doc +The following NEW packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu build-essential cpp cpp-7 + dh-python dpkg-dev fakeroot g++ g++-7 gcc gcc-7 gcc-7-base + libalgorithm-diff-perl libalgorithm-diff-xs-perl libalgorithm-merge-perl + libasan4 libatomic1 libbinutils libc-dev-bin libc6-dev libcc1-0 libcilkrts5 + libdpkg-perl libexpat1-dev libfakeroot libfile-fcntllock-perl libgcc-7-dev + libgomp1 libisl19 libitm1 liblsan0 libmpc3 libmpx2 libpython3-dev + libpython3.6-dev libquadmath0 libstdc++-7-dev libtsan0 libubsan0 + linux-libc-dev make manpages-dev python-pip-whl python3-crypto python3-dev + python3-distutils python3-keyring 
python3-keyrings.alt python3-lib2to3 + python3-pip python3-secretstorage python3-setuptools python3-wheel + python3-xdg python3.6-dev +0 upgraded, 57 newly installed, 0 to remove and 3 not upgraded. +Need to get 91.2 MB of archives. +After this operation, 253 MB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 binutils-common amd64 2.30-21ubuntu1~18.04.7 [197 kB] +Get:2 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libbinutils amd64 2.30-21ubuntu1~18.04.7 [489 kB] +Get:3 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 binutils-x86-64-linux-gnu amd64 2.30-21ubuntu1~18.04.7 [1839 kB] +Get:4 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 binutils amd64 2.30-21ubuntu1~18.04.7 [3388 B] +Get:5 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libc-dev-bin amd64 2.27-3ubuntu1.6 [71.9 kB] +Get:6 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 linux-libc-dev amd64 4.15.0-187.198 [980 kB] +Get:7 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libc6-dev amd64 2.27-3ubuntu1.6 [2587 kB] +Get:8 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc-7-base amd64 7.5.0-3ubuntu1~18.04 [18.3 kB] +Get:9 http://archive.ubuntu.com/ubuntu bionic/main amd64 libisl19 amd64 0.19-1 [551 kB] +Get:10 http://archive.ubuntu.com/ubuntu bionic/main amd64 libmpc3 amd64 1.1.0-1 [40.8 kB] +Get:11 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 cpp-7 amd64 7.5.0-3ubuntu1~18.04 [8591 kB] +Get:12 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 cpp amd64 4:7.4.0-1ubuntu2.3 [27.7 kB] +Get:13 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libcc1-0 amd64 8.4.0-1ubuntu1~18.04 [39.4 kB] +Get:14 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libgomp1 amd64 8.4.0-1ubuntu1~18.04 [76.5 kB] +Get:15 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libitm1 amd64 8.4.0-1ubuntu1~18.04 [27.9 kB] +Get:16 
http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libatomic1 amd64 8.4.0-1ubuntu1~18.04 [9192 B] +Get:17 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libasan4 amd64 7.5.0-3ubuntu1~18.04 [358 kB] +Get:18 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 liblsan0 amd64 8.4.0-1ubuntu1~18.04 [133 kB] +Get:19 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libtsan0 amd64 8.4.0-1ubuntu1~18.04 [288 kB] +Get:20 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libubsan0 amd64 7.5.0-3ubuntu1~18.04 [126 kB] +Get:21 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libcilkrts5 amd64 7.5.0-3ubuntu1~18.04 [42.5 kB] +Get:22 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libmpx2 amd64 8.4.0-1ubuntu1~18.04 [11.6 kB] +Get:23 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libquadmath0 amd64 8.4.0-1ubuntu1~18.04 [134 kB] +Get:24 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libgcc-7-dev amd64 7.5.0-3ubuntu1~18.04 [2378 kB] +Get:25 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc-7 amd64 7.5.0-3ubuntu1~18.04 [9381 kB] +Get:26 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc amd64 4:7.4.0-1ubuntu2.3 [5184 B] +Get:27 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libstdc++-7-dev amd64 7.5.0-3ubuntu1~18.04 [1471 kB] +Get:28 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 g++-7 amd64 7.5.0-3ubuntu1~18.04 [9697 kB] +Get:29 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 g++ amd64 4:7.4.0-1ubuntu2.3 [1568 B] +Get:30 http://archive.ubuntu.com/ubuntu bionic/main amd64 make amd64 4.1-9.1ubuntu1 [154 kB] +Get:31 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libdpkg-perl all 1.19.0.5ubuntu2.4 [212 kB] +Get:32 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 dpkg-dev all 1.19.0.5ubuntu2.4 [607 kB] +Get:33 http://archive.ubuntu.com/ubuntu bionic/main amd64 build-essential amd64 12.4ubuntu1 [4758 B] +Get:34 
http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-lib2to3 all 3.6.9-1~18.04 [77.4 kB] +Get:35 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-distutils all 3.6.9-1~18.04 [144 kB] +Get:36 http://archive.ubuntu.com/ubuntu bionic/main amd64 dh-python all 3.20180325ubuntu2 [89.2 kB] +Get:37 http://archive.ubuntu.com/ubuntu bionic/main amd64 libfakeroot amd64 1.22-2ubuntu1 [25.9 kB] +Get:38 http://archive.ubuntu.com/ubuntu bionic/main amd64 fakeroot amd64 1.22-2ubuntu1 [62.3 kB] +Get:39 http://archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-diff-perl all 1.19.03-1 [47.6 kB] +Get:40 http://archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-diff-xs-perl amd64 0.04-5 [11.1 kB] +Get:41 http://archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-merge-perl all 0.08-3 [12.0 kB] +Get:42 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libexpat1-dev amd64 2.2.5-3ubuntu0.7 [124 kB] +Get:43 http://archive.ubuntu.com/ubuntu bionic/main amd64 libfile-fcntllock-perl amd64 0.22-3build2 [33.2 kB] +Get:44 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libpython3.6-dev amd64 3.6.9-1~18.04ubuntu1.7 [44.9 MB] +Get:45 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libpython3-dev amd64 3.6.7-1~18.04 [7328 B] +Get:46 http://archive.ubuntu.com/ubuntu bionic/main amd64 manpages-dev all 4.15-1 [2217 kB] +Get:47 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 python-pip-whl all 9.0.1-2.3~ubuntu1.18.04.5 [1653 kB] +Get:48 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-crypto amd64 2.6.1-8ubuntu2 [244 kB] +Get:49 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3.6-dev amd64 3.6.9-1~18.04ubuntu1.7 [511 kB] +Get:50 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-dev amd64 3.6.7-1~18.04 [1288 B] +Get:51 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-secretstorage all 2.3.1-2 [12.1 kB] +Get:52 http://archive.ubuntu.com/ubuntu bionic/main 
amd64 python3-keyring all 10.6.0-1 [26.7 kB] +Get:53 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-keyrings.alt all 3.0-1 [16.6 kB] +Get:54 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 python3-pip all 9.0.1-2.3~ubuntu1.18.04.5 [114 kB] +Get:55 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-setuptools all 39.0.1-2 [248 kB] +Get:56 http://archive.ubuntu.com/ubuntu bionic/universe amd64 python3-wheel all 0.30.0-0.2 [36.5 kB] +Get:57 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-xdg all 0.25-4ubuntu1.1 [31.3 kB] +Fetched 91.2 MB in 12s (7704 kB/s) +Selecting previously unselected package binutils-common:amd64. +(Reading database ... 61696 files and directories currently installed.) +Preparing to unpack .../00-binutils-common_2.30-21ubuntu1~18.04.7_amd64.deb ... +Unpacking binutils-common:amd64 (2.30-21ubuntu1~18.04.7) ... +Selecting previously unselected package libbinutils:amd64. +Preparing to unpack .../01-libbinutils_2.30-21ubuntu1~18.04.7_amd64.deb ... +Unpacking libbinutils:amd64 (2.30-21ubuntu1~18.04.7) ... +Selecting previously unselected package binutils-x86-64-linux-gnu. +Preparing to unpack .../02-binutils-x86-64-linux-gnu_2.30-21ubuntu1~18.04.7_amd64.deb ... +Unpacking binutils-x86-64-linux-gnu (2.30-21ubuntu1~18.04.7) ... +Selecting previously unselected package binutils. +Preparing to unpack .../03-binutils_2.30-21ubuntu1~18.04.7_amd64.deb ... +Unpacking binutils (2.30-21ubuntu1~18.04.7) ... +Selecting previously unselected package libc-dev-bin. +Preparing to unpack .../04-libc-dev-bin_2.27-3ubuntu1.6_amd64.deb ... +Unpacking libc-dev-bin (2.27-3ubuntu1.6) ... +Selecting previously unselected package linux-libc-dev:amd64. +Preparing to unpack .../05-linux-libc-dev_4.15.0-187.198_amd64.deb ... +Unpacking linux-libc-dev:amd64 (4.15.0-187.198) ... +Selecting previously unselected package libc6-dev:amd64. +Preparing to unpack .../06-libc6-dev_2.27-3ubuntu1.6_amd64.deb ... 
+Unpacking libc6-dev:amd64 (2.27-3ubuntu1.6) ... +Selecting previously unselected package gcc-7-base:amd64. +Preparing to unpack .../07-gcc-7-base_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking gcc-7-base:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package libisl19:amd64. +Preparing to unpack .../08-libisl19_0.19-1_amd64.deb ... +Unpacking libisl19:amd64 (0.19-1) ... +Selecting previously unselected package libmpc3:amd64. +Preparing to unpack .../09-libmpc3_1.1.0-1_amd64.deb ... +Unpacking libmpc3:amd64 (1.1.0-1) ... +Selecting previously unselected package cpp-7. +Preparing to unpack .../10-cpp-7_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking cpp-7 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package cpp. +Preparing to unpack .../11-cpp_4%3a7.4.0-1ubuntu2.3_amd64.deb ... +Unpacking cpp (4:7.4.0-1ubuntu2.3) ... +Selecting previously unselected package libcc1-0:amd64. +Preparing to unpack .../12-libcc1-0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libcc1-0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libgomp1:amd64. +Preparing to unpack .../13-libgomp1_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libgomp1:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libitm1:amd64. +Preparing to unpack .../14-libitm1_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libitm1:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libatomic1:amd64. +Preparing to unpack .../15-libatomic1_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libatomic1:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libasan4:amd64. +Preparing to unpack .../16-libasan4_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libasan4:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package liblsan0:amd64. +Preparing to unpack .../17-liblsan0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking liblsan0:amd64 (8.4.0-1ubuntu1~18.04) ... 
+Selecting previously unselected package libtsan0:amd64. +Preparing to unpack .../18-libtsan0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libtsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libubsan0:amd64. +Preparing to unpack .../19-libubsan0_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libubsan0:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package libcilkrts5:amd64. +Preparing to unpack .../20-libcilkrts5_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libcilkrts5:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package libmpx2:amd64. +Preparing to unpack .../21-libmpx2_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libmpx2:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libquadmath0:amd64. +Preparing to unpack .../22-libquadmath0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libquadmath0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libgcc-7-dev:amd64. +Preparing to unpack .../23-libgcc-7-dev_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libgcc-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package gcc-7. +Preparing to unpack .../24-gcc-7_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking gcc-7 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package gcc. +Preparing to unpack .../25-gcc_4%3a7.4.0-1ubuntu2.3_amd64.deb ... +Unpacking gcc (4:7.4.0-1ubuntu2.3) ... +Selecting previously unselected package libstdc++-7-dev:amd64. +Preparing to unpack .../26-libstdc++-7-dev_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libstdc++-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package g++-7. +Preparing to unpack .../27-g++-7_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking g++-7 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package g++. +Preparing to unpack .../28-g++_4%3a7.4.0-1ubuntu2.3_amd64.deb ... +Unpacking g++ (4:7.4.0-1ubuntu2.3) ... 
+Selecting previously unselected package make. +Preparing to unpack .../29-make_4.1-9.1ubuntu1_amd64.deb ... +Unpacking make (4.1-9.1ubuntu1) ... +Selecting previously unselected package libdpkg-perl. +Preparing to unpack .../30-libdpkg-perl_1.19.0.5ubuntu2.4_all.deb ... +Unpacking libdpkg-perl (1.19.0.5ubuntu2.4) ... +Selecting previously unselected package dpkg-dev. +Preparing to unpack .../31-dpkg-dev_1.19.0.5ubuntu2.4_all.deb ... +Unpacking dpkg-dev (1.19.0.5ubuntu2.4) ... +Selecting previously unselected package build-essential. +Preparing to unpack .../32-build-essential_12.4ubuntu1_amd64.deb ... +Unpacking build-essential (12.4ubuntu1) ... +Selecting previously unselected package python3-lib2to3. +Preparing to unpack .../33-python3-lib2to3_3.6.9-1~18.04_all.deb ... +Unpacking python3-lib2to3 (3.6.9-1~18.04) ... +Selecting previously unselected package python3-distutils. +Preparing to unpack .../34-python3-distutils_3.6.9-1~18.04_all.deb ... +Unpacking python3-distutils (3.6.9-1~18.04) ... +Selecting previously unselected package dh-python. +Preparing to unpack .../35-dh-python_3.20180325ubuntu2_all.deb ... +Unpacking dh-python (3.20180325ubuntu2) ... +Selecting previously unselected package libfakeroot:amd64. +Preparing to unpack .../36-libfakeroot_1.22-2ubuntu1_amd64.deb ... +Unpacking libfakeroot:amd64 (1.22-2ubuntu1) ... +Selecting previously unselected package fakeroot. +Preparing to unpack .../37-fakeroot_1.22-2ubuntu1_amd64.deb ... +Unpacking fakeroot (1.22-2ubuntu1) ... +Selecting previously unselected package libalgorithm-diff-perl. +Preparing to unpack .../38-libalgorithm-diff-perl_1.19.03-1_all.deb ... +Unpacking libalgorithm-diff-perl (1.19.03-1) ... +Selecting previously unselected package libalgorithm-diff-xs-perl. +Preparing to unpack .../39-libalgorithm-diff-xs-perl_0.04-5_amd64.deb ... +Unpacking libalgorithm-diff-xs-perl (0.04-5) ... +Selecting previously unselected package libalgorithm-merge-perl. 
+Preparing to unpack .../40-libalgorithm-merge-perl_0.08-3_all.deb ... +Unpacking libalgorithm-merge-perl (0.08-3) ... +Selecting previously unselected package libexpat1-dev:amd64. +Preparing to unpack .../41-libexpat1-dev_2.2.5-3ubuntu0.7_amd64.deb ... +Unpacking libexpat1-dev:amd64 (2.2.5-3ubuntu0.7) ... +Selecting previously unselected package libfile-fcntllock-perl. +Preparing to unpack .../42-libfile-fcntllock-perl_0.22-3build2_amd64.deb ... +Unpacking libfile-fcntllock-perl (0.22-3build2) ... +Selecting previously unselected package libpython3.6-dev:amd64. +Preparing to unpack .../43-libpython3.6-dev_3.6.9-1~18.04ubuntu1.7_amd64.deb ... +Unpacking libpython3.6-dev:amd64 (3.6.9-1~18.04ubuntu1.7) ... +Selecting previously unselected package libpython3-dev:amd64. +Preparing to unpack .../44-libpython3-dev_3.6.7-1~18.04_amd64.deb ... +Unpacking libpython3-dev:amd64 (3.6.7-1~18.04) ... +Selecting previously unselected package manpages-dev. +Preparing to unpack .../45-manpages-dev_4.15-1_all.deb ... +Unpacking manpages-dev (4.15-1) ... +Selecting previously unselected package python-pip-whl. +Preparing to unpack .../46-python-pip-whl_9.0.1-2.3~ubuntu1.18.04.5_all.deb ... +Unpacking python-pip-whl (9.0.1-2.3~ubuntu1.18.04.5) ... +Selecting previously unselected package python3-crypto. +Preparing to unpack .../47-python3-crypto_2.6.1-8ubuntu2_amd64.deb ... +Unpacking python3-crypto (2.6.1-8ubuntu2) ... +Selecting previously unselected package python3.6-dev. +Preparing to unpack .../48-python3.6-dev_3.6.9-1~18.04ubuntu1.7_amd64.deb ... +Unpacking python3.6-dev (3.6.9-1~18.04ubuntu1.7) ... +Selecting previously unselected package python3-dev. +Preparing to unpack .../49-python3-dev_3.6.7-1~18.04_amd64.deb ... +Unpacking python3-dev (3.6.7-1~18.04) ... +Selecting previously unselected package python3-secretstorage. +Preparing to unpack .../50-python3-secretstorage_2.3.1-2_all.deb ... +Unpacking python3-secretstorage (2.3.1-2) ... 
+Selecting previously unselected package python3-keyring. +Preparing to unpack .../51-python3-keyring_10.6.0-1_all.deb ... +Unpacking python3-keyring (10.6.0-1) ... +Selecting previously unselected package python3-keyrings.alt. +Preparing to unpack .../52-python3-keyrings.alt_3.0-1_all.deb ... +Unpacking python3-keyrings.alt (3.0-1) ... +Selecting previously unselected package python3-pip. +Preparing to unpack .../53-python3-pip_9.0.1-2.3~ubuntu1.18.04.5_all.deb ... +Unpacking python3-pip (9.0.1-2.3~ubuntu1.18.04.5) ... +Selecting previously unselected package python3-setuptools. +Preparing to unpack .../54-python3-setuptools_39.0.1-2_all.deb ... +Unpacking python3-setuptools (39.0.1-2) ... +Selecting previously unselected package python3-wheel. +Preparing to unpack .../55-python3-wheel_0.30.0-0.2_all.deb ... +Unpacking python3-wheel (0.30.0-0.2) ... +Selecting previously unselected package python3-xdg. +Preparing to unpack .../56-python3-xdg_0.25-4ubuntu1.1_all.deb ... +Unpacking python3-xdg (0.25-4ubuntu1.1) ... +Setting up libquadmath0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libgomp1:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libatomic1:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up python-pip-whl (9.0.1-2.3~ubuntu1.18.04.5) ... +Setting up libcc1-0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up make (4.1-9.1ubuntu1) ... +Setting up python3-crypto (2.6.1-8ubuntu2) ... +Setting up libtsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up python3-xdg (0.25-4ubuntu1.1) ... +Setting up python3-keyrings.alt (3.0-1) ... +Setting up linux-libc-dev:amd64 (4.15.0-187.198) ... +Setting up libdpkg-perl (1.19.0.5ubuntu2.4) ... +Setting up python3-wheel (0.30.0-0.2) ... +Setting up liblsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up gcc-7-base:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up binutils-common:amd64 (2.30-21ubuntu1~18.04.7) ... +Setting up libfile-fcntllock-perl (0.22-3build2) ... +Setting up libmpx2:amd64 (8.4.0-1ubuntu1~18.04) ... 
+Setting up libfakeroot:amd64 (1.22-2ubuntu1) ... +Setting up libalgorithm-diff-perl (1.19.03-1) ... +Setting up libmpc3:amd64 (1.1.0-1) ... +Setting up libc-dev-bin (2.27-3ubuntu1.6) ... +Setting up python3-lib2to3 (3.6.9-1~18.04) ... +Setting up python3-secretstorage (2.3.1-2) ... +Setting up manpages-dev (4.15-1) ... +Setting up libc6-dev:amd64 (2.27-3ubuntu1.6) ... +Setting up python3-distutils (3.6.9-1~18.04) ... +Setting up libitm1:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libisl19:amd64 (0.19-1) ... +Setting up libasan4:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up python3-keyring (10.6.0-1) ... +Setting up libbinutils:amd64 (2.30-21ubuntu1~18.04.7) ... +Setting up libcilkrts5:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up libubsan0:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up fakeroot (1.22-2ubuntu1) ... +update-alternatives: using /usr/bin/fakeroot-sysv to provide /usr/bin/fakeroot (fakeroot) in auto mode +Setting up libgcc-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up cpp-7 (7.5.0-3ubuntu1~18.04) ... +Setting up libstdc++-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up libalgorithm-merge-perl (0.08-3) ... +Setting up libalgorithm-diff-xs-perl (0.04-5) ... +Setting up python3-pip (9.0.1-2.3~ubuntu1.18.04.5) ... +Setting up libexpat1-dev:amd64 (2.2.5-3ubuntu0.7) ... +Setting up python3-setuptools (39.0.1-2) ... +Setting up dh-python (3.20180325ubuntu2) ... +Setting up binutils-x86-64-linux-gnu (2.30-21ubuntu1~18.04.7) ... +Setting up cpp (4:7.4.0-1ubuntu2.3) ... +Setting up libpython3.6-dev:amd64 (3.6.9-1~18.04ubuntu1.7) ... +Setting up binutils (2.30-21ubuntu1~18.04.7) ... +Setting up python3.6-dev (3.6.9-1~18.04ubuntu1.7) ... +Setting up libpython3-dev:amd64 (3.6.7-1~18.04) ... +Setting up gcc-7 (7.5.0-3ubuntu1~18.04) ... +Setting up g++-7 (7.5.0-3ubuntu1~18.04) ... +Setting up python3-dev (3.6.7-1~18.04) ... +Setting up gcc (4:7.4.0-1ubuntu2.3) ... +Setting up dpkg-dev (1.19.0.5ubuntu2.4) ... +Setting up g++ (4:7.4.0-1ubuntu2.3) ... 
+update-alternatives: using /usr/bin/g++ to provide /usr/bin/c++ (c++) in auto mode +Setting up build-essential (12.4ubuntu1) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +Processing triggers for libc-bin (2.27-3ubuntu1.6) ... +Collecting pip + Downloading https://files.pythonhosted.org/packages/a4/6d/6463d49a933f547439d6b5b98b46af8742cc03ae83543e4d7688c2420f8b/pip-21.3.1-py3-none-any.whl (1.7MB) +Installing collected packages: pip + Found existing installation: pip 9.0.1 + Not uninstalling pip at /usr/lib/python3/dist-packages, outside environment /usr +Successfully installed pip-21.3.1 +Collecting python-magic + Downloading python_magic-0.4.27-py2.py3-none-any.whl (13 kB) +Collecting pyangbind + Downloading pyangbind-0.8.1.tar.gz (48 kB) + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +Collecting verboselogs + Downloading verboselogs-1.7-py2.py3-none-any.whl (11 kB) +Collecting bitarray + Downloading bitarray-2.5.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (236 kB) +Collecting enum34 + Downloading enum34-1.1.10-py3-none-any.whl (11 kB) +Collecting lxml + Downloading lxml-4.9.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl (6.4 MB) +Collecting pyang + Downloading pyang-2.5.3-py2.py3-none-any.whl (592 kB) +Collecting regex + Downloading regex-2022.6.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (749 kB) +Requirement already satisfied: six in /usr/lib/python3/dist-packages (from pyangbind) (1.11.0) +Building wheels for collected packages: pyangbind + Building wheel for pyangbind (setup.py): started + Building wheel for pyangbind (setup.py): finished with status 'done' + Created wheel for pyangbind: filename=pyangbind-0.8.1-py3-none-any.whl size=49314 sha256=6ac78baed0a9c69c5fcb9387ec96ca1e63d0cc895321f54cff7d7a47ef82fe68 + Stored in directory: /root/.cache/pip/wheels/d7/5f/16/210e82959deac8e57e539448ff940505a957125a521cb2a828 
+Successfully built pyangbind +Installing collected packages: lxml, regex, pyang, enum34, bitarray, verboselogs, python-magic, pyangbind +Successfully installed bitarray-2.5.1 enum34-1.1.10 lxml-4.9.0 pyang-2.5.3 pyangbind-0.8.1 python-magic-0.4.27 regex-2022.6.2 verboselogs-1.7 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following NEW packages will be installed: + python3-osm-im python3-osmclient +0 upgraded, 2 newly installed, 0 to remove and 3 not upgraded. +Need to get 250 kB of archives. +After this operation, 8189 kB of additional disk space will be used. +Get:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/IM amd64 python3-osm-im all 10.1.1+gd3b8c07-1 [186 kB] +Get:2 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/osmclient amd64 python3-osmclient all 10.1.1+gac8d207-1 [64.0 kB] +Fetched 250 kB in 0s (767 kB/s) +Selecting previously unselected package python3-osm-im. +(Reading database ... 68302 files and directories currently installed.) +Preparing to unpack .../python3-osm-im_10.1.1+gd3b8c07-1_all.deb ... +Unpacking python3-osm-im (10.1.1+gd3b8c07-1) ... +Selecting previously unselected package python3-osmclient. +Preparing to unpack .../python3-osmclient_10.1.1+gac8d207-1_all.deb ... +Unpacking python3-osmclient (10.1.1+gac8d207-1) ... +Setting up python3-osmclient (10.1.1+gac8d207-1) ... +Setting up python3-osm-im (10.1.1+gd3b8c07-1) ... 
+Defaulting to user installation because normal site-packages is not writeable +Collecting bitarray==2.3.5 + Downloading bitarray-2.3.5.tar.gz (88 kB) + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +Requirement already satisfied: enum34==1.1.10 in /usr/local/lib/python3.6/dist-packages (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 19)) (1.1.10) +Collecting lxml==4.7.1 + Downloading lxml-4.7.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl (6.4 MB) +Collecting pyang==2.5.2 + Downloading pyang-2.5.2-py2.py3-none-any.whl (595 kB) +Requirement already satisfied: pyangbind==0.8.1 in /usr/local/lib/python3.6/dist-packages (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 29)) (0.8.1) +Collecting pyyaml==5.4.1 + Downloading PyYAML-5.4.1-cp36-cp36m-manylinux1_x86_64.whl (640 kB) +Collecting regex==2021.11.10 + Downloading regex-2021.11.10-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (748 kB) +Collecting six==1.16.0 + Downloading six-1.16.0-py2.py3-none-any.whl (11 kB) +Building wheels for collected packages: bitarray + Building wheel for bitarray (setup.py): started + Building wheel for bitarray (setup.py): finished with status 'done' + Created wheel for bitarray: filename=bitarray-2.3.5-cp36-cp36m-linux_x86_64.whl size=179236 sha256=e84a254fb92fddf1e40d3030566438973dce1625f0b1676b8d91507687070ffb + Stored in directory: /home/ubuntu/.cache/pip/wheels/b6/88/b1/11e997b83a46fd896ace064f835328c661db94816895770eb6 +Successfully built bitarray +Installing collected packages: lxml, six, regex, pyang, bitarray, pyyaml +Successfully installed bitarray-2.3.5 lxml-4.7.1 pyang-2.5.2 pyyaml-5.4.1 regex-2021.11.10 six-1.16.0 +Reading package lists... +Building dependency tree... +Reading state information... 
+The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +Suggested packages: + libcurl4-doc libidn11-dev libkrb5-dev libldap2-dev librtmp-dev libssh2-1-dev + pkg-config zlib1g-dev libssl-doc +The following NEW packages will be installed: + libcurl4-openssl-dev libssl-dev +0 upgraded, 2 newly installed, 0 to remove and 3 not upgraded. +Need to get 1870 kB of archives. +After this operation, 9284 kB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libcurl4-openssl-dev amd64 7.58.0-2ubuntu3.18 [302 kB] +Get:2 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libssl-dev amd64 1.1.1-1ubuntu2.1~18.04.17 [1568 kB] +Fetched 1870 kB in 1s (1295 kB/s) +Selecting previously unselected package libcurl4-openssl-dev:amd64. +(Reading database ... 68386 files and directories currently installed.) +Preparing to unpack .../libcurl4-openssl-dev_7.58.0-2ubuntu3.18_amd64.deb ... +Unpacking libcurl4-openssl-dev:amd64 (7.58.0-2ubuntu3.18) ... +Selecting previously unselected package libssl-dev:amd64. +Preparing to unpack .../libssl-dev_1.1.1-1ubuntu2.1~18.04.17_amd64.deb ... +Unpacking libssl-dev:amd64 (1.1.1-1ubuntu2.1~18.04.17) ... +Setting up libssl-dev:amd64 (1.1.1-1ubuntu2.1~18.04.17) ... +Setting up libcurl4-openssl-dev:amd64 (7.58.0-2ubuntu3.18) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... 
+Defaulting to user installation because normal site-packages is not writeable +Collecting certifi==2021.10.8 + Downloading certifi-2021.10.8-py2.py3-none-any.whl (149 kB) +Collecting charset-normalizer==2.0.10 + Downloading charset_normalizer-2.0.10-py3-none-any.whl (39 kB) +Collecting click==8.0.3 + Downloading click-8.0.3-py3-none-any.whl (97 kB) +Collecting idna==3.3 + Downloading idna-3.3-py3-none-any.whl (61 kB) +Collecting jinja2==3.0.3 + Downloading Jinja2-3.0.3-py3-none-any.whl (133 kB) +Collecting markupsafe==2.0.1 + Downloading MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (30 kB) +Collecting packaging==21.3 + Downloading packaging-21.3-py3-none-any.whl (40 kB) +ERROR: Could not find a version that satisfies the requirement prettytable==3.0.0 (from versions: 0.3, 0.4, 0.5, 0.6, 0.6.1, 0.7.1, 0.7.2, 1.0.0, 1.0.1, 2.0.0, 2.1.0, 2.2.0, 2.2.1, 2.3.0, 2.4.0, 2.5.0) +ERROR: No matching distribution found for prettytable==3.0.0 + +OSM client installed +OSM client assumes that OSM host is running in localhost (127.0.0.1). +In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file: + export OSM_HOSTNAME= +Checking OSM health state... + +Bootstraping... 1 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 2 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 3 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 4 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 5 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 
6 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 7 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 8 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 9 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 10 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 11 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 12 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 13 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 14 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 15 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 16 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 
17 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 18 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 19 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 20 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 21 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 22 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 23 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 24 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 25 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 26 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 27 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 
28 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 29 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 30 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 31 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 32 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 33 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + +1 of 6 statefulsets starting: + mongodb-k8s 0/1 + + +Bootstraping... 34 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 35 attempts of 84 +5 of 9 deployments starting: + lcm 0/1 0 + mon 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 36 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + + +Bootstraping... 37 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + + +Bootstraping... 38 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + + +Bootstraping... 39 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 40 attempts of 84 +3 of 9 deployments starting: + lcm 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 41 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 
42 attempts of 84 +3 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + + +Bootstraping... 43 attempts of 84 +3 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + ro 0/1 0 + + +Bootstraping... 44 attempts of 84 +3 of 9 deployments starting: + lcm 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 45 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 46 attempts of 84 +4 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + ro 0/1 0 + + +Bootstraping... 47 attempts of 84 +3 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + pol 0/1 0 + + +Bootstraping... 48 attempts of 84 +2 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + + +Bootstraping... 49 attempts of 84 +2 of 9 deployments starting: + lcm 0/1 0 + nbi 0/1 0 + + +Bootstraping... 50 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + + +Bootstraping... 51 attempts of 84 +1 of 9 deployments starting: + lcm 0/1 0 + +===> Successful checks: 24/24 +SYSTEM IS READY +Check OSM status with: kubectl -n osm get all +Traceback (most recent call last): + File "/usr/bin/osm", line 33, in + sys.exit(load_entry_point('osmclient==10.1.1+gac8d207', 'console_scripts', 'osm')()) + File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 480, in load_entry_point + return get_distribution(dist).load_entry_point(group, name) + File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 2693, in load_entry_point + return ep.load() + File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 2324, in load + return self.resolve() + File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 2330, in resolve + module = __import__(self.module_name, fromlist=['__name__'], level=0) + File "/usr/lib/python3/dist-packages/osmclient/scripts/osm.py", line 22, in + from osmclient import client + File "/usr/lib/python3/dist-packages/osmclient/client.py", line 22, in + from osmclient.v1 import client as 
client + File "/usr/lib/python3/dist-packages/osmclient/v1/client.py", line 28, in + from osmclient.common import http + File "/usr/lib/python3/dist-packages/osmclient/common/http.py", line 18, in + import pycurl +ModuleNotFoundError: No module named 'pycurl' +Traceback (most recent call last): + File "/usr/bin/osm", line 33, in + sys.exit(load_entry_point('osmclient==10.1.1+gac8d207', 'console_scripts', 'osm')()) + File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 480, in load_entry_point + return get_distribution(dist).load_entry_point(group, name) + File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 2693, in load_entry_point + return ep.load() + File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 2324, in load + return self.resolve() + File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 2330, in resolve + module = __import__(self.module_name, fromlist=['__name__'], level=0) + File "/usr/lib/python3/dist-packages/osmclient/scripts/osm.py", line 22, in + from osmclient import client + File "/usr/lib/python3/dist-packages/osmclient/client.py", line 22, in + from osmclient.v1 import client as client + File "/usr/lib/python3/dist-packages/osmclient/v1/client.py", line 28, in + from osmclient.common import http + File "/usr/lib/python3/dist-packages/osmclient/common/http.py", line 18, in + import pycurl +ModuleNotFoundError: No module named 'pycurl' + +DONE diff --git a/_tmp/osm-install-issues/mp.osm10-ubuntu18.min-hw.md b/_tmp/osm-install-issues/mp.osm10-ubuntu18.min-hw.md new file mode 100644 index 0000000..b15bfef --- /dev/null +++ b/_tmp/osm-install-issues/mp.osm10-ubuntu18.min-hw.md @@ -0,0 +1,518 @@ +```bash + +Last login: Sun Sep 5 19:08:04 on ttys015 +mactel:source-watcher andrea$ multipass find +Image Aliases Version Description +snapcraft:core18 20201111 Snapcraft builder for Core 18 +snapcraft:core20 20201111 Snapcraft builder for Core 20 +snapcraft:core 20210430 Snapcraft builder 
for Core 16 +18.04 bionic 20210825 Ubuntu 18.04 LTS +20.04 focal,lts 20210825 Ubuntu 20.04 LTS +mactel:source-watcher andrea$ multipass launch --name osm --cpus 2 --mem 6G --disk 40G 18.04 +One quick question before we launch … Would you like to help +the Multipass developers, by sending anonymous usage data? +This includes your operating system, which images you use, +the number of instances, their properties and how long you use them. +We’d also like to measure Multipass’s speed. + +Send usage data (yes/no/Later)? no +Launched: osm + +########################################################################################### +New Multipass 1.7.0 release +Workflows, auto-bridges and more... + +Go here for more information: https://github.com/CanonicalLtd/multipass/releases/tag/v1.7.0 +########################################################################################### +mactel:source-watcher andrea$ multipass exec osm -- bash +To run a command as administrator (user "root"), use "sudo ". +See "man sudo_root" for details. + +ubuntu@osm:~$ lsb_release -a +No LSB modules are available. +Distributor ID: Ubuntu +Description: Ubuntu 18.04.5 LTS +Release: 18.04 +Codename: bionic +ubuntu@osm:~$ wget https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh +--2021-09-06 10:11:29-- https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh +Resolving osm-download.etsi.org (osm-download.etsi.org)... 195.238.226.47 +Connecting to osm-download.etsi.org (osm-download.etsi.org)|195.238.226.47|:443... connected. +HTTP request sent, awaiting response... 
200 OK +Length: 9348 (9.1K) [text/x-sh] +Saving to: ‘install_osm.sh’ + +install_osm.sh 100%[============================================================================================>] 9.13K --.-KB/s in 0.003s + +2021-09-06 10:11:29 (3.12 MB/s) - ‘install_osm.sh’ saved [9348/9348] + +ubuntu@osm:~$ chmod +x install_osm.sh +ubuntu@osm:~$ ./install_osm.sh 2>&1 | tee osm_install_log.txt +Checking required packages: software-properties-common apt-transport-https +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Get:2 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB] +Get:3 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB] +Get:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease [4070 B] +Get:5 http://archive.ubuntu.com/ubuntu bionic/universe amd64 Packages [8570 kB] +Get:6 http://archive.ubuntu.com/ubuntu bionic/universe Translation-en [4941 kB] +Get:7 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/devops amd64 Packages [479 B] +Get:8 http://archive.ubuntu.com/ubuntu bionic/multiverse amd64 Packages [151 kB] +Get:9 http://archive.ubuntu.com/ubuntu bionic/multiverse Translation-en [108 kB] +Get:10 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [2192 kB] +Get:11 http://archive.ubuntu.com/ubuntu bionic-updates/main Translation-en [430 kB] +Get:12 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [1748 kB] +Get:13 http://archive.ubuntu.com/ubuntu bionic-updates/universe Translation-en [375 kB] +Get:14 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse amd64 Packages [27.3 kB] +Get:15 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse Translation-en [6808 B] +Get:16 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB] +Get:17 http://archive.ubuntu.com/ubuntu bionic-backports/main amd64 Packages [10.0 kB] +Get:18 
http://archive.ubuntu.com/ubuntu bionic-backports/main Translation-en [4764 B] +Get:19 http://archive.ubuntu.com/ubuntu bionic-backports/universe amd64 Packages [10.3 kB] +Get:20 http://archive.ubuntu.com/ubuntu bionic-backports/universe Translation-en [4588 B] +Get:21 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [1846 kB] +Get:22 http://security.ubuntu.com/ubuntu bionic-security/main Translation-en [338 kB] +Get:23 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [1137 kB] +Get:24 http://security.ubuntu.com/ubuntu bionic-security/universe Translation-en [259 kB] +Get:25 http://security.ubuntu.com/ubuntu bionic-security/multiverse amd64 Packages [20.9 kB] +Get:26 http://security.ubuntu.com/ubuntu bionic-security/multiverse Translation-en [4732 B] +Fetched 22.4 MB in 5s (4674 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:5 http://security.ubuntu.com/ubuntu bionic-security InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:5 http://security.ubuntu.com/ubuntu bionic-security InRelease +Reading package lists... 
+W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following NEW packages will be installed: + osm-devops +0 upgraded, 1 newly installed, 0 to remove and 4 not upgraded. +Need to get 824 kB of archives. +After this operation, 9116 kB of additional disk space will be used. +Get:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/devops amd64 osm-devops all 10.0.1-1 [824 kB] +Fetched 824 kB in 0s (2210 kB/s) + Selecting previously unselected package osm-devops. +(Reading database ... 60392 files and directories currently installed.) +Preparing to unpack .../osm-devops_10.0.1-1_all.deb ... +Unpacking osm-devops (10.0.1-1) ... +Setting up osm-devops (10.0.1-1) ... +Checking required packages: git wget curl tar +2021-09-06T10:12:14+02:00 INFO Waiting for automatic snapd restart... +jq 1.5+dfsg-1 from Canonical* installed +## Mon Sep 6 10:12:19 CEST 2021 source: logging sourced +## Mon Sep 6 10:12:19 CEST 2021 source: config sourced +## Mon Sep 6 10:12:19 CEST 2021 source: container sourced +## Mon Sep 6 10:12:19 CEST 2021 source: git_functions sourced +The installation will do the following + 1. Install and configure LXD + 2. Install juju + 3. Install docker CE + 4. Disable swap space + 5. Install and initialize Kubernetes + as pre-requirements. + Do you want to proceed (Y/n)? y +Installing lightweight build of OSM +Checking required packages: snapd +* Applying /etc/sysctl.d/10-console-messages.conf ... +kernel.printk = 4 4 1 7 +* Applying /etc/sysctl.d/10-ipv6-privacy.conf ... +net.ipv6.conf.all.use_tempaddr = 2 +net.ipv6.conf.default.use_tempaddr = 2 +* Applying /etc/sysctl.d/10-kernel-hardening.conf ... +kernel.kptr_restrict = 1 +* Applying /etc/sysctl.d/10-link-restrictions.conf ... 
+fs.protected_hardlinks = 1 +fs.protected_symlinks = 1 +* Applying /etc/sysctl.d/10-lxd-inotify.conf ... +fs.inotify.max_user_instances = 1024 +* Applying /etc/sysctl.d/10-magic-sysrq.conf ... +kernel.sysrq = 176 +* Applying /etc/sysctl.d/10-network-security.conf ... +net.ipv4.conf.default.rp_filter = 1 +net.ipv4.conf.all.rp_filter = 1 +net.ipv4.tcp_syncookies = 1 +* Applying /etc/sysctl.d/10-ptrace.conf ... +kernel.yama.ptrace_scope = 1 +* Applying /etc/sysctl.d/10-zeropage.conf ... +vm.mmap_min_addr = 65536 +* Applying /usr/lib/sysctl.d/50-default.conf ... +net.ipv4.conf.all.promote_secondaries = 1 +net.core.default_qdisc = fq_codel +* Applying /etc/sysctl.d/60-lxd-production.conf ... +fs.inotify.max_queued_events = 1048576 +fs.inotify.max_user_instances = 1048576 +fs.inotify.max_user_watches = 1048576 +vm.max_map_count = 262144 +kernel.dmesg_restrict = 1 +net.ipv4.neigh.default.gc_thresh3 = 8192 +net.ipv6.neigh.default.gc_thresh3 = 8192 +net.core.bpf_jit_limit = 3000000000 +kernel.keys.maxkeys = 2000 +kernel.keys.maxbytes = 2000000 +* Applying /etc/sysctl.d/99-cloudimg-ipv6.conf ... +net.ipv6.conf.all.use_tempaddr = 0 +net.ipv6.conf.default.use_tempaddr = 0 +* Applying /etc/sysctl.d/99-sysctl.conf ... +* Applying /etc/sysctl.conf ... +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following packages will be REMOVED: + liblxc-common* liblxc1* lxcfs* lxd* lxd-client* +0 upgraded, 0 newly installed, 5 to remove and 4 not upgraded. +After this operation, 34.1 MB disk space will be freed. +(Reading database ... 61700 files and directories currently installed.)e ... +Removing lxd (3.0.3-0ubuntu1~18.04.1) ... +Removing lxd dnsmasq configuration +Removing lxcfs (3.0.3-0ubuntu1~18.04.2) ... +Removing lxd-client (3.0.3-0ubuntu1~18.04.1) ... 
+Removing liblxc-common (3.0.3-0ubuntu1~18.04.1) ... +Removing liblxc1 (3.0.3-0ubuntu1~18.04.1) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +Processing triggers for libc-bin (2.27-3ubuntu1.4) ... +(Reading database ... 61454 files and directories currently installed.) +Purging configuration files for liblxc-common (3.0.3-0ubuntu1~18.04.1) ... +Purging configuration files for lxd (3.0.3-0ubuntu1~18.04.1) ... +Purging configuration files for lxcfs (3.0.3-0ubuntu1~18.04.2) ... +Processing triggers for systemd (237-3ubuntu10.51) ... +Processing triggers for ureadahead (0.100.0-21) ... +lxd 4.17 from Canonical* installed +To start your first instance, try: lxc launch ubuntu:18.04 + +Installing Docker CE ... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +ca-certificates is already the newest version (20210119~18.04.1). +ca-certificates set to manually installed. +software-properties-common is already the newest version (0.96.24.32.14). +software-properties-common set to manually installed. +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following NEW packages will be installed: + apt-transport-https +0 upgraded, 1 newly installed, 0 to remove and 4 not upgraded. +Need to get 4348 B of archives. +After this operation, 154 kB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 apt-transport-https all 1.6.14 [4348 B] +Fetched 4348 B in 0s (54.8 kB/s) + Selecting previously unselected package apt-transport-https. +(Reading database ... 61437 files and directories currently installed.) +Preparing to unpack .../apt-transport-https_1.6.14_all.deb ... +Unpacking apt-transport-https (1.6.14) ... 
+Setting up apt-transport-https (1.6.14) ... +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Get:2 https://download.docker.com/linux/ubuntu bionic InRelease [64.4 kB] +Hit:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:6 http://security.ubuntu.com/ubuntu bionic-security InRelease +Get:7 https://download.docker.com/linux/ubuntu bionic/stable amd64 Packages [19.8 kB] +Fetched 84.3 kB in 1s (127 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following additional packages will be installed: + containerd.io docker-ce-cli docker-ce-rootless-extras docker-scan-plugin + libltdl7 pigz +Suggested packages: + aufs-tools cgroupfs-mount | cgroup-lite +Recommended packages: + slirp4netns +The following NEW packages will be installed: + containerd.io docker-ce docker-ce-cli docker-ce-rootless-extras + docker-scan-plugin libltdl7 pigz +0 upgraded, 7 newly installed, 0 to remove and 4 not upgraded. +Need to get 96.7 MB of archives. +After this operation, 407 MB of additional disk space will be used. 
+Get:1 http://archive.ubuntu.com/ubuntu bionic/universe amd64 pigz amd64 2.4-1 [57.4 kB] +Get:2 https://download.docker.com/linux/ubuntu bionic/stable amd64 containerd.io amd64 1.4.9-1 [24.7 MB] +Get:3 http://archive.ubuntu.com/ubuntu bionic/main amd64 libltdl7 amd64 2.4.6-2 [38.8 kB] +Get:4 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-ce-cli amd64 5:20.10.8~3-0~ubuntu-bionic [38.8 MB] +Get:5 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-ce amd64 5:20.10.8~3-0~ubuntu-bionic [21.2 MB] +Get:6 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-ce-rootless-extras amd64 5:20.10.8~3-0~ubuntu-bionic [7911 kB] +Get:7 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-scan-plugin amd64 0.8.0~ubuntu-bionic [3888 kB] +Fetched 96.7 MB in 3s (28.7 MB/s) + Selecting previously unselected package pigz. +(Reading database ... 61441 files and directories currently installed.) +Preparing to unpack .../0-pigz_2.4-1_amd64.deb ... +Unpacking pigz (2.4-1) ... +Selecting previously unselected package containerd.io. +Preparing to unpack .../1-containerd.io_1.4.9-1_amd64.deb ... +Unpacking containerd.io (1.4.9-1) ... +Selecting previously unselected package docker-ce-cli. +Preparing to unpack .../2-docker-ce-cli_5%3a20.10.8~3-0~ubuntu-bionic_amd64.deb ... +Unpacking docker-ce-cli (5:20.10.8~3-0~ubuntu-bionic) ... +Selecting previously unselected package docker-ce. +Preparing to unpack .../3-docker-ce_5%3a20.10.8~3-0~ubuntu-bionic_amd64.deb ... +Unpacking docker-ce (5:20.10.8~3-0~ubuntu-bionic) ... +Selecting previously unselected package docker-ce-rootless-extras. +Preparing to unpack .../4-docker-ce-rootless-extras_5%3a20.10.8~3-0~ubuntu-bionic_amd64.deb ... +Unpacking docker-ce-rootless-extras (5:20.10.8~3-0~ubuntu-bionic) ... +Selecting previously unselected package docker-scan-plugin. +Preparing to unpack .../5-docker-scan-plugin_0.8.0~ubuntu-bionic_amd64.deb ... 
+Unpacking docker-scan-plugin (0.8.0~ubuntu-bionic) ... +Selecting previously unselected package libltdl7:amd64. +Preparing to unpack .../6-libltdl7_2.4.6-2_amd64.deb ... +Unpacking libltdl7:amd64 (2.4.6-2) ... +Setting up containerd.io (1.4.9-1) ... +Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /lib/systemd/system/containerd.service. +Setting up docker-ce-rootless-extras (5:20.10.8~3-0~ubuntu-bionic) ... +Setting up docker-scan-plugin (0.8.0~ubuntu-bionic) ... +Setting up libltdl7:amd64 (2.4.6-2) ... +Setting up docker-ce-cli (5:20.10.8~3-0~ubuntu-bionic) ... +Setting up pigz (2.4-1) ... +Setting up docker-ce (5:20.10.8~3-0~ubuntu-bionic) ... +Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /lib/systemd/system/docker.service. +Created symlink /etc/systemd/system/sockets.target.wants/docker.socket → /lib/systemd/system/docker.socket. +Processing triggers for libc-bin (2.27-3ubuntu1.4) ... +Processing triggers for systemd (237-3ubuntu10.51) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +Processing triggers for ureadahead (0.100.0-21) ... +Adding user to group 'docker' +... restarted Docker service +Client: Docker Engine - Community + Version: 20.10.8 + API version: 1.41 + Go version: go1.16.6 + Git commit: 3967b7d + Built: Fri Jul 30 19:54:08 2021 + OS/Arch: linux/amd64 + Context: default + Experimental: true + +Server: Docker Engine - Community + Engine: + Version: 20.10.8 + API version: 1.41 (minimum version 1.12) + Go version: go1.16.6 + Git commit: 75249d8 + Built: Fri Jul 30 19:52:16 2021 + OS/Arch: linux/amd64 + Experimental: false + containerd: + Version: 1.4.9 + GitCommit: e25210fe30a0a703442421b0f60afac609f950a3 + runc: + Version: 1.0.1 + GitCommit: v1.0.1-0-g4144b63 + docker-init: + Version: 0.19.0 + GitCommit: de40ad0 +... 
Docker CE installation done +Creating folders for installation +Hit:1 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:6 http://security.ubuntu.com/ubuntu bionic-security InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +apt-transport-https is already the newest version (1.6.14). +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +0 upgraded, 0 newly installed, 0 to remove and 4 not upgraded. +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:2 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:7 http://security.ubuntu.com/ubuntu bionic-security InRelease +Get:6 https://packages.cloud.google.com/apt kubernetes-xenial InRelease [9383 B] +Get:8 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 Packages [49.4 kB] +Fetched 58.8 kB in 1s (56.0 kB/s) +Reading package lists... 
+W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:7 http://security.ubuntu.com/ubuntu bionic-security InRelease +Hit:6 https://packages.cloud.google.com/apt kubernetes-xenial InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Installing Kubernetes Packages ... +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following additional packages will be installed: + conntrack cri-tools kubernetes-cni socat +The following NEW packages will be installed: + conntrack cri-tools kubeadm kubectl kubelet kubernetes-cni socat +0 upgraded, 7 newly installed, 0 to remove and 4 not upgraded. +Need to get 71.4 MB of archives. +After this operation, 302 MB of additional disk space will be used. 
+Get:1 http://archive.ubuntu.com/ubuntu bionic/main amd64 conntrack amd64 1:1.4.4+snapshot20161117-6ubuntu2 [30.6 kB] +Get:2 http://archive.ubuntu.com/ubuntu bionic/main amd64 socat amd64 1.7.3.2-2ubuntu2 [342 kB] +Get:3 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 cri-tools amd64 1.13.0-01 [8775 kB] +Get:4 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubernetes-cni amd64 0.8.7-00 [25.0 MB] +Get:5 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubelet amd64 1.15.0-00 [20.2 MB] +Get:6 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubectl amd64 1.15.0-00 [8763 kB] +Get:7 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubeadm amd64 1.15.0-00 [8246 kB] +Fetched 71.4 MB in 2s (32.2 MB/s) + Selecting previously unselected package conntrack. +(Reading database ... 61694 files and directories currently installed.) +Preparing to unpack .../0-conntrack_1%3a1.4.4+snapshot20161117-6ubuntu2_amd64.deb ... +Unpacking conntrack (1:1.4.4+snapshot20161117-6ubuntu2) ... +Selecting previously unselected package cri-tools. +Preparing to unpack .../1-cri-tools_1.13.0-01_amd64.deb ... +Unpacking cri-tools (1.13.0-01) ... +Selecting previously unselected package kubernetes-cni. +Preparing to unpack .../2-kubernetes-cni_0.8.7-00_amd64.deb ... +Unpacking kubernetes-cni (0.8.7-00) ... +Selecting previously unselected package socat. +Preparing to unpack .../3-socat_1.7.3.2-2ubuntu2_amd64.deb ... +Unpacking socat (1.7.3.2-2ubuntu2) ... +Selecting previously unselected package kubelet. +Preparing to unpack .../4-kubelet_1.15.0-00_amd64.deb ... +Unpacking kubelet (1.15.0-00) ... +Selecting previously unselected package kubectl. +Preparing to unpack .../5-kubectl_1.15.0-00_amd64.deb ... +Unpacking kubectl (1.15.0-00) ... +Selecting previously unselected package kubeadm. +Preparing to unpack .../6-kubeadm_1.15.0-00_amd64.deb ... +Unpacking kubeadm (1.15.0-00) ... 
+Setting up conntrack (1:1.4.4+snapshot20161117-6ubuntu2) ... +Setting up kubernetes-cni (0.8.7-00) ... +Setting up cri-tools (1.13.0-01) ... +Setting up socat (1.7.3.2-2ubuntu2) ... +Setting up kubelet (1.15.0-00) ... +Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /lib/systemd/system/kubelet.service. +Setting up kubectl (1.15.0-00) ... +Setting up kubeadm (1.15.0-00) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +kubelet set on hold. +kubeadm set on hold. +kubectl set on hold. +I0906 10:14:00.311428 10501 version.go:248] remote version is much newer: v1.22.1; falling back to: stable-1.15 +[init] Using Kubernetes version: v1.15.12 +[preflight] Running pre-flight checks + [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/ + [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.8. 
Latest validated version: 18.09 +[preflight] Pulling images required for setting up a Kubernetes cluster +[preflight] This might take a minute or two, depending on the speed of your internet connection +[preflight] You can also perform this action in beforehand using 'kubeadm config images pull' +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Activating the kubelet service +[certs] Using certificateDir folder "/etc/kubernetes/pki" +[certs] Generating "etcd/ca" certificate and key +[certs] Generating "etcd/peer" certificate and key +[certs] etcd/peer serving cert is signed for DNS names [osm localhost] and IPs [192.168.64.19 127.0.0.1 ::1] +[certs] Generating "apiserver-etcd-client" certificate and key +[certs] Generating "etcd/server" certificate and key +[certs] etcd/server serving cert is signed for DNS names [osm localhost] and IPs [192.168.64.19 127.0.0.1 ::1] +[certs] Generating "etcd/healthcheck-client" certificate and key +[certs] Generating "ca" certificate and key +[certs] Generating "apiserver-kubelet-client" certificate and key +[certs] Generating "apiserver" certificate and key +[certs] apiserver serving cert is signed for DNS names [osm kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.64.19] +[certs] Generating "front-proxy-ca" certificate and key +[certs] Generating "front-proxy-client" certificate and key +[certs] Generating "sa" key and public key +[kubeconfig] Using kubeconfig folder "/etc/kubernetes" +[kubeconfig] Writing "admin.conf" kubeconfig file +[kubeconfig] Writing "kubelet.conf" kubeconfig file +[kubeconfig] Writing "controller-manager.conf" kubeconfig file +[kubeconfig] Writing "scheduler.conf" kubeconfig file +[control-plane] Using manifest folder "/etc/kubernetes/manifests" +[control-plane] Creating static Pod 
manifest for "kube-apiserver" +[control-plane] Creating static Pod manifest for "kube-controller-manager" +[control-plane] Creating static Pod manifest for "kube-scheduler" +[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" +[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s +[apiclient] All control plane components are healthy after 25.511910 seconds +[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace +[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster +[upload-certs] Skipping phase. Please see --upload-certs +[mark-control-plane] Marking the node osm as control-plane by adding the label "node-role.kubernetes.io/master=''" +[mark-control-plane] Marking the node osm as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] +[bootstrap-token] Using token: 1bjm75.9ghzdclhrx6enqgb +[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles +[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials +[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token +[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster +[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace +[addons] Applied essential addon: CoreDNS +[addons] Applied essential addon: kube-proxy + +Your Kubernetes control-plane has initialized successfully! 
+ +To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + +You should now deploy a pod network to the cluster. +Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + https://kubernetes.io/docs/concepts/cluster-administration/addons/ + +Then you can join any number of worker nodes by running the following on each as root: + +kubeadm join 192.168.64.19:6443 --token 1bjm75.9ghzdclhrx6enqgb \ + --discovery-token-ca-cert-hash sha256:439d23d440f5fe042d93485a9c94342d6eb934e1c051ff5f196842c3e5135688 +Error from server (NotFound): namespaces "osm" not found +podsecuritypolicy.policy/psp.flannel.unprivileged created +clusterrole.rbac.authorization.k8s.io/flannel created +clusterrolebinding.rbac.authorization.k8s.io/flannel created +serviceaccount/flannel created +configmap/kube-flannel-cfg created +daemonset.apps/kube-flannel-ds created +node/osm untainted +error: error reading [/tmp/openebs.eW4VwQ]: recognized file extensions are [.json .yaml .yml] +Waiting for storageclass + +### Mon Sep 6 10:22:04 CEST 2021 install_k8s_storageclass: FATAL error: Storageclass not ready after 400 seconds. 
Cannot install openebs +BACKTRACE: +### FATAL /usr/share/osm-devops/common/logging 39 +### install_k8s_storageclass /usr/share/osm-devops/installers/full_install_osm.sh 848 +### install_lightweight /usr/share/osm-devops/installers/full_install_osm.sh 1211 +### main /usr/share/osm-devops/installers/full_install_osm.sh 1876 +------- +ubuntu@osm:~$ exit +exit +mactel:source-watcher andrea$ mutlipass stop osm +-bash: mutlipass: command not found +mactel:source-watcher andrea$ multipass stop osm +mactel:source-watcher andrea$ multipass delete osm +mactel:source-watcher andrea$ + +``` \ No newline at end of file diff --git a/_tmp/osm-install-issues/mp.osm10-ubuntu20.md b/_tmp/osm-install-issues/mp.osm10-ubuntu20.md new file mode 100644 index 0000000..885ff87 --- /dev/null +++ b/_tmp/osm-install-issues/mp.osm10-ubuntu20.md @@ -0,0 +1,485 @@ +```bash + +Last login: Fri Sep 3 10:27:10 on ttys010 +mactel:source-watcher andrea$ brew update +Error: + homebrew-core is a shallow clone. + homebrew-cask is a shallow clone. +To `brew update`, first run: + git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow + git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-cask fetch --unshallow +These commands may take a few minutes to run due to the large size of the repositories. +This restriction has been made on GitHub's request because updating shallow +clones is an extremely expensive operation due to the tree layout and traffic of +Homebrew/homebrew-core and Homebrew/homebrew-cask. We don't do this for you +automatically to avoid repeatedly performing an expensive unshallow operation in +CI systems (which should instead be fixed to not use shallow clones). Sorry for +the inconvenience! +mactel:source-watcher andrea$ brew install multipass +Error: + homebrew-core is a shallow clone. + homebrew-cask is a shallow clone. 
+To `brew update`, first run: + git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow + git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-cask fetch --unshallow +These commands may take a few minutes to run due to the large size of the repositories. +This restriction has been made on GitHub's request because updating shallow +clones is an extremely expensive operation due to the tree layout and traffic of +Homebrew/homebrew-core and Homebrew/homebrew-cask. We don't do this for you +automatically to avoid repeatedly performing an expensive unshallow operation in +CI systems (which should instead be fixed to not use shallow clones). Sorry for +the inconvenience! +==> Downloading https://github.com/CanonicalLtd/multipass/releases/download/v1.6.2/multipass-1.6.2+mac-Darwin.pkg +==> Downloading from https://github-releases.githubusercontent.com/114128199/4dd79180-722d-11eb-8783-4cf31c574f09?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CS +######################################################################## 100.0% +==> Installing Cask multipass +==> Running installer for multipass; your password may be necessary. +Package installers may write to any location; options such as `--appdir` are ignored. +Password: +installer: Package name is multipass +installer: Installing at base path / +installer: The install was successful. +🍺 multipass was successfully installed! +mactel:source-watcher andrea$ multipass launch --name osm +Launched: osm + +########################################################################################### +New Multipass 1.7.0 release +Workflows, auto-bridges and more... 
+ +Go here for more information: https://github.com/CanonicalLtd/multipass/releases/tag/v1.7.0 +########################################################################################### +mactel:source-watcher andrea$ multipass list +Name State IPv4 Image +osm Running 192.168.64.19 Ubuntu 20.04 LTS +mactel:source-watcher andrea$ multipass exec osm -- bash +To run a command as administrator (user "root"), use "sudo ". +See "man sudo_root" for details. + +ubuntu@osm:~$ wget https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh +--2021-09-03 20:01:46-- https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh +Resolving osm-download.etsi.org (osm-download.etsi.org)... 195.238.226.47 +Connecting to osm-download.etsi.org (osm-download.etsi.org)|195.238.226.47|:443... connected. +HTTP request sent, awaiting response... 200 OK +Length: 9348 (9.1K) [text/x-sh] +Saving to: ‘install_osm.sh’ + +install_osm.sh 100%[============================================================================================>] 9.13K --.-KB/s in 0s + +2021-09-03 20:01:47 (39.4 MB/s) - ‘install_osm.sh’ saved [9348/9348] + +ubuntu@osm:~$ pwd +/home/ubuntu +ubuntu@osm:~$ chmod +x install_osm.sh +ubuntu@osm:~$ ./install_osm.sh 2>&1 | tee osm_install_log.txt +Checking required packages: software-properties-common apt-transport-https +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://archive.ubuntu.com/ubuntu focal InRelease +Get:2 http://archive.ubuntu.com/ubuntu focal-updates InRelease [114 kB] +Get:3 http://archive.ubuntu.com/ubuntu focal-backports InRelease [101 kB] +Get:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease [4070 B] +Get:5 http://security.ubuntu.com/ubuntu focal-security InRelease [114 kB] +Get:6 http://archive.ubuntu.com/ubuntu focal/universe amd64 Packages [8628 kB] +Get:7 http://archive.ubuntu.com/ubuntu focal/universe Translation-en [5124 kB] +Get:8 http://archive.ubuntu.com/ubuntu focal/universe 
amd64 c-n-f Metadata [265 kB] +Get:9 http://archive.ubuntu.com/ubuntu focal/multiverse amd64 Packages [144 kB] +Get:10 http://archive.ubuntu.com/ubuntu focal/multiverse Translation-en [104 kB] +Get:11 http://archive.ubuntu.com/ubuntu focal/multiverse amd64 c-n-f Metadata [9136 B] +Get:12 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 Packages [1175 kB] +Get:13 http://archive.ubuntu.com/ubuntu focal-updates/main Translation-en [254 kB] +Get:14 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 c-n-f Metadata [14.1 kB] +Get:15 http://archive.ubuntu.com/ubuntu focal-updates/universe amd64 Packages [853 kB] +Get:16 http://archive.ubuntu.com/ubuntu focal-updates/universe Translation-en [181 kB] +Get:17 http://archive.ubuntu.com/ubuntu focal-updates/universe amd64 c-n-f Metadata [18.8 kB] +Get:18 http://archive.ubuntu.com/ubuntu focal-updates/multiverse amd64 Packages [24.6 kB] +Get:19 http://archive.ubuntu.com/ubuntu focal-updates/multiverse Translation-en [6776 B] +Get:20 http://archive.ubuntu.com/ubuntu focal-updates/multiverse amd64 c-n-f Metadata [620 B] +Get:21 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/devops amd64 Packages [479 B] +Get:22 http://archive.ubuntu.com/ubuntu focal-backports/main amd64 Packages [2568 B] +Get:23 http://archive.ubuntu.com/ubuntu focal-backports/main Translation-en [1120 B] +Get:24 http://archive.ubuntu.com/ubuntu focal-backports/main amd64 c-n-f Metadata [400 B] +Get:25 http://archive.ubuntu.com/ubuntu focal-backports/restricted amd64 c-n-f Metadata [116 B] +Get:26 http://archive.ubuntu.com/ubuntu focal-backports/universe amd64 Packages [5812 B] +Get:27 http://archive.ubuntu.com/ubuntu focal-backports/universe Translation-en [2068 B] +Get:28 http://archive.ubuntu.com/ubuntu focal-backports/universe amd64 c-n-f Metadata [288 B] +Get:29 http://archive.ubuntu.com/ubuntu focal-backports/multiverse amd64 c-n-f Metadata [116 B] +Get:30 http://security.ubuntu.com/ubuntu focal-security/main amd64 
Packages [830 kB] +Get:31 http://security.ubuntu.com/ubuntu focal-security/main Translation-en [162 kB] +Get:32 http://security.ubuntu.com/ubuntu focal-security/main amd64 c-n-f Metadata [8604 B] +Get:33 http://security.ubuntu.com/ubuntu focal-security/restricted amd64 Packages [374 kB] +Get:34 http://security.ubuntu.com/ubuntu focal-security/restricted Translation-en [53.7 kB] +Get:35 http://security.ubuntu.com/ubuntu focal-security/universe amd64 Packages [638 kB] +Get:36 http://security.ubuntu.com/ubuntu focal-security/universe Translation-en [101 kB] +Get:37 http://security.ubuntu.com/ubuntu focal-security/universe amd64 c-n-f Metadata [12.3 kB] +Get:38 http://security.ubuntu.com/ubuntu focal-security/multiverse amd64 Packages [21.9 kB] +Get:39 http://security.ubuntu.com/ubuntu focal-security/multiverse Translation-en [4948 B] +Get:40 http://security.ubuntu.com/ubuntu focal-security/multiverse amd64 c-n-f Metadata [540 B] +Fetched 19.4 MB in 4s (4432 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:2 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:5 http://security.ubuntu.com/ubuntu focal-security InRelease +Reading package lists... 
+W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:2 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:5 http://security.ubuntu.com/ubuntu focal-security InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following NEW packages will be installed: + osm-devops +0 upgraded, 1 newly installed, 0 to remove and 5 not upgraded. +Need to get 824 kB of archives. +After this operation, 9116 kB of additional disk space will be used. +Get:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/devops amd64 osm-devops all 10.0.1-1 [824 kB] +Fetched 824 kB in 0s (2212 kB/s) + Selecting previously unselected package osm-devops. +(Reading database ... 63510 files and directories currently installed.) +Preparing to unpack .../osm-devops_10.0.1-1_all.deb ... +Unpacking osm-devops (10.0.1-1) ... +Setting up osm-devops (10.0.1-1) ... +Checking required packages: git wget curl tar +jq 1.5+dfsg-1 from Canonical* installed +## Fri Sep 3 20:03:04 CEST 2021 source: logging sourced +## Fri Sep 3 20:03:04 CEST 2021 source: config sourced +## Fri Sep 3 20:03:04 CEST 2021 source: container sourced +## Fri Sep 3 20:03:04 CEST 2021 source: git_functions sourced +The installation will do the following + 1. Install and configure LXD + 2. Install juju + 3. Install docker CE + 4. Disable swap space + 5. Install and initialize Kubernetes + as pre-requirements. + Do you want to proceed (Y/n)? 
y +Installing lightweight build of OSM +Checking required packages: snapd +* Applying /etc/sysctl.d/10-console-messages.conf ... +kernel.printk = 4 4 1 7 +* Applying /etc/sysctl.d/10-ipv6-privacy.conf ... +net.ipv6.conf.all.use_tempaddr = 2 +net.ipv6.conf.default.use_tempaddr = 2 +* Applying /etc/sysctl.d/10-kernel-hardening.conf ... +kernel.kptr_restrict = 1 +* Applying /etc/sysctl.d/10-link-restrictions.conf ... +fs.protected_hardlinks = 1 +fs.protected_symlinks = 1 +* Applying /etc/sysctl.d/10-magic-sysrq.conf ... +kernel.sysrq = 176 +* Applying /etc/sysctl.d/10-network-security.conf ... +net.ipv4.conf.default.rp_filter = 2 +net.ipv4.conf.all.rp_filter = 2 +* Applying /etc/sysctl.d/10-ptrace.conf ... +kernel.yama.ptrace_scope = 1 +* Applying /etc/sysctl.d/10-zeropage.conf ... +vm.mmap_min_addr = 65536 +* Applying /usr/lib/sysctl.d/50-default.conf ... +net.ipv4.conf.default.promote_secondaries = 1 +sysctl: setting key "net.ipv4.conf.all.promote_secondaries": Invalid argument +net.ipv4.ping_group_range = 0 2147483647 +net.core.default_qdisc = fq_codel +fs.protected_regular = 1 +fs.protected_fifos = 1 +* Applying /usr/lib/sysctl.d/50-pid-max.conf ... +kernel.pid_max = 4194304 +* Applying /etc/sysctl.d/60-lxd-production.conf ... +fs.inotify.max_queued_events = 1048576 +fs.inotify.max_user_instances = 1048576 +fs.inotify.max_user_watches = 1048576 +vm.max_map_count = 262144 +kernel.dmesg_restrict = 1 +net.ipv4.neigh.default.gc_thresh3 = 8192 +net.ipv6.neigh.default.gc_thresh3 = 8192 +net.core.bpf_jit_limit = 3000000000 +kernel.keys.maxkeys = 2000 +kernel.keys.maxbytes = 2000000 +* Applying /etc/sysctl.d/99-cloudimg-ipv6.conf ... +net.ipv6.conf.all.use_tempaddr = 0 +net.ipv6.conf.default.use_tempaddr = 0 +* Applying /etc/sysctl.d/99-sysctl.conf ... +* Applying /usr/lib/sysctl.d/protect-links.conf ... +fs.protected_fifos = 1 +fs.protected_hardlinks = 1 +fs.protected_regular = 2 +fs.protected_symlinks = 1 +* Applying /etc/sysctl.conf ... +Reading package lists... 
+Building dependency tree... +Reading state information... +Package 'lxcfs' is not installed, so not removed +Package 'lxd' is not installed, so not removed +Package 'lxd-client' is not installed, so not removed +Package 'liblxc1' is not installed, so not removed +0 upgraded, 0 newly installed, 0 to remove and 5 not upgraded. +snap "lxd" is already installed, see 'snap help refresh' +To start your first instance, try: lxc launch ubuntu:18.04 + +Installing Docker CE ... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +ca-certificates is already the newest version (20210119~20.04.1). +ca-certificates set to manually installed. +software-properties-common is already the newest version (0.98.9.5). +software-properties-common set to manually installed. +The following NEW packages will be installed: + apt-transport-https +0 upgraded, 1 newly installed, 0 to remove and 5 not upgraded. +Need to get 4680 B of archives. +After this operation, 162 kB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu focal-updates/universe amd64 apt-transport-https all 2.0.6 [4680 B] +Fetched 4680 B in 0s (75.1 kB/s) + Selecting previously unselected package apt-transport-https. +(Reading database ... 64818 files and directories currently installed.) +Preparing to unpack .../apt-transport-https_2.0.6_all.deb ... +Unpacking apt-transport-https (2.0.6) ... +Setting up apt-transport-https (2.0.6) ... 
+Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://archive.ubuntu.com/ubuntu focal InRelease +Get:2 https://download.docker.com/linux/ubuntu focal InRelease [52.1 kB] +Hit:3 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:6 http://security.ubuntu.com/ubuntu focal-security InRelease +Get:7 https://download.docker.com/linux/ubuntu focal/stable amd64 Packages [10.7 kB] +Fetched 62.9 kB in 1s (87.0 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following additional packages will be installed: + containerd.io docker-ce-cli docker-ce-rootless-extras docker-scan-plugin + pigz slirp4netns +Suggested packages: + aufs-tools cgroupfs-mount | cgroup-lite +The following NEW packages will be installed: + containerd.io docker-ce docker-ce-cli docker-ce-rootless-extras + docker-scan-plugin pigz slirp4netns +0 upgraded, 7 newly installed, 0 to remove and 5 not upgraded. +Need to get 96.7 MB of archives. +After this operation, 406 MB of additional disk space will be used. 
+Get:1 https://download.docker.com/linux/ubuntu focal/stable amd64 containerd.io amd64 1.4.9-1 [24.7 MB] +Get:2 http://archive.ubuntu.com/ubuntu focal/universe amd64 pigz amd64 2.4-1 [57.4 kB] +Get:3 http://archive.ubuntu.com/ubuntu focal/universe amd64 slirp4netns amd64 0.4.3-1 [74.3 kB] +Get:4 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-ce-cli amd64 5:20.10.8~3-0~ubuntu-focal [38.8 MB] +Get:5 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-ce amd64 5:20.10.8~3-0~ubuntu-focal [21.2 MB] +Get:6 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-ce-rootless-extras amd64 5:20.10.8~3-0~ubuntu-focal [7917 kB] +Get:7 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-scan-plugin amd64 0.8.0~ubuntu-focal [3889 kB] +Fetched 96.7 MB in 2s (43.0 MB/s) + Selecting previously unselected package pigz. +(Reading database ... 64822 files and directories currently installed.) +Preparing to unpack .../0-pigz_2.4-1_amd64.deb ... +Unpacking pigz (2.4-1) ... +Selecting previously unselected package containerd.io. +Preparing to unpack .../1-containerd.io_1.4.9-1_amd64.deb ... +Unpacking containerd.io (1.4.9-1) ... +Selecting previously unselected package docker-ce-cli. +Preparing to unpack .../2-docker-ce-cli_5%3a20.10.8~3-0~ubuntu-focal_amd64.deb ... +Unpacking docker-ce-cli (5:20.10.8~3-0~ubuntu-focal) ... +Selecting previously unselected package docker-ce. +Preparing to unpack .../3-docker-ce_5%3a20.10.8~3-0~ubuntu-focal_amd64.deb ... +Unpacking docker-ce (5:20.10.8~3-0~ubuntu-focal) ... +Selecting previously unselected package docker-ce-rootless-extras. +Preparing to unpack .../4-docker-ce-rootless-extras_5%3a20.10.8~3-0~ubuntu-focal_amd64.deb ... +Unpacking docker-ce-rootless-extras (5:20.10.8~3-0~ubuntu-focal) ... +Selecting previously unselected package docker-scan-plugin. +Preparing to unpack .../5-docker-scan-plugin_0.8.0~ubuntu-focal_amd64.deb ... +Unpacking docker-scan-plugin (0.8.0~ubuntu-focal) ... 
+Selecting previously unselected package slirp4netns. +Preparing to unpack .../6-slirp4netns_0.4.3-1_amd64.deb ... +Unpacking slirp4netns (0.4.3-1) ... +Setting up slirp4netns (0.4.3-1) ... +Setting up docker-scan-plugin (0.8.0~ubuntu-focal) ... +Setting up containerd.io (1.4.9-1) ... +Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /lib/systemd/system/containerd.service. +Setting up docker-ce-cli (5:20.10.8~3-0~ubuntu-focal) ... +Setting up pigz (2.4-1) ... +Setting up docker-ce-rootless-extras (5:20.10.8~3-0~ubuntu-focal) ... +Setting up docker-ce (5:20.10.8~3-0~ubuntu-focal) ... +Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /lib/systemd/system/docker.service. +Created symlink /etc/systemd/system/sockets.target.wants/docker.socket → /lib/systemd/system/docker.socket. +Processing triggers for man-db (2.9.1-1) ... +Processing triggers for systemd (245.4-4ubuntu3.11) ... +Adding user to group 'docker' +... restarted Docker service +Client: Docker Engine - Community + Version: 20.10.8 + API version: 1.41 + Go version: go1.16.6 + Git commit: 3967b7d + Built: Fri Jul 30 19:54:27 2021 + OS/Arch: linux/amd64 + Context: default + Experimental: true + +Server: Docker Engine - Community + Engine: + Version: 20.10.8 + API version: 1.41 (minimum version 1.12) + Go version: go1.16.6 + Git commit: 75249d8 + Built: Fri Jul 30 19:52:33 2021 + OS/Arch: linux/amd64 + Experimental: false + containerd: + Version: 1.4.9 + GitCommit: e25210fe30a0a703442421b0f60afac609f950a3 + runc: + Version: 1.0.1 + GitCommit: v1.0.1-0-g4144b63 + docker-init: + Version: 0.19.0 + GitCommit: de40ad0 +... 
Docker CE installation done +Creating folders for installation +Hit:1 https://download.docker.com/linux/ubuntu focal InRelease +Hit:2 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:6 http://security.ubuntu.com/ubuntu focal-security InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +apt-transport-https is already the newest version (2.0.6). +0 upgraded, 0 newly installed, 0 to remove and 5 not upgraded. +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 https://download.docker.com/linux/ubuntu focal InRelease +Hit:2 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:6 http://security.ubuntu.com/ubuntu focal-security InRelease +Get:7 https://packages.cloud.google.com/apt kubernetes-xenial InRelease [9383 B] +Get:8 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 Packages [49.4 kB] +Fetched 58.8 kB in 1s (58.7 kB/s) +Reading package lists... 
+W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 https://download.docker.com/linux/ubuntu focal InRelease +Hit:2 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:6 https://packages.cloud.google.com/apt kubernetes-xenial InRelease +Hit:7 http://security.ubuntu.com/ubuntu focal-security InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Installing Kubernetes Packages ... +Reading package lists... +Building dependency tree... +Reading state information... +The following additional packages will be installed: + conntrack cri-tools ebtables kubernetes-cni socat +Suggested packages: + nftables +The following NEW packages will be installed: + conntrack cri-tools ebtables kubeadm kubectl kubelet kubernetes-cni socat +0 upgraded, 8 newly installed, 0 to remove and 5 not upgraded. +Need to get 71.5 MB of archives. +After this operation, 303 MB of additional disk space will be used. 
+Get:1 http://archive.ubuntu.com/ubuntu focal/main amd64 conntrack amd64 1:1.4.5-2 [30.3 kB] +Get:2 http://archive.ubuntu.com/ubuntu focal/main amd64 ebtables amd64 2.0.11-3build1 [80.3 kB] +Get:3 http://archive.ubuntu.com/ubuntu focal/main amd64 socat amd64 1.7.3.3-2 [323 kB] +Get:4 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 cri-tools amd64 1.13.0-01 [8775 kB] +Get:5 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubernetes-cni amd64 0.8.7-00 [25.0 MB] +Get:6 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubelet amd64 1.15.0-00 [20.2 MB] +Get:7 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubectl amd64 1.15.0-00 [8763 kB] +Get:8 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubeadm amd64 1.15.0-00 [8246 kB] +Fetched 71.5 MB in 2s (31.7 MB/s) + Selecting previously unselected package conntrack. +(Reading database ... 65073 files and directories currently installed.) +Preparing to unpack .../0-conntrack_1%3a1.4.5-2_amd64.deb ... +Unpacking conntrack (1:1.4.5-2) ... +Selecting previously unselected package cri-tools. +Preparing to unpack .../1-cri-tools_1.13.0-01_amd64.deb ... +Unpacking cri-tools (1.13.0-01) ... +Selecting previously unselected package ebtables. +Preparing to unpack .../2-ebtables_2.0.11-3build1_amd64.deb ... +Unpacking ebtables (2.0.11-3build1) ... +Selecting previously unselected package kubernetes-cni. +Preparing to unpack .../3-kubernetes-cni_0.8.7-00_amd64.deb ... +Unpacking kubernetes-cni (0.8.7-00) ... +Selecting previously unselected package socat. +Preparing to unpack .../4-socat_1.7.3.3-2_amd64.deb ... +Unpacking socat (1.7.3.3-2) ... +Selecting previously unselected package kubelet. +Preparing to unpack .../5-kubelet_1.15.0-00_amd64.deb ... +Unpacking kubelet (1.15.0-00) ... +Selecting previously unselected package kubectl. +Preparing to unpack .../6-kubectl_1.15.0-00_amd64.deb ... +Unpacking kubectl (1.15.0-00) ... 
+Selecting previously unselected package kubeadm. +Preparing to unpack .../7-kubeadm_1.15.0-00_amd64.deb ... +Unpacking kubeadm (1.15.0-00) ... +Setting up conntrack (1:1.4.5-2) ... +Setting up kubectl (1.15.0-00) ... +Setting up ebtables (2.0.11-3build1) ... +Setting up socat (1.7.3.3-2) ... +Setting up cri-tools (1.13.0-01) ... +Setting up kubernetes-cni (0.8.7-00) ... +Setting up kubelet (1.15.0-00) ... +Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /lib/systemd/system/kubelet.service. +Setting up kubeadm (1.15.0-00) ... +Processing triggers for man-db (2.9.1-1) ... +kubelet set on hold. +kubeadm set on hold. +kubectl set on hold. +I0903 20:04:29.012574 9138 version.go:248] remote version is much newer: v1.22.1; falling back to: stable-1.15 +[init] Using Kubernetes version: v1.15.12 +[preflight] Running pre-flight checks + [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/ + [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.8. Latest validated version: 18.09 +error execution phase preflight: [preflight] Some fatal errors occurred: + [ERROR NumCPU]: the number of available CPUs 1 is less than the required 2 +[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...` +cp: cannot stat '/etc/kubernetes/admin.conf': No such file or directory +chown: cannot access '/home/ubuntu/.kube/config': No such file or directory +The connection to the server localhost:8080 was refused - did you specify the right host or port? 
+unable to recognize "/tmp/flannel.L9QcEh/kube-flannel.yml": Get http://localhost:8080/api?timeout=32s: dial tcp 127.0.0.1:8080: connect: connection refused +unable to recognize "/tmp/flannel.L9QcEh/kube-flannel.yml": Get http://localhost:8080/api?timeout=32s: dial tcp 127.0.0.1:8080: connect: connection refused +unable to recognize "/tmp/flannel.L9QcEh/kube-flannel.yml": Get http://localhost:8080/api?timeout=32s: dial tcp 127.0.0.1:8080: connect: connection refused +unable to recognize "/tmp/flannel.L9QcEh/kube-flannel.yml": Get http://localhost:8080/api?timeout=32s: dial tcp 127.0.0.1:8080: connect: connection refused +unable to recognize "/tmp/flannel.L9QcEh/kube-flannel.yml": Get http://localhost:8080/api?timeout=32s: dial tcp 127.0.0.1:8080: connect: connection refused +unable to recognize "/tmp/flannel.L9QcEh/kube-flannel.yml": Get http://localhost:8080/api?timeout=32s: dial tcp 127.0.0.1:8080: connect: connection refused + +### Fri Sep 3 20:04:36 CEST 2021 deploy_cni_provider: FATAL error: Cannot Install Flannel +BACKTRACE: +### FATAL /usr/share/osm-devops/common/logging 39 +### deploy_cni_provider /usr/share/osm-devops/installers/full_install_osm.sh 874 +### install_lightweight /usr/share/osm-devops/installers/full_install_osm.sh 1209 +### main /usr/share/osm-devops/installers/full_install_osm.sh 1876 +------- +ubuntu@osm:~$ exit +exit +mactel:source-watcher andrea$ multipass stop osm +Stopping osm -[2021-09-03T20:09:14.973] [error] [osm] process error occurred Crashed + +mactel:source-watcher andrea$ multipass list +Name State IPv4 Image +osm Stopped -- Ubuntu 20.04 LTS +mactel:source-watcher andrea$ multipass delete osm +mactel:source-watcher andrea$ multipass list +Name State IPv4 Image +osm Deleted -- Not Available +mactel:source-watcher andrea$ multipass purge +mactel:source-watcher andrea$ multipass list +No instances found. 
+mactel:source-watcher andrea$ + +``` \ No newline at end of file diff --git a/_tmp/osm-install-issues/mp.osm10-ubuntu20.min-hw.md b/_tmp/osm-install-issues/mp.osm10-ubuntu20.min-hw.md new file mode 100644 index 0000000..3549a85 --- /dev/null +++ b/_tmp/osm-install-issues/mp.osm10-ubuntu20.min-hw.md @@ -0,0 +1,487 @@ +```bash + +Last login: Fri Sep 3 20:09:34 on ttys011 +mactel:source-watcher andrea$ multipass launch --name osm --cpus 2 --mem 6G --disk 40G +Launched: osm +mactel:source-watcher andrea$ multipass exec osm -- bash +To run a command as administrator (user "root"), use "sudo ". +See "man sudo_root" for details. + +ubuntu@osm:~$ wget https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh +--2021-09-03 20:20:02-- https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh +Resolving osm-download.etsi.org (osm-download.etsi.org)... 195.238.226.47 +Connecting to osm-download.etsi.org (osm-download.etsi.org)|195.238.226.47|:443... connected. +HTTP request sent, awaiting response... 
200 OK +Length: 9348 (9.1K) [text/x-sh] +Saving to: ‘install_osm.sh’ + +install_osm.sh 100%[============================================================================================>] 9.13K --.-KB/s in 0.003s + +2021-09-03 20:20:02 (3.20 MB/s) - ‘install_osm.sh’ saved [9348/9348] + +ubuntu@osm:~$ chmod +x install_osm.sh +ubuntu@osm:~$ ./install_osm.sh 2>&1 | tee osm_install_log.txt +Checking required packages: software-properties-common apt-transport-https +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://archive.ubuntu.com/ubuntu focal InRelease +Get:2 http://archive.ubuntu.com/ubuntu focal-updates InRelease [114 kB] +Get:3 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease [4070 B] +Get:4 http://security.ubuntu.com/ubuntu focal-security InRelease [114 kB] +Get:5 http://archive.ubuntu.com/ubuntu focal-backports InRelease [101 kB] +Get:6 http://archive.ubuntu.com/ubuntu focal/universe amd64 Packages [8628 kB] +Get:7 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/devops amd64 Packages [479 B] +Get:8 http://archive.ubuntu.com/ubuntu focal/universe Translation-en [5124 kB] +Get:9 http://archive.ubuntu.com/ubuntu focal/universe amd64 c-n-f Metadata [265 kB] +Get:10 http://archive.ubuntu.com/ubuntu focal/multiverse amd64 Packages [144 kB] +Get:11 http://archive.ubuntu.com/ubuntu focal/multiverse Translation-en [104 kB] +Get:12 http://archive.ubuntu.com/ubuntu focal/multiverse amd64 c-n-f Metadata [9136 B] +Get:13 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 Packages [1175 kB] +Get:14 http://archive.ubuntu.com/ubuntu focal-updates/main Translation-en [254 kB] +Get:15 http://archive.ubuntu.com/ubuntu focal-updates/main amd64 c-n-f Metadata [14.1 kB] +Get:16 http://archive.ubuntu.com/ubuntu focal-updates/universe amd64 Packages [853 kB] +Get:17 http://archive.ubuntu.com/ubuntu focal-updates/universe Translation-en [181 kB] +Get:18 http://archive.ubuntu.com/ubuntu 
focal-updates/universe amd64 c-n-f Metadata [18.8 kB] +Get:19 http://archive.ubuntu.com/ubuntu focal-updates/multiverse amd64 Packages [24.6 kB] +Get:20 http://archive.ubuntu.com/ubuntu focal-updates/multiverse Translation-en [6776 B] +Get:21 http://archive.ubuntu.com/ubuntu focal-updates/multiverse amd64 c-n-f Metadata [620 B] +Get:22 http://archive.ubuntu.com/ubuntu focal-backports/main amd64 Packages [2568 B] +Get:23 http://archive.ubuntu.com/ubuntu focal-backports/main Translation-en [1120 B] +Get:24 http://archive.ubuntu.com/ubuntu focal-backports/main amd64 c-n-f Metadata [400 B] +Get:25 http://archive.ubuntu.com/ubuntu focal-backports/restricted amd64 c-n-f Metadata [116 B] +Get:26 http://archive.ubuntu.com/ubuntu focal-backports/universe amd64 Packages [5812 B] +Get:27 http://archive.ubuntu.com/ubuntu focal-backports/universe Translation-en [2068 B] +Get:28 http://archive.ubuntu.com/ubuntu focal-backports/universe amd64 c-n-f Metadata [288 B] +Get:29 http://archive.ubuntu.com/ubuntu focal-backports/multiverse amd64 c-n-f Metadata [116 B] +Get:30 http://security.ubuntu.com/ubuntu focal-security/main amd64 Packages [830 kB] +Get:31 http://security.ubuntu.com/ubuntu focal-security/main Translation-en [162 kB] +Get:32 http://security.ubuntu.com/ubuntu focal-security/main amd64 c-n-f Metadata [8604 B] +Get:33 http://security.ubuntu.com/ubuntu focal-security/restricted amd64 Packages [374 kB] +Get:34 http://security.ubuntu.com/ubuntu focal-security/restricted Translation-en [53.7 kB] +Get:35 http://security.ubuntu.com/ubuntu focal-security/universe amd64 Packages [638 kB] +Get:36 http://security.ubuntu.com/ubuntu focal-security/universe Translation-en [101 kB] +Get:37 http://security.ubuntu.com/ubuntu focal-security/universe amd64 c-n-f Metadata [12.3 kB] +Get:38 http://security.ubuntu.com/ubuntu focal-security/multiverse amd64 Packages [21.9 kB] +Get:39 http://security.ubuntu.com/ubuntu focal-security/multiverse Translation-en [4948 B] +Get:40 
http://security.ubuntu.com/ubuntu focal-security/multiverse amd64 c-n-f Metadata [540 B] +Fetched 19.4 MB in 4s (5011 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:2 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:5 http://security.ubuntu.com/ubuntu focal-security InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:2 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:5 http://security.ubuntu.com/ubuntu focal-security InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following NEW packages will be installed: + osm-devops +0 upgraded, 1 newly installed, 0 to remove and 5 not upgraded. +Need to get 824 kB of archives. +After this operation, 9116 kB of additional disk space will be used. +Get:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/devops amd64 osm-devops all 10.0.1-1 [824 kB] +Fetched 824 kB in 0s (2128 kB/s) + Selecting previously unselected package osm-devops. +(Reading database ... 63510 files and directories currently installed.) +Preparing to unpack .../osm-devops_10.0.1-1_all.deb ... +Unpacking osm-devops (10.0.1-1) ... 
+Setting up osm-devops (10.0.1-1) ... +Checking required packages: git wget curl tar +jq 1.5+dfsg-1 from Canonical* installed +## Fri Sep 3 20:20:51 CEST 2021 source: logging sourced +## Fri Sep 3 20:20:51 CEST 2021 source: config sourced +## Fri Sep 3 20:20:51 CEST 2021 source: container sourced +## Fri Sep 3 20:20:51 CEST 2021 source: git_functions sourced +The installation will do the following + 1. Install and configure LXD + 2. Install juju + 3. Install docker CE + 4. Disable swap space + 5. Install and initialize Kubernetes + as pre-requirements. + Do you want to proceed (Y/n)? y +Installing lightweight build of OSM +Checking required packages: snapd +* Applying /etc/sysctl.d/10-console-messages.conf ... +kernel.printk = 4 4 1 7 +* Applying /etc/sysctl.d/10-ipv6-privacy.conf ... +net.ipv6.conf.all.use_tempaddr = 2 +net.ipv6.conf.default.use_tempaddr = 2 +* Applying /etc/sysctl.d/10-kernel-hardening.conf ... +kernel.kptr_restrict = 1 +* Applying /etc/sysctl.d/10-link-restrictions.conf ... +fs.protected_hardlinks = 1 +fs.protected_symlinks = 1 +* Applying /etc/sysctl.d/10-magic-sysrq.conf ... +kernel.sysrq = 176 +* Applying /etc/sysctl.d/10-network-security.conf ... +net.ipv4.conf.default.rp_filter = 2 +net.ipv4.conf.all.rp_filter = 2 +* Applying /etc/sysctl.d/10-ptrace.conf ... +kernel.yama.ptrace_scope = 1 +* Applying /etc/sysctl.d/10-zeropage.conf ... +vm.mmap_min_addr = 65536 +* Applying /usr/lib/sysctl.d/50-default.conf ... +net.ipv4.conf.default.promote_secondaries = 1 +sysctl: setting key "net.ipv4.conf.all.promote_secondaries": Invalid argument +net.ipv4.ping_group_range = 0 2147483647 +net.core.default_qdisc = fq_codel +fs.protected_regular = 1 +fs.protected_fifos = 1 +* Applying /usr/lib/sysctl.d/50-pid-max.conf ... +kernel.pid_max = 4194304 +* Applying /etc/sysctl.d/60-lxd-production.conf ... 
+fs.inotify.max_queued_events = 1048576 +fs.inotify.max_user_instances = 1048576 +fs.inotify.max_user_watches = 1048576 +vm.max_map_count = 262144 +kernel.dmesg_restrict = 1 +net.ipv4.neigh.default.gc_thresh3 = 8192 +net.ipv6.neigh.default.gc_thresh3 = 8192 +net.core.bpf_jit_limit = 3000000000 +kernel.keys.maxkeys = 2000 +kernel.keys.maxbytes = 2000000 +* Applying /etc/sysctl.d/99-cloudimg-ipv6.conf ... +net.ipv6.conf.all.use_tempaddr = 0 +net.ipv6.conf.default.use_tempaddr = 0 +* Applying /etc/sysctl.d/99-sysctl.conf ... +* Applying /usr/lib/sysctl.d/protect-links.conf ... +fs.protected_fifos = 1 +fs.protected_hardlinks = 1 +fs.protected_regular = 2 +fs.protected_symlinks = 1 +* Applying /etc/sysctl.conf ... +Reading package lists... +Building dependency tree... +Reading state information... +Package 'lxcfs' is not installed, so not removed +Package 'lxd' is not installed, so not removed +Package 'lxd-client' is not installed, so not removed +Package 'liblxc1' is not installed, so not removed +0 upgraded, 0 newly installed, 0 to remove and 5 not upgraded. +snap "lxd" is already installed, see 'snap help refresh' +To start your first instance, try: lxc launch ubuntu:18.04 + +Installing Docker CE ... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +ca-certificates is already the newest version (20210119~20.04.1). +ca-certificates set to manually installed. +software-properties-common is already the newest version (0.98.9.5). +software-properties-common set to manually installed. +The following NEW packages will be installed: + apt-transport-https +0 upgraded, 1 newly installed, 0 to remove and 5 not upgraded. +Need to get 4680 B of archives. +After this operation, 162 kB of additional disk space will be used. 
+Get:1 http://archive.ubuntu.com/ubuntu focal-updates/universe amd64 apt-transport-https all 2.0.6 [4680 B] +Fetched 4680 B in 0s (72.8 kB/s) + Selecting previously unselected package apt-transport-https. +(Reading database ... 64818 files and directories currently installed.) +Preparing to unpack .../apt-transport-https_2.0.6_all.deb ... +Unpacking apt-transport-https (2.0.6) ... +Setting up apt-transport-https (2.0.6) ... +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://archive.ubuntu.com/ubuntu focal InRelease +Get:2 https://download.docker.com/linux/ubuntu focal InRelease [52.1 kB] +Hit:3 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:6 http://security.ubuntu.com/ubuntu focal-security InRelease +Get:7 https://download.docker.com/linux/ubuntu focal/stable amd64 Packages [10.7 kB] +Fetched 62.9 kB in 1s (82.5 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following additional packages will be installed: + containerd.io docker-ce-cli docker-ce-rootless-extras docker-scan-plugin + pigz slirp4netns +Suggested packages: + aufs-tools cgroupfs-mount | cgroup-lite +The following NEW packages will be installed: + containerd.io docker-ce docker-ce-cli docker-ce-rootless-extras + docker-scan-plugin pigz slirp4netns +0 upgraded, 7 newly installed, 0 to remove and 5 not upgraded. +Need to get 96.7 MB of archives. +After this operation, 406 MB of additional disk space will be used. 
+Get:1 https://download.docker.com/linux/ubuntu focal/stable amd64 containerd.io amd64 1.4.9-1 [24.7 MB] +Get:2 http://archive.ubuntu.com/ubuntu focal/universe amd64 pigz amd64 2.4-1 [57.4 kB] +Get:3 http://archive.ubuntu.com/ubuntu focal/universe amd64 slirp4netns amd64 0.4.3-1 [74.3 kB] +Get:4 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-ce-cli amd64 5:20.10.8~3-0~ubuntu-focal [38.8 MB] +Get:5 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-ce amd64 5:20.10.8~3-0~ubuntu-focal [21.2 MB] +Get:6 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-ce-rootless-extras amd64 5:20.10.8~3-0~ubuntu-focal [7917 kB] +Get:7 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-scan-plugin amd64 0.8.0~ubuntu-focal [3889 kB] +Fetched 96.7 MB in 2s (46.9 MB/s) + Selecting previously unselected package pigz. +(Reading database ... 64822 files and directories currently installed.) +Preparing to unpack .../0-pigz_2.4-1_amd64.deb ... +Unpacking pigz (2.4-1) ... +Selecting previously unselected package containerd.io. +Preparing to unpack .../1-containerd.io_1.4.9-1_amd64.deb ... +Unpacking containerd.io (1.4.9-1) ... +Selecting previously unselected package docker-ce-cli. +Preparing to unpack .../2-docker-ce-cli_5%3a20.10.8~3-0~ubuntu-focal_amd64.deb ... +Unpacking docker-ce-cli (5:20.10.8~3-0~ubuntu-focal) ... +Selecting previously unselected package docker-ce. +Preparing to unpack .../3-docker-ce_5%3a20.10.8~3-0~ubuntu-focal_amd64.deb ... +Unpacking docker-ce (5:20.10.8~3-0~ubuntu-focal) ... +Selecting previously unselected package docker-ce-rootless-extras. +Preparing to unpack .../4-docker-ce-rootless-extras_5%3a20.10.8~3-0~ubuntu-focal_amd64.deb ... +Unpacking docker-ce-rootless-extras (5:20.10.8~3-0~ubuntu-focal) ... +Selecting previously unselected package docker-scan-plugin. +Preparing to unpack .../5-docker-scan-plugin_0.8.0~ubuntu-focal_amd64.deb ... +Unpacking docker-scan-plugin (0.8.0~ubuntu-focal) ... 
+Selecting previously unselected package slirp4netns. +Preparing to unpack .../6-slirp4netns_0.4.3-1_amd64.deb ... +Unpacking slirp4netns (0.4.3-1) ... +Setting up slirp4netns (0.4.3-1) ... +Setting up docker-scan-plugin (0.8.0~ubuntu-focal) ... +Setting up containerd.io (1.4.9-1) ... +Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /lib/systemd/system/containerd.service. +Setting up docker-ce-cli (5:20.10.8~3-0~ubuntu-focal) ... +Setting up pigz (2.4-1) ... +Setting up docker-ce-rootless-extras (5:20.10.8~3-0~ubuntu-focal) ... +Setting up docker-ce (5:20.10.8~3-0~ubuntu-focal) ... +Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /lib/systemd/system/docker.service. +Created symlink /etc/systemd/system/sockets.target.wants/docker.socket → /lib/systemd/system/docker.socket. +Processing triggers for man-db (2.9.1-1) ... +Processing triggers for systemd (245.4-4ubuntu3.11) ... +Adding user to group 'docker' +... restarted Docker service +Client: Docker Engine - Community + Version: 20.10.8 + API version: 1.41 + Go version: go1.16.6 + Git commit: 3967b7d + Built: Fri Jul 30 19:54:27 2021 + OS/Arch: linux/amd64 + Context: default + Experimental: true + +Server: Docker Engine - Community + Engine: + Version: 20.10.8 + API version: 1.41 (minimum version 1.12) + Go version: go1.16.6 + Git commit: 75249d8 + Built: Fri Jul 30 19:52:33 2021 + OS/Arch: linux/amd64 + Experimental: false + containerd: + Version: 1.4.9 + GitCommit: e25210fe30a0a703442421b0f60afac609f950a3 + runc: + Version: 1.0.1 + GitCommit: v1.0.1-0-g4144b63 + docker-init: + Version: 0.19.0 + GitCommit: de40ad0 +... 
Docker CE installation done +Creating folders for installation +Hit:1 https://download.docker.com/linux/ubuntu focal InRelease +Hit:2 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:6 http://security.ubuntu.com/ubuntu focal-security InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +apt-transport-https is already the newest version (2.0.6). +0 upgraded, 0 newly installed, 0 to remove and 5 not upgraded. +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 https://download.docker.com/linux/ubuntu focal InRelease +Hit:2 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:7 http://security.ubuntu.com/ubuntu focal-security InRelease +Get:6 https://packages.cloud.google.com/apt kubernetes-xenial InRelease [9383 B] +Get:8 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 Packages [49.4 kB] +Fetched 58.8 kB in 1s (55.8 kB/s) +Reading package lists... 
+W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 https://download.docker.com/linux/ubuntu focal InRelease +Hit:2 http://archive.ubuntu.com/ubuntu focal InRelease +Hit:3 http://archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:7 http://security.ubuntu.com/ubuntu focal-security InRelease +Get:6 https://packages.cloud.google.com/apt kubernetes-xenial InRelease [9383 B] +Fetched 9383 B in 1s (10.7 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Installing Kubernetes Packages ... +Reading package lists... +Building dependency tree... +Reading state information... +The following additional packages will be installed: + conntrack cri-tools ebtables kubernetes-cni socat +Suggested packages: + nftables +The following NEW packages will be installed: + conntrack cri-tools ebtables kubeadm kubectl kubelet kubernetes-cni socat +0 upgraded, 8 newly installed, 0 to remove and 5 not upgraded. +Need to get 71.5 MB of archives. +After this operation, 303 MB of additional disk space will be used. 
+Get:1 http://archive.ubuntu.com/ubuntu focal/main amd64 conntrack amd64 1:1.4.5-2 [30.3 kB] +Get:2 http://archive.ubuntu.com/ubuntu focal/main amd64 ebtables amd64 2.0.11-3build1 [80.3 kB] +Get:3 http://archive.ubuntu.com/ubuntu focal/main amd64 socat amd64 1.7.3.3-2 [323 kB] +Get:4 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 cri-tools amd64 1.13.0-01 [8775 kB] +Get:5 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubernetes-cni amd64 0.8.7-00 [25.0 MB] +Get:6 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubelet amd64 1.15.0-00 [20.2 MB] +Get:7 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubectl amd64 1.15.0-00 [8763 kB] +Get:8 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubeadm amd64 1.15.0-00 [8246 kB] +Fetched 71.5 MB in 2s (32.8 MB/s) + Selecting previously unselected package conntrack. +(Reading database ... 65073 files and directories currently installed.) +Preparing to unpack .../0-conntrack_1%3a1.4.5-2_amd64.deb ... +Unpacking conntrack (1:1.4.5-2) ... +Selecting previously unselected package cri-tools. +Preparing to unpack .../1-cri-tools_1.13.0-01_amd64.deb ... +Unpacking cri-tools (1.13.0-01) ... +Selecting previously unselected package ebtables. +Preparing to unpack .../2-ebtables_2.0.11-3build1_amd64.deb ... +Unpacking ebtables (2.0.11-3build1) ... +Selecting previously unselected package kubernetes-cni. +Preparing to unpack .../3-kubernetes-cni_0.8.7-00_amd64.deb ... +Unpacking kubernetes-cni (0.8.7-00) ... +Selecting previously unselected package socat. +Preparing to unpack .../4-socat_1.7.3.3-2_amd64.deb ... +Unpacking socat (1.7.3.3-2) ... +Selecting previously unselected package kubelet. +Preparing to unpack .../5-kubelet_1.15.0-00_amd64.deb ... +Unpacking kubelet (1.15.0-00) ... +Selecting previously unselected package kubectl. +Preparing to unpack .../6-kubectl_1.15.0-00_amd64.deb ... +Unpacking kubectl (1.15.0-00) ... 
+Selecting previously unselected package kubeadm. +Preparing to unpack .../7-kubeadm_1.15.0-00_amd64.deb ... +Unpacking kubeadm (1.15.0-00) ... +Setting up conntrack (1:1.4.5-2) ... +Setting up kubectl (1.15.0-00) ... +Setting up ebtables (2.0.11-3build1) ... +Setting up socat (1.7.3.3-2) ... +Setting up cri-tools (1.13.0-01) ... +Setting up kubernetes-cni (0.8.7-00) ... +Setting up kubelet (1.15.0-00) ... +Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /lib/systemd/system/kubelet.service. +Setting up kubeadm (1.15.0-00) ... +Processing triggers for man-db (2.9.1-1) ... +kubelet set on hold. +kubeadm set on hold. +kubectl set on hold. +I0903 20:22:12.541425 9240 version.go:248] remote version is much newer: v1.22.1; falling back to: stable-1.15 +[init] Using Kubernetes version: v1.15.12 +[preflight] Running pre-flight checks + [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/ + [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.8. 
Latest validated version: 18.09 +[preflight] Pulling images required for setting up a Kubernetes cluster +[preflight] This might take a minute or two, depending on the speed of your internet connection +[preflight] You can also perform this action in beforehand using 'kubeadm config images pull' +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Activating the kubelet service +[certs] Using certificateDir folder "/etc/kubernetes/pki" +[certs] Generating "front-proxy-ca" certificate and key +[certs] Generating "front-proxy-client" certificate and key +[certs] Generating "etcd/ca" certificate and key +[certs] Generating "etcd/server" certificate and key +[certs] etcd/server serving cert is signed for DNS names [osm localhost] and IPs [192.168.64.19 127.0.0.1 ::1] +[certs] Generating "etcd/healthcheck-client" certificate and key +[certs] Generating "apiserver-etcd-client" certificate and key +[certs] Generating "etcd/peer" certificate and key +[certs] etcd/peer serving cert is signed for DNS names [osm localhost] and IPs [192.168.64.19 127.0.0.1 ::1] +[certs] Generating "ca" certificate and key +[certs] Generating "apiserver-kubelet-client" certificate and key +[certs] Generating "apiserver" certificate and key +[certs] apiserver serving cert is signed for DNS names [osm kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.64.19] +[certs] Generating "sa" key and public key +[kubeconfig] Using kubeconfig folder "/etc/kubernetes" +[kubeconfig] Writing "admin.conf" kubeconfig file +[kubeconfig] Writing "kubelet.conf" kubeconfig file +[kubeconfig] Writing "controller-manager.conf" kubeconfig file +[kubeconfig] Writing "scheduler.conf" kubeconfig file +[control-plane] Using manifest folder "/etc/kubernetes/manifests" +[control-plane] Creating static Pod 
manifest for "kube-apiserver" +[control-plane] Creating static Pod manifest for "kube-controller-manager" +[control-plane] Creating static Pod manifest for "kube-scheduler" +[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" +[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s +[apiclient] All control plane components are healthy after 26.006114 seconds +[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace +[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster +[upload-certs] Skipping phase. Please see --upload-certs +[mark-control-plane] Marking the node osm as control-plane by adding the label "node-role.kubernetes.io/master=''" +[mark-control-plane] Marking the node osm as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] +[bootstrap-token] Using token: 4tmh7l.ishe157blg1ilg1q +[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles +[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials +[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token +[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster +[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace +[addons] Applied essential addon: CoreDNS +[addons] Applied essential addon: kube-proxy + +Your Kubernetes control-plane has initialized successfully! 
+ +To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + +You should now deploy a pod network to the cluster. +Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + https://kubernetes.io/docs/concepts/cluster-administration/addons/ + +Then you can join any number of worker nodes by running the following on each as root: + +kubeadm join 192.168.64.19:6443 --token 4tmh7l.ishe157blg1ilg1q \ + --discovery-token-ca-cert-hash sha256:3bf3f4d5d3483c4a9eb1d0281caf1d9353b515b163b2c38afaa046294a24b01b +Error from server (NotFound): namespaces "osm" not found +podsecuritypolicy.policy/psp.flannel.unprivileged created +clusterrole.rbac.authorization.k8s.io/flannel created +clusterrolebinding.rbac.authorization.k8s.io/flannel created +serviceaccount/flannel created +configmap/kube-flannel-cfg created +daemonset.apps/kube-flannel-ds created +node/osm untainted +error: error reading [/tmp/openebs.CeXhWG]: recognized file extensions are [.json .yaml .yml] +Waiting for storageclass + +### Fri Sep 3 20:30:21 CEST 2021 install_k8s_storageclass: FATAL error: Storageclass not ready after 400 seconds. 
Cannot install openebs +BACKTRACE: +### FATAL /usr/share/osm-devops/common/logging 39 +### install_k8s_storageclass /usr/share/osm-devops/installers/full_install_osm.sh 848 +### install_lightweight /usr/share/osm-devops/installers/full_install_osm.sh 1211 +### main /usr/share/osm-devops/installers/full_install_osm.sh 1876 +------- +ubuntu@osm:~$ exit +exit +mactel:source-watcher andrea$ multipass stop osm +mactel:source-watcher andrea$ multipass delete osm +mactel:source-watcher andrea$ multipass purge +mactel:source-watcher andrea$ multipass purge +mactel:source-watcher andrea$ + +``` \ No newline at end of file diff --git a/_tmp/osm-install-issues/vbox-install.md b/_tmp/osm-install-issues/vbox-install.md new file mode 100644 index 0000000..5975e55 --- /dev/null +++ b/_tmp/osm-install-issues/vbox-install.md @@ -0,0 +1,180 @@ +```bash + +Last login: Thu Sep 2 12:45:53 on ttys005 +mactel:source-watcher andrea$ brew cask uninstall virtualbox +Error: Unknown command: cask +mactel:source-watcher andrea$ brew cask --help +Error: Unknown command: cask +mactel:source-watcher andrea$ brew --help +Example usage: + brew search [TEXT|/REGEX/] + brew info [FORMULA...] + brew install FORMULA... + brew update + brew upgrade [FORMULA...] + brew uninstall FORMULA... + brew list [FORMULA...] + +Troubleshooting: + brew config + brew doctor + brew install --verbose --debug FORMULA + +Contributing: + brew create [URL [--no-fetch]] + brew edit [FORMULA...] + +Further help: + brew commands + brew help [COMMAND] + man brew + https://docs.brew.sh +mactel:source-watcher andrea$ brew cask +Error: Unknown command: cask +mactel:source-watcher andrea$ brew uninstall virtualbox +==> Uninstalling Cask virtualbox +==> Running uninstall script VirtualBox_Uninstall.tool +Password: + +Welcome to the VirtualBox uninstaller script. 
+ +Executing: /usr/bin/kmutil showloaded --list-only --bundle-identifier org.virtualbox.kext.VBoxUSB +No variant specified, falling back to release +Executing: /usr/bin/kmutil showloaded --list-only --bundle-identifier org.virtualbox.kext.VBoxNetFlt +No variant specified, falling back to release +Executing: /usr/bin/kmutil showloaded --list-only --bundle-identifier org.virtualbox.kext.VBoxNetAdp +No variant specified, falling back to release +Executing: /usr/bin/kmutil showloaded --list-only --bundle-identifier org.virtualbox.kext.VBoxDrv +No variant specified, falling back to release +The following files and directories (bundles) will be removed: + /Users/andrea/Library/LaunchAgents/org.virtualbox.vboxwebsrv.plist + /usr/local/bin/VirtualBox + /usr/local/bin/VBoxManage + /usr/local/bin/VBoxVRDP + /usr/local/bin/VBoxHeadless + /usr/local/bin/vboxwebsrv + /usr/local/bin/VBoxBugReport + /usr/local/bin/VBoxBalloonCtrl + /usr/local/bin/VBoxAutostart + /usr/local/bin/VBoxDTrace + /usr/local/bin/vbox-img + /Library/LaunchDaemons/org.virtualbox.startup.plist + /Library/Python/2.7/site-packages/vboxapi/VirtualBox_constants.py + /Library/Python/2.7/site-packages/vboxapi/VirtualBox_constants.pyc + /Library/Python/2.7/site-packages/vboxapi/__init__.py + /Library/Python/2.7/site-packages/vboxapi/__init__.pyc + /Library/Python/2.7/site-packages/vboxapi-1.0-py2.7.egg-info + /Library/Application Support/VirtualBox/LaunchDaemons/ + /Library/Application Support/VirtualBox/VBoxDrv.kext/ + /Library/Application Support/VirtualBox/VBoxUSB.kext/ + /Library/Application Support/VirtualBox/VBoxNetFlt.kext/ + /Library/Application Support/VirtualBox/VBoxNetAdp.kext/ + /Applications/VirtualBox.app/ + /Library/Python/2.7/site-packages/vboxapi/ + +And the traces of following packages will be removed: + org.virtualbox.pkg.vboxkexts + org.virtualbox.pkg.virtualbox + org.virtualbox.pkg.virtualboxcli + +The uninstallation processes requires administrative privileges +because some of the installed 
files cannot be removed by a normal +user. You may be prompted for your password now... + +Successfully unloaded VirtualBox kernel extensions. +Forgot package 'org.virtualbox.pkg.vboxkexts' on '/'. +Forgot package 'org.virtualbox.pkg.virtualbox' on '/'. +Forgot package 'org.virtualbox.pkg.virtualboxcli' on '/'. +Done. +==> Uninstalling packages: +==> Purging files for version 6.0.0,127566 of Cask virtualbox +mactel:source-watcher andrea$ brew install virtualbox +Error: + homebrew-core is a shallow clone. + homebrew-cask is a shallow clone. +To `brew update`, first run: + git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow + git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-cask fetch --unshallow +These commands may take a few minutes to run due to the large size of the repositories. +This restriction has been made on GitHub's request because updating shallow +clones is an extremely expensive operation due to the tree layout and traffic of +Homebrew/homebrew-core and Homebrew/homebrew-cask. We don't do this for you +automatically to avoid repeatedly performing an expensive unshallow operation in +CI systems (which should instead be fixed to not use shallow clones). Sorry for +the inconvenience! +==> Caveats +virtualbox requires a kernel extension to work. +If the installation fails, retry after you enable it in: + System Preferences → Security & Privacy → General + +For more information, refer to vendor documentation or this Apple Technical Note: + https://developer.apple.com/library/content/technotes/tn2459/_index.html + +==> Downloading https://download.virtualbox.org/virtualbox/6.1.18/VirtualBox-6.1.18-142142-OSX.dmg +######################################################################## 100.0% +==> Installing Cask virtualbox +==> Running installer for virtualbox; your password may be necessary. +Package installers may write to any location; options such as `--appdir` are ignored. 
+installer: Package name is Oracle VM VirtualBox +installer: choices changes file '/var/folders/tt/prkpxkn1001cg362nwffx4kc0000gn/T/choices20210902-67677-lnqbru.xml' applied +installer: Upgrading at base path / +installer: The upgrade was successful. +==> Changing ownership of paths required by virtualbox; your password may be necessary. +🍺 virtualbox was successfully installed! +mactel:source-watcher andrea$ + [Restored 2 Sep 2021 at 19:32:53] +Last login: Thu Sep 2 19:32:53 on ttys008 +mactel:source-watcher andrea$ brew install virtualbox virtualbox-extension-pack +Error: + homebrew-core is a shallow clone. + homebrew-cask is a shallow clone. +To `brew update`, first run: + git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow + git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-cask fetch --unshallow +These commands may take a few minutes to run due to the large size of the repositories. +This restriction has been made on GitHub's request because updating shallow +clones is an extremely expensive operation due to the tree layout and traffic of +Homebrew/homebrew-core and Homebrew/homebrew-cask. We don't do this for you +automatically to avoid repeatedly performing an expensive unshallow operation in +CI systems (which should instead be fixed to not use shallow clones). Sorry for +the inconvenience! +Warning: Cask 'virtualbox' is already installed. + +To re-install virtualbox, run: + brew reinstall virtualbox +Warning: Cask 'virtualbox-extension-pack' is already installed. 
+ +To re-install virtualbox-extension-pack, run: + brew reinstall virtualbox-extension-pack +mactel:source-watcher andrea$ brew uninstall virtualbox-extension-pack +==> Uninstalling Cask virtualbox-extension-pack +Password: +0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100% +==> Purging files for version 6.0.0 of Cask virtualbox-extension-pack +mactel:source-watcher andrea$ brew install virtualbox-extension-pack +Error: + homebrew-core is a shallow clone. + homebrew-cask is a shallow clone. +To `brew update`, first run: + git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core fetch --unshallow + git -C /usr/local/Homebrew/Library/Taps/homebrew/homebrew-cask fetch --unshallow +These commands may take a few minutes to run due to the large size of the repositories. +This restriction has been made on GitHub's request because updating shallow +clones is an extremely expensive operation due to the tree layout and traffic of +Homebrew/homebrew-core and Homebrew/homebrew-cask. We don't do this for you +automatically to avoid repeatedly performing an expensive unshallow operation in +CI systems (which should instead be fixed to not use shallow clones). Sorry for +the inconvenience! +==> Caveats +Installing virtualbox-extension-pack means you have AGREED to the license at: + https://www.virtualbox.org/wiki/VirtualBox_PUEL + +==> Downloading https://download.virtualbox.org/virtualbox/6.1.18/Oracle_VM_VirtualBox_Extension_Pack-6.1.18.vbox-extpack +######################################################################## 100.0% +All formula dependencies satisfied. +==> Installing Cask virtualbox-extension-pack +0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100% +🍺 virtualbox-extension-pack was successfully installed! 
+mactel:source-watcher andrea$ + +``` \ No newline at end of file diff --git a/_tmp/osm-install-issues/vbox.osm10-ubuntu18.md b/_tmp/osm-install-issues/vbox.osm10-ubuntu18.md new file mode 100644 index 0000000..5e835a9 --- /dev/null +++ b/_tmp/osm-install-issues/vbox.osm10-ubuntu18.md @@ -0,0 +1,6 @@ +See terminal tabs output in VBox snapshot: "failed osm install" + +Also keep in mind when doing this from scratch again: update apt and get +PGP keys for repos---do it for every key ID shown in error messages. +See also: +- https://stackoverflow.com/questions/49877401/apt-get-update-error-related-with-kubeadm diff --git a/_tmp/osm-install/email.full_install_osm.sh b/_tmp/osm-install/email.full_install_osm.sh new file mode 100644 index 0000000..03b2c57 --- /dev/null +++ b/_tmp/osm-install/email.full_install_osm.sh @@ -0,0 +1,1870 @@ +#!/bin/bash +# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +function usage(){ + echo -e "usage: $0 [OPTIONS]" + echo -e "Install OSM from binaries or source code (by default, from binaries)" + echo -e " OPTIONS" + echo -e " -h / --help: print this help" + echo -e " -y: do not prompt for confirmation, assumes yes" + echo -e " -r : use specified repository name for osm packages" + echo -e " -R : use specified release for osm binaries (deb packages, lxd images, ...)" + echo -e " -u : use specified repository url for osm packages" + echo -e " -k : use specified repository public key url" + echo -e " -b : install OSM from source code using a specific branch (master, v2.0, ...) or tag" + echo -e " -b master (main dev branch)" + echo -e " -b v2.0 (v2.0 branch)" + echo -e " -b tags/v1.1.0 (a specific tag)" + echo -e " ..." + echo -e " -c deploy osm services using container . Valid values are or . If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled" + echo -e " -s or user defined stack name when installed using swarm or namespace when installed using k8s, default is osm" + echo -e " -H use specific juju host controller IP" + echo -e " -S use VCA/juju secret key" + echo -e " -P use VCA/juju public key file" + echo -e " -C use VCA/juju CA certificate file" + echo -e " -A use VCA/juju API proxy" + echo -e " --vimemu: additionally deploy the VIM emulator as a docker container" + echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging" + echo -e " --pla: install the PLA module for placement support" + echo -e " -m : install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)" + echo -e " -o : ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)" + echo -e " -O : Install OSM to an OpenStack infrastructure. is required. 
If a is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/" + echo -e " -N : Public network name required to setup OSM to OpenStack" + echo -e " -f : Public SSH key to use to deploy OSM to OpenStack" + echo -e " -F : Cloud-Init userdata file to deploy OSM to OpenStack" + echo -e " -D use local devops installation path" + echo -e " -w Location to store runtime installation" + echo -e " -t specify osm docker tag (default is latest)" + echo -e " -l: LXD cloud yaml file" + echo -e " -L: LXD credentials yaml file" + echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped" + echo -e " -d use docker registry URL instead of dockerhub" + echo -e " -p set docker proxy URL as part of docker CE configuration" + echo -e " -T specify docker tag for the modules specified with option -m" + echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)" + echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)" + echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)" + echo -e " --nojuju: do not juju, assumes already installed" + echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)" + echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)" + echo -e " --nohostclient: do not install the osmclient" + echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules" + echo -e " --source: install OSM from source code using the latest stable tag" + echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch" + echo -e " --pullimages: pull/run osm images from 
docker.io/opensourcemano" + echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana" + echo -e " --volume: create a VM volume when installing to OpenStack" +# echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)" +# echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch" + echo -e " --showopts: print chosen options and exit (only for debugging)" + echo -e " --charmed: Deploy and operate OSM with Charms on k8s" + echo -e " [--bundle ]: Specify with which bundle to deploy OSM with charms (--charmed option)" + echo -e " [--k8s ]: Specify with which kubernetes to deploy OSM with charms (--charmed option)" + echo -e " [--vca ]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)" + echo -e " [--lxd ]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)" + echo -e " [--lxd-cred ]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)" + echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)" + echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)" + echo -e " [--ha]: Installs High Availability bundle. (--charmed option)" + echo -e " [--tag]: Docker image tag. (--charmed option)" + echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)" + +} + +# takes a juju/accounts.yaml file and returns the password specific +# for a controller. 
I wrote this using only bash tools to minimize +# additions of other packages +function parse_juju_password { + password_file="${HOME}/.local/share/juju/accounts.yaml" + local controller_name=$1 + local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file | + awk -F$fs -v controller=$controller_name '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) {if (i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; for (i=0; i/dev/null; then + echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" + echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections + echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections + sudo apt-get -yq install iptables-persistent + fi +} + +#Configure NAT rules, based on the current IP addresses of containers +function nat(){ + check_install_iptables_persistent + + echo -e "\nConfiguring NAT rules" + echo -e " Required root privileges" + sudo $OSM_DEVOPS/installers/nat_osm +} + +function FATAL(){ + echo "FATAL error: Cannot install OSM due to \"$1\"" + exit 1 +} + +function update_juju_images(){ + crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab - + ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic +} + +function install_lxd() { + # Apply sysctl production values for optimal performance + sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf + sudo sysctl --system + + # Install LXD snap + sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client + sudo snap install lxd + + # Configure LXD + sudo usermod -a -G lxd `whoami` + cat 
# Prompt the user and parse a yes/no answer. Case-insensitive.
function ask_user(){
    # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
    # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true(0) if user type 'yes'; false (1) if user type 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # Empty reply: apply the default action given in $2 (if it is 'y' or 'n')
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        # ${var,,} lowercases the reply so the comparison is case-insensitive
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}

# Install the OSM client (osmclient + IM packages) from the ETSI OSM apt
# repository and export OSM_HOSTNAME / OSM_RO_HOSTNAME for non-lightweight
# installs. Always returns 0.
function install_osmclient(){
    # NOTE(review): RELEASE/REPOSITORY/REPOSITORY_BASE appear to carry their
    # CLI flag prefix ("-R ", "-r ", "-u "), stripped here with ${var#...};
    # confirm against the option parsing elsewhere in this script.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    # Trust the OSM release signing key, then register the apt repo
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    # Install the python requirements shipped with the deb packages, if present.
    # NOTE(review): unlike the pip calls above, these run WITHOUT sudo and so
    # install into the invoking user's site-packages — confirm intended.
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Classic (non-lightweight) install: discover SO/RO container IPs from lxd
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME="
    fi
    return 0
}

# Install and enable the Prometheus node_exporter as a systemd service
# (dedicated system user, binary under /usr/local/bin). No-op if the
# service is already active. Always returns 0.
function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
        then
            echo "Node Exporter is already running."
        else
            echo "Node Exporter is not active, installing..."
            # Create the service account only once
            if getent passwd node_exporter > /dev/null 2>&1; then
                echo "node_exporter user exists"
            else
                echo "Creating user node_exporter"
                sudo useradd --no-create-home --shell /bin/false node_exporter
            fi
            # Fetch the pinned release tarball, install the binary, clean up /tmp
            wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
            sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
            sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
            sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
            sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
            # Unit file is shipped with the devops repo
            sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
            sudo systemctl daemon-reload
            sudo systemctl restart node_exporter
            sudo systemctl enable node_exporter
            echo "Node Exporter has been activated in this host."
    fi
    return 0
}

# Reverse of install_prometheus_nodeexporter: stop/disable the service and
# remove the unit, the binary and the service user. Always returns 0.
function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}

# Install Docker CE from Docker's apt repo, add $USER to the docker group,
# and optionally configure a registry mirror (DOCKER_PROXY_URL) in
# /etc/docker/daemon.json. Dies via FATAL if docker does not come up.
function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    # NOTE(review): fixed sleep before restart; presumably lets the group
    # change settle — confirm whether it is still needed.
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    if [ -n "${DOCKER_PROXY_URL}" ]; then
        echo "Configuring docker proxy ..."
        if [ -f /etc/docker/daemon.json ]; then
            # daemon.json exists: update the mirror line in place, or inject
            # one right after the opening brace
            if grep -q registry-mirrors /etc/docker/daemon.json; then
                sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
            else
                sudo sed -i "s|{|{\n \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
            fi
        else
            # No daemon.json yet: create a minimal one
            sudo bash -c "cat << EOF > /etc/docker/daemon.json
{
 \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
}
EOF"
        fi
        sudo systemctl daemon-reload
        sudo service docker restart
        echo "... restarted Docker service again"
    fi
    # 'sg docker' runs with the docker group without requiring re-login
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}

# Install the standalone docker-compose binary (pinned to 1.18.0).
function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}

# Install juju from snap (channel $JUJU_VERSION/stable), make sure /snap/bin
# is on PATH for this shell, and pre-cache LXD images unless disabled.
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=$JUJU_VERSION/stable
    # Prepend /snap/bin only if it is not already in PATH
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    [ -n "$INSTALL_NOCACHELXDIMAGES" ] || update_juju_images
    echo "Finished installation of juju"
    return 0
}
# Register the local Kubernetes cluster (credentials taken from
# ~/.kube/config on stdin) in the already-bootstrapped controller, using
# OpenEBS hostpath as the workload storage class.
function juju_addk8s() {
    juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath < "${HOME}/.kube/config"
}

# Register the local Kubernetes cluster with the juju client and bootstrap
# a fresh controller on it, exposing the controller through a LoadBalancer
# service and pinning the agent version.
function juju_createcontroller_k8s(){
    juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client < "${HOME}/.kube/config"
    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
            --config controller-service-type=loadbalancer \
            --agent-version=$JUJU_AGENT_VERSION
}
# Add a one-time DNAT rule so that connections to $DEFAULT_IP:17070 (juju
# controller API port) are forwarded to the VCA host, and persist it.
function juju_createproxy() {
    check_install_iptables_persistent

    # -C only checks for the rule; add + save only when it is missing
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}

# Log the docker client in to the configured registry.
# SECURITY NOTE: passing the password with -p exposes it in the process
# list and shell history; docker's --password-stdin is the safer channel.
function docker_login() {
    echo "Docker login"
    sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}"
}

# Pull third-party images (kafka, mongo, prometheus, grafana, ...) and either
# pull the OSM module images (PULL_IMAGES set) or rebuild them from the
# gerrit sources checked out into $LWTEMPDIR. Dies via FATAL on any failure.
function generate_docker_images() {
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    # Each third-party image is fetched only when a full install is requested
    # (TO_REBUILD empty) or when its module is explicitly listed in TO_REBUILD.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
            module_lower=${module,,}
            # PLA is optional: skip unless explicitly requested
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            # Per-module tag override (-T) applies only to modules in TO_REBUILD
            module_tag="${OSM_DOCKER_TAG}"
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"

        # Build each requested module image from a fresh gerrit checkout
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            # The osmclient image installs from the apt repo, so the repo
            # coordinates are forwarded as build args
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
}

# Copy $1 over $2 (with backup, cp -b) unless they already have identical
# content; if $2 exists and differs, ask the user before overwriting.
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    # BUG FIX: was "if ! $(cmp ...)", which runs cmp inside a command
    # substitution and then tries to execute its (empty) stdout as a command;
    # it only worked by accident of bash's exit-status rules. Invoke cmp
    # directly and test its exit status.
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}

# Stage the docker-compose descriptors (core, UI and optionally PLA) into
# the OSM docker work dir, keeping backups of anything overwritten (cp -b).
function generate_docker_compose_files() {
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}

# Stage the Kubernetes pod manifests; mongo.yaml is removed because mongodb
# is deployed separately (charmed) in this flow.
function generate_k8s_manifest_files() {
    #Kubernetes resources
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
}
# Generate (or idempotently update) the per-module .env files consumed by
# the OSM containers. Existing files are backed up first ("file~"); values
# already present are updated in place with sed, missing ones are appended.
function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    # NOTE(review): on a first run these cp calls print errors because the
    # files do not exist yet — harmless but noisy.
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # Each key below: append if absent, otherwise rewrite the existing line
    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # APIPROXY is only written when a proxy address was actually derived
    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    # The next two are written as commented-out templates for the operator
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    # NOTE(review): a fresh password is generated on every run but only
    # persisted when the env file does not exist yet, so re-runs keep the
    # original password on disk.
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    echo "Finished generation of docker env files"
}

# Write a small wrapper script that runs the osmclient sidecar container
# attached to the OSM overlay network.
function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}

#installs kubernetes packages
# kubelet/kubeadm/kubectl are pinned to 1.15.0-00 and held so that apt
# upgrades do not move them.
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
    sudo apt-mark hold kubelet kubeadm kubectl
}

#initializes kubernetes control plane
# $1: path to the kubeadm config file. Swap is disabled first because
# kubelet refuses to run with swap enabled.
function init_kubeadm() {
    sudo swapoff -a
    # Comment out swap entries in fstab so the setting survives reboots
    sudo sed -i.bak '/.*none.*swap/s/^\(.*\)$/#\1/g' /etc/fstab
    sudo kubeadm init --config $1
    sleep 5
}

# Copy the admin kubeconfig to ~/.kube/config (owned by the current user)
# so that kubectl works without sudo. Dies if the manifest dir is missing.
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}

# Install OpenEBS via helm and wait (up to 400s) for the openebs-hostpath
# storageclass to appear, then mark it as the cluster default.
function install_k8s_storageclass() {
    echo "Installing OpenEBS"
    kubectl create ns openebs
    helm repo add openebs https://openebs.github.io/charts
    helm repo update
    helm install --namespace openebs openebs openebs/openebs --version 1.12.0
    helm ls -n openebs
    local storageclass_timeout=400
    local counter=0
    local storageclass_ready=""
    echo "Waiting for storageclass"
    # Poll every 15s until the storageclass exists or the timeout elapses
    while (( counter < storageclass_timeout ))
    do
        kubectl get storageclass openebs-hostpath &> /dev/null

        if [ $? -eq 0 ] ; then
            echo "Storageclass available"
            storageclass_ready="y"
            break
        else
            counter=$((counter + 15))
            sleep 15
        fi
    done
    [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}

# Deploy MetalLB and configure a layer2 address pool consisting of just the
# host's default IP, so LoadBalancer services resolve to this host.
function install_k8s_metallb() {
    METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
    cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
    echo "apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE" | kubectl apply -f -
}
#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}

#taints K8s master node
# Removes the NoSchedule taint so that workloads can run on a single-node
# cluster.
function taint_master_node() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}

#deploys osm pods and services
function deploy_osm_services() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}

#deploy charmed services
# Creates the juju model for the stack and deploys mongodb from charmhub.
function deploy_charmed_services() {
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
}

# Deploy the optional PLA module, rewriting its hostPath volume to the
# namespace volume location first.
function deploy_osm_pla_service() {
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

#Install Helm v3
# Downloads the pinned helm 3.6.3 release and registers the stable repo,
# skipping everything if a helm binary is already on PATH.
function install_helm() {
    # FIX: probe for helm with 'command -v' instead of executing 'helm' and
    # testing $? — the old form ran the binary just to detect its presence.
    if ! command -v helm > /dev/null 2>&1 ; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
        curl https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz --output helm-v3.6.3.tar.gz
        tar -zxvf helm-v3.6.3.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v3.6.3.tar.gz
        helm repo add stable https://charts.helm.sh/stable
        helm repo update
    fi
}

# Rewrite the image references in the K8s manifests (and pla.yaml when PLA
# is enabled) from opensourcemano/<module>:* to the configured registry,
# user and tag. $1 is the tag; remaining args are the module names.
function parse_yaml() {
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
                $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
            $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
}

# Apply OSM_DOCKER_TAG to the manifests of every module NOT being rebuilt,
# and MODULE_DOCKER_TAG (-T) to the modules that ARE being rebuilt.
function update_manifest_files() {
    osm_services="nbi lcm ro pol mon ng-ui keystone pla"
    list_of_services=""
    list_of_services_to_rebuild=""
    for module in $osm_services; do
        module_upper="${module^^}"
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            list_of_services="$list_of_services $module"
        else
            # BUG FIX: this list was referenced below but never populated,
            # so the -T/MODULE_DOCKER_TAG override was silently ignored.
            list_of_services_to_rebuild="$list_of_services_to_rebuild $module"
        fi
    done
    if [ ! "$OSM_DOCKER_TAG" == "10" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
}

# Point each module's hostPath volume at $OSM_NAMESPACE_VOL instead of the
# default /var/lib/osm.
function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}

# Initialize a single-node docker swarm. On non-default MTU links the
# docker_gwbridge network is pre-created with a free 172.x subnet and the
# detected MTU so overlay traffic is not fragmented. Always returns 0.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # Collect existing network names, then derive an unused 172.x/NN
        # subnet one octet above the highest one currently in use
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 |  awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}

# Create the attachable overlay network used by the OSM stack containers.
function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
# Deploy the lightweight (docker swarm) build of OSM: compute the port map,
# persist the environment into osm_ports.sh, then run `docker stack deploy`.
function deploy_lightweight() {

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # Container ports only: services are reachable inside the overlay
        # network but not published on the host.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container mappings; Prometheus is published on 9091 to avoid
        # colliding with RO, which also listens on 9090.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    # Persist the computed environment so later docker-compose invocations
    # (and the generated osmclient script) can source it.
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        track deploy_osm_pla
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}

# Pull the ELK images, deploy the osm_elk docker stack and, once Kibana
# answers on 5601, create the default filebeat-* index pattern.
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # Poll Kibana's status endpoint every $step seconds for up to $timelength
    # seconds; elk_is_up is 0 (shell "true") once a 200 is seen.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}

# Register the local Kubernetes cluster in OSM behind a dummy VIM account so
# KNFs can be deployed onto the installation cluster itself.
function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
          --name _system-osm-vim \
          --account_type dummy \
          --auth_url http://dummy \
          --user osm --password osm --tenant osm \
          --description "dummy" \
          --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
          --creds ${HOME}/.kube/config \
          --vim _system-osm-vim \
          --k8s-nets '{"net1": null}' \
          --version '1.15' \
          --description "OSM Internal Cluster" \
          _system-osm-k8s
}
# Main installation driver for the lightweight (k8s or swarm) OSM build:
# installs prerequisites (lxd/docker/juju/k8s), bootstraps the VCA (juju),
# generates manifests/compose files and deploys the OSM services.
# Progress is reported to the telemetry endpoint via track().
function install_lightweight() {
    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT

    # Determine the interface carrying the default route, plus its IP and MTU.
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=$(ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}')
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
            || sudo apt-get update \
            || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "Installing $need_packages_lw requires root privileges." \
            || sudo apt-get install -y $need_packages_lw \
            || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)

    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    #Installs Kubernetes
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        taint_master_node
        install_helm
        track install_helm
        install_k8s_storageclass
        track k8s_storageclass
        install_k8s_metallb
        track k8s_metallb
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Bootstrap or attach the juju (VCA) controller.
    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$KUBERNETES" ]; then
                juju_createcontroller_k8s
                juju_addlxd_cloud
            else
                if [ -n "$LXD_CLOUD_FILE" ]; then
                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                    OSM_VCA_CLOUDNAME="lxd-cloud"
                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
                fi
                juju_createcontroller
                juju_createproxy
            fi
        else
            # Reuse an existing controller; register the local LXD as a cloud
            # on it, generating a client certificate when no cloud file given.
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                # Indent the PEM blocks to match the YAML nesting below
                # (NOTE(review): indent width reconstructed — confirm 8 spaces).
                local server_cert=$(cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /')
                local client_cert=$(cat ~/.osm/client.crt | sed 's/^/        /')
                local client_key=$(cat ~/.osm/client.key | sed 's/^/        /')
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=$(sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}')
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=$(juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi

    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
    if [ -z "$KUBERNETES" ]; then
        if [ -z "$OSM_VCA_APIPROXY" ]; then
            OSM_VCA_APIPROXY=$DEFAULT_IP
            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
        fi
        juju_createproxy
    fi
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # BUG FIX: original tested the literal string "OSM_DATABASE_COMMONKEY"
        # (missing $), so this FATAL guard could never trigger.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files
    track env_files

    if [ -n "$KUBERNETES" ]; then
        deploy_charmed_services
        kube_secrets
        update_manifest_files
        namespace_vol
        deploy_osm_services
        # BUG FIX: original read `[ -n "$INSTALL_PLA"]` (no space before ]),
        # which is a test(1) syntax error at runtime, so PLA never deployed.
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
            track deploy_osm_pla
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    echo -e "Checking OSM health state..."
    # BUG FIX: the original chained `cmd || echo ... && echo ... && track
    # osm_unhealthy`; because || and && have equal precedence, the trailing
    # "Check OSM status" message and the osm_unhealthy track event ran even
    # when the health check SUCCEEDED. Group the failure path instead.
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || {
            echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
            echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all"
            track osm_unhealthy
        }
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || {
            echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
            echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}"
            track osm_unhealthy
        }
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null
    track end
    return 0
}
# Provision an OSM VM on an OpenStack cloud via Ansible.
#   $1 - openrc file path or clouds.yaml cloud name
#   $2 - external network name (required)
#   $3 - whether to attach a volume ("true"/"false")
function install_to_openstack() {

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip python3-venv
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV
    source $OPENSTACK_PYTHON_VENV/bin/activate

    # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the Openstack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi
    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        # $1 is an openrc file: source it so the playbook inherits OS_* vars.
        . $1
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            $OSM_DEVOPS/installers/openstack/site.yml
    else
        # $1 is a cloud name resolved from clouds.yaml.
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit from venv
    deactivate

    return 0
}
# Build the vim-emu docker image from the upstream repo and start the
# emulator container (attached to the OSM overlay network in lightweight mode).
function install_vimemu() {
    # BUG FIX: was `echo "\nInstalling vim-emu"` (no -e), which printed a
    # literal "\n"; every other message in this script uses echo -e.
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    # NOTE(review): this replaces any EXIT trap set earlier in the same shell.
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # BUG FIX: added -y so an unattended install cannot hang on the apt prompt
    # (consistent with every other apt-get install in this script).
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "     export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "     osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}

# Install the OSM Kubernetes monitoring stack via the devops helper script.
function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}

# Remove the OSM Kubernetes monitoring stack via the devops helper script.
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}

# Print every installer option/variable; used by --showopts for debugging.
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    echo "INSTALL_NOCACHELXDIMAGES=$INSTALL_NOCACHELXDIMAGES"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    # CONSISTENCY FIX: these three used the odd `echo "X"="$Y"` quoting; the
    # output is identical but the form now matches the rest of the function.
    echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
    echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
    echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}

# Report an installer milestone ($1) to the OSM telemetry endpoint; the
# session cookie is the install start timestamp (SESSION_ID) and ce_duration
# is seconds elapsed since then. Best-effort: output is discarded.
function track(){
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}

# Split DOCKER_REGISTRY_URL of the form "user:password@host[:port]" into
# DOCKER_REGISTRY_USER, DOCKER_REGISTRY_PASSWORD and the bare registry host
# (DOCKER_REGISTRY_URL is overwritten with the host part).
function parse_docker_registry_url() {
    DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
    DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
    DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
}
# ---------------------------------------------------------------------------
# Defaults for every installer option (overridden by command-line flags).
# ---------------------------------------------------------------------------
JUJU_VERSION=2.9
JUJU_AGENT_VERSION=2.9.9
UNINSTALL=""
DEVELOP=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseTEN"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_PLA=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES="y"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
INSTALL_NOCACHELXDIMAGES=""
SESSION_ID=`date +%s`
OSM_DEVOPS=
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
DOCKER_REGISTRY_URL=
DOCKER_PROXY_URL=
MODULE_DOCKER_TAG=

# ---------------------------------------------------------------------------
# Option parsing.
# BUG FIX: the original optstring contained a stray literal space before "hy"
# (":...F:-: hy"), making the space character a declared option; removed.
# NOTE(review): "n:" is declared but has no matching case, and usage mentions
# a -C flag that is not in the optstring — confirm against the usage() text.
# ---------------------------------------------------------------------------
while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-:hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # BUG FIX: message referred to "-i" although this is the -c case.
            echo -e "Invalid argument for -c : '$OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        f)
            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
            ;;
        F)
            OPENSTACK_USERDATA_FILE="${OPTARG}"
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            # NOTE(review): exits 0 on an INVALID namespace — confirm whether
            # a non-zero status was intended.
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            # The following long options are consumed by the charmed installer
            # and deliberately ignored here.
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "overlay" ] && continue
            [ "${OPTARG}" == "only-vca" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done

# ---------------------------------------------------------------------------
# Option validation and dispatch.
# ---------------------------------------------------------------------------
[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

if [ -n "$CHARMED" ]; then
    # NOTE(review): $DOCKER_TAG is not set anywhere in this script (the option
    # -t sets OSM_DOCKER_TAG) — confirm which variable the charmed installers
    # expect.
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi

    exit 0
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
sudo snap install jq

# Resolve the devops repo: use it locally (-D / --test) or clone it into a
# temporary directory and check out the requested refspec (latest stable tag
# when none was given).
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

. $OSM_DEVOPS/common/all_funcs

[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README.txt &> /dev/null
track start

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null
track end
echo -e "\nDONE"
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function usage(){ + echo -e "usage: $0 [OPTIONS]" + echo -e "Install OSM from binaries or source code (by default, from binaries)" + echo -e " OPTIONS" + echo -e " -h / --help: print this help" + echo -e " -y: do not prompt for confirmation, assumes yes" + echo -e " -r : use specified repository name for osm packages" + echo -e " -R : use specified release for osm binaries (deb packages, lxd images, ...)" + echo -e " -u : use specified repository url for osm packages" + echo -e " -k : use specified repository public key url" + echo -e " -b : install OSM from source code using a specific branch (master, v2.0, ...) or tag" + echo -e " -b master (main dev branch)" + echo -e " -b v2.0 (v2.0 branch)" + echo -e " -b tags/v1.1.0 (a specific tag)" + echo -e " ..." + echo -e " -c deploy osm services using container . Valid values are or . If -c is not used then osm will be deployed using default orchestrator. 
When used with --uninstall, osm services deployed by the orchestrator will be uninstalled" + echo -e " -s or user defined stack name when installed using swarm or namespace when installed using k8s, default is osm" + echo -e " -H use specific juju host controller IP" + echo -e " -S use VCA/juju secret key" + echo -e " -P use VCA/juju public key file" + echo -e " -C use VCA/juju CA certificate file" + echo -e " -A use VCA/juju API proxy" + echo -e " --vimemu: additionally deploy the VIM emulator as a docker container" + echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging" + echo -e " --pla: install the PLA module for placement support" + echo -e " -m : install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)" + echo -e " -o : ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)" + echo -e " -O : Install OSM to an OpenStack infrastructure. is required. 
If a is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/" + echo -e " -N : Public network name required to setup OSM to OpenStack" + echo -e " -f : Public SSH key to use to deploy OSM to OpenStack" + echo -e " -F : Cloud-Init userdata file to deploy OSM to OpenStack" + echo -e " -D use local devops installation path" + echo -e " -w Location to store runtime installation" + echo -e " -t specify osm docker tag (default is latest)" + echo -e " -l: LXD cloud yaml file" + echo -e " -L: LXD credentials yaml file" + echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped" + echo -e " -d use docker registry URL instead of dockerhub" + echo -e " -p set docker proxy URL as part of docker CE configuration" + echo -e " -T specify docker tag for the modules specified with option -m" + echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)" + echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)" + echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)" + echo -e " --nojuju: do not juju, assumes already installed" + echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)" + echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)" + echo -e " --nohostclient: do not install the osmclient" + echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules" + echo -e " --source: install OSM from source code using the latest stable tag" + echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch" + echo -e " --pullimages: pull/run osm images from 
docker.io/opensourcemano" + echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana" + echo -e " --volume: create a VM volume when installing to OpenStack" +# echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)" +# echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch" + echo -e " --showopts: print chosen options and exit (only for debugging)" + echo -e " --charmed: Deploy and operate OSM with Charms on k8s" + echo -e " [--bundle ]: Specify with which bundle to deploy OSM with charms (--charmed option)" + echo -e " [--k8s ]: Specify with which kubernetes to deploy OSM with charms (--charmed option)" + echo -e " [--vca ]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)" + echo -e " [--lxd ]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)" + echo -e " [--lxd-cred ]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)" + echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)" + echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)" + echo -e " [--ha]: Installs High Availability bundle. (--charmed option)" + echo -e " [--tag]: Docker image tag. (--charmed option)" + echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)" + +} + +# takes a juju/accounts.yaml file and returns the password specific +# for a controller. 
I wrote this using only bash tools to minimize +# additions of other packages +function parse_juju_password { + password_file="${HOME}/.local/share/juju/accounts.yaml" + local controller_name=$1 + local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file | + awk -F$fs -v controller=$controller_name '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) {if (i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; for (i=0; i/dev/null; then + echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" + echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections + echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections + sudo apt-get -yq install iptables-persistent + fi +} + +#Configure NAT rules, based on the current IP addresses of containers +function nat(){ + check_install_iptables_persistent + + echo -e "\nConfiguring NAT rules" + echo -e " Required root privileges" + sudo $OSM_DEVOPS/installers/nat_osm +} + +function FATAL(){ + echo "FATAL error: Cannot install OSM due to \"$1\"" + exit 1 +} + +function update_juju_images(){ + crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab - + ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic +} + +function install_lxd() { + # Apply sysctl production values for optimal performance + sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf + sudo sysctl --system + + # Install LXD snap + sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client + sudo snap install lxd + + # Configure LXD + sudo usermod -a -G lxd `whoami` + cat 
${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed" + sg lxd -c "lxd waitready" + DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}') + [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}') + DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}') + sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU" + sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU" + #sudo systemctl stop lxd-bridge + #sudo systemctl --system daemon-reload + #sudo systemctl enable lxd-bridge + #sudo systemctl start lxd-bridge +} + +function ask_user(){ + # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive + # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed + # Return: true(0) if user type 'yes'; false (1) if user type 'no' + read -e -p "$1" USER_CONFIRMATION + while true ; do + [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0 + [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1 + [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0 + [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1 + read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION + done +} + +function install_osmclient(){ + CLIENT_RELEASE=${RELEASE#"-R "} + CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg" + CLIENT_REPOSITORY=${REPOSITORY#"-r "} + CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "} + key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY + curl $key_location | sudo apt-key add - + sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM" + sudo apt-get update + sudo apt-get install -y python3-pip + sudo -H LC_ALL=C python3 -m pip install -U 
pip + sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs + sudo apt-get install -y python3-osm-im python3-osmclient + if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then + python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt + fi + if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then + sudo apt-get install -y libcurl4-openssl-dev libssl-dev + python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt + fi + #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc + #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc + #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc + [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'` + [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'` + echo -e "\nOSM client installed" + if [ -z "$INSTALL_LIGHTWEIGHT" ]; then + echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:" + echo " export OSM_HOSTNAME=${OSM_HOSTNAME}" + echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}" + else + echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)." + echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:" + echo " export OSM_HOSTNAME=" + fi + return 0 +} + +function install_prometheus_nodeexporter(){ + if (systemctl -q is-active node_exporter) + then + echo "Node Exporter is already running." + else + echo "Node Exporter is not active, installing..." 
+ if getent passwd node_exporter > /dev/null 2>&1; then + echo "node_exporter user exists" + else + echo "Creating user node_exporter" + sudo useradd --no-create-home --shell /bin/false node_exporter + fi + wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/ + sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz + sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin + sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter + sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64* + sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service + sudo systemctl daemon-reload + sudo systemctl restart node_exporter + sudo systemctl enable node_exporter + echo "Node Exporter has been activated in this host." + fi + return 0 +} + +function uninstall_prometheus_nodeexporter(){ + sudo systemctl stop node_exporter + sudo systemctl disable node_exporter + sudo rm /etc/systemd/system/node_exporter.service + sudo systemctl daemon-reload + sudo userdel node_exporter + sudo rm /usr/local/bin/node_exporter + return 0 +} + +function install_docker_ce() { + # installs and configures Docker CE + echo "Installing Docker CE ..." + sudo apt-get -qq update + sudo apt-get install -y apt-transport-https ca-certificates software-properties-common + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - + sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" + sudo apt-get -qq update + sudo apt-get install -y docker-ce + echo "Adding user to group 'docker'" + sudo groupadd -f docker + sudo usermod -aG docker $USER + sleep 2 + sudo service docker restart + echo "... 
restarted Docker service" + if [ -n "${DOCKER_PROXY_URL}" ]; then + echo "Configuring docker proxy ..." + if [ -f /etc/docker/daemon.json ]; then + if grep -q registry-mirrors /etc/docker/daemon.json; then + sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json + else + sudo sed -i "s|{|{\n \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json + fi + else + sudo bash -c "cat << EOF > /etc/docker/daemon.json +{ + \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] +} +EOF" + fi + sudo systemctl daemon-reload + sudo service docker restart + echo "... restarted Docker service again" + fi + sg docker -c "docker version" || FATAL "Docker installation failed" + echo "... Docker CE installation done" + return 0 +} + +function install_docker_compose() { + # installs and configures docker-compose + echo "Installing Docker Compose ..." + sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose + sudo chmod +x /usr/local/bin/docker-compose + echo "... Docker Compose installation done" +} + +function install_juju() { + echo "Installing juju" + sudo snap install juju --classic --channel=$JUJU_VERSION/stable + [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}" + [ -n "$INSTALL_NOCACHELXDIMAGES" ] || update_juju_images + echo "Finished installation of juju" + return 0 +} + +function juju_createcontroller() { + if ! 
juju show-controller $OSM_STACK_NAME &> /dev/null; then + # Not found created, create the controller + sudo usermod -a -G lxd ${USER} + sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME" + fi + [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed" + juju controller-config features=[k8s-operators] +} + +function juju_addk8s() { + cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath +} + +function juju_createcontroller_k8s(){ + cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client + juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \ + --config controller-service-type=loadbalancer \ + --agent-version=$JUJU_AGENT_VERSION +} + + +function juju_addlxd_cloud(){ + mkdir -p /tmp/.osm + OSM_VCA_CLOUDNAME="lxd-cloud" + LXDENDPOINT=$DEFAULT_IP + LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml + LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml + + cat << EOF > $LXD_CLOUD +clouds: + $OSM_VCA_CLOUDNAME: + type: lxd + auth-types: [certificate] + endpoint: "https://$LXDENDPOINT:8443" + config: + ssl-hostname-verification: false +EOF + openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org" + local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'` + local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/ /'` + local client_key=`cat /tmp/.osm/client.key | sed 's/^/ /'` + + cat << EOF > $LXD_CREDENTIALS +credentials: + $OSM_VCA_CLOUDNAME: + lxd-cloud: + auth-type: certificate + server-cert: | +$server_cert + client-cert: | +$client_cert + client-key: | +$client_key +EOF + lxc config trust add local: /tmp/.osm/client.crt + juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force + juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f 
$LXD_CREDENTIALS + sg lxd -c "lxd waitready" + juju controller-config features=[k8s-operators] +} + + +function juju_createproxy() { + check_install_iptables_persistent + + if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then + sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST + sudo netfilter-persistent save + fi +} + +function docker_login() { + echo "Docker login" + sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}" +} + +function generate_docker_images() { + echo "Pulling and generating docker images" + [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login + + echo "Pulling docker images" + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then + sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image" + sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then + sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then + sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then + sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then + sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then + sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image" 
+ fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then + sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image" + fi + + if [ -n "$PULL_IMAGES" ]; then + echo "Pulling OSM docker images" + for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do + module_lower=${module,,} + if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then + continue + fi + module_tag="${OSM_DOCKER_TAG}" + if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then + module_tag="${MODULE_DOCKER_TAG}" + fi + echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image" + sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image" + done + else + _build_from=$COMMIT_ID + [ -z "$_build_from" ] && _build_from="latest" + echo "OSM Docker images generated from $_build_from" + + for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then + module_lower=${module,,} + if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then + continue + fi + git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module + git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID} + sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image" + fi + done + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then + BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY") + BUILD_ARGS+=(--build-arg RELEASE="$RELEASE") + BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY") + BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE") + sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ." 
+ fi + echo "Finished generation of docker images" + fi + + echo "Finished pulling and generating docker images" +} + +function cmp_overwrite() { + file1="$1" + file2="$2" + if ! $(cmp "${file1}" "${file2}" >/dev/null 2>&1); then + if [ -f "${file2}" ]; then + ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2} + else + cp -b ${file1} ${file2} + fi + fi +} + +function generate_docker_compose_files() { + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml + if [ -n "$INSTALL_PLA" ]; then + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml + fi +} + +function generate_k8s_manifest_files() { + #Kubernetes resources + $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR + $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml +} + +function generate_prometheus_grafana_files() { + [ -n "$KUBERNETES" ] && return + # Prometheus files + $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml + + # Grafana files + $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json + + # Prometheus 
Exporters files + $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service +} + +function generate_docker_env_files() { + echo "Doing a backup of existing env files" + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~} + + echo "Generating docker env files" + # LCM + if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then + echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! 
grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if [ -n "$OSM_VCA_APIPROXY" ]; then + if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + fi + + if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + # RO + MYSQL_ROOT_PASSWORD=$(generate_secret) + if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then + echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env + fi + if [ ! 
-f $OSM_DOCKER_WORK_DIR/ro.env ]; then + echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env + fi + if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then + echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env + fi + + # Keystone + KEYSTONE_DB_PASSWORD=$(generate_secret) + SERVICE_PASSWORD=$(generate_secret) + if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then + echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env + fi + if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then + echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env + echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env + echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env + fi + + # NBI + if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then + echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env + echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env + fi + + # MON + if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then + echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + fi + + if ! 
grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + + if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + + if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + + if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + + + # POL + if [ ! 
-f $OSM_DOCKER_WORK_DIR/pol.env ]; then + echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env + fi + + echo "Finished generation of docker env files" +} + +function generate_osmclient_script () { + echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm + $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm" + echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm" +} + +#installs kubernetes packages +function install_kube() { + sudo apt-get update && sudo apt-get install -y apt-transport-https + curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - + sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main" + sudo apt-get update + echo "Installing Kubernetes Packages ..." + sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00 + sudo apt-mark hold kubelet kubeadm kubectl +} + +#initializes kubernetes control plane +function init_kubeadm() { + sudo swapoff -a + sudo sed -i.bak '/.*none.*swap/s/^\(.*\)$/#\1/g' /etc/fstab + sudo kubeadm init --config $1 + sleep 5 +} + +function kube_config_dir() { + [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes" + mkdir -p $HOME/.kube + sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config +} + +function install_k8s_storageclass() { + echo "Installing OpenEBS" + kubectl create ns openebs + helm repo add openebs https://openebs.github.io/charts + helm repo update + helm install --namespace openebs openebs openebs/openebs --version 1.12.0 + helm ls -n openebs + local storageclass_timeout=400 + local counter=0 + local storageclass_ready="" + echo "Waiting for storageclass" + while (( counter < storageclass_timeout )) + do + kubectl get storageclass openebs-hostpath &> /dev/null + + if [ $? 
-eq 0 ] ; then + echo "Storageclass available" + storageclass_ready="y" + break + else + counter=$((counter + 15)) + sleep 15 + fi + done + [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs" + kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' +} + +function install_k8s_metallb() { + METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP + cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f - + echo "apiVersion: v1 +kind: ConfigMap +metadata: + namespace: metallb-system + name: config +data: + config: | + address-pools: + - name: default + protocol: layer2 + addresses: + - $METALLB_IP_RANGE" | kubectl apply -f - +} +#deploys flannel as daemonsets +function deploy_cni_provider() { + CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")" + trap 'rm -rf "${CNI_DIR}"' EXIT + wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR + kubectl apply -f $CNI_DIR + [ $? 
-ne 0 ] && FATAL "Cannot Install Flannel" +} + +#creates secrets from env files which will be used by containers +function kube_secrets(){ + kubectl create ns $OSM_STACK_NAME + kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env + kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env + kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env + kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env + kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env + kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env + kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env +} + +#taints K8s master node +function taint_master_node() { + K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}') + kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule- + sleep 5 +} + +#deploys osm pods and services +function deploy_osm_services() { + kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR +} + +#deploy charmed services +function deploy_charmed_services() { + juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME + juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME +} + +function deploy_osm_pla_service() { + # corresponding to namespace_vol + $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml + # corresponding to deploy_osm_services + kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla +} + +#Install Helm v3 +function install_helm() { + helm > /dev/null 2>&1 + if [ $? != 0 ] ; then + # Helm is not installed. Install helm + echo "Helm is not installed, installing ..." 
+ curl https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz --output helm-v3.6.3.tar.gz + tar -zxvf helm-v3.6.3.tar.gz + sudo mv linux-amd64/helm /usr/local/bin/helm + rm -r linux-amd64 + rm helm-v3.6.3.tar.gz + helm repo add stable https://charts.helm.sh/stable + helm repo update + fi +} + +function parse_yaml() { + TAG=$1 + shift + services=$@ + for module in $services; do + if [ "$module" == "pla" ]; then + if [ -n "$INSTALL_PLA" ]; then + echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}" + $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml + fi + else + echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}" + $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml + fi + done +} + +function update_manifest_files() { + osm_services="nbi lcm ro pol mon ng-ui keystone pla" + list_of_services="" + for module in $osm_services; do + module_upper="${module^^}" + if ! echo $TO_REBUILD | grep -q $module_upper ; then + list_of_services="$list_of_services $module" + fi + done + if [ ! 
"$OSM_DOCKER_TAG" == "10" ]; then + parse_yaml $OSM_DOCKER_TAG $list_of_services + fi + if [ -n "$MODULE_DOCKER_TAG" ]; then + parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild + fi +} + +function namespace_vol() { + osm_services="nbi lcm ro pol mon kafka mysql prometheus" + for osm in $osm_services; do + $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml + done +} + +function init_docker_swarm() { + if [ "${DEFAULT_MTU}" != "1500" ]; then + DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s` + DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'` + sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge" + fi + sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}" + return 0 +} + +function create_docker_network() { + echo "creating network" + sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}" + echo "creating network DONE" +} + +function deploy_lightweight() { + + echo "Deploying lightweight build" + OSM_NBI_PORT=9999 + OSM_RO_PORT=9090 + OSM_KEYSTONE_PORT=5000 + OSM_UI_PORT=80 + OSM_MON_PORT=8662 + OSM_PROM_PORT=9090 + OSM_PROM_CADVISOR_PORT=8080 + OSM_PROM_HOSTPORT=9091 + OSM_GRAFANA_PORT=3000 + [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601 + #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000 + + if [ -n "$NO_HOST_PORTS" ]; then + OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT) + OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT) + OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT) + 
OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT) + OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT) + OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT) + OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT) + OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT) + #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT) + [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT) + else + OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT) + OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT) + OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT) + OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT) + OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT) + OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT) + OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT) + OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT) + #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT) + [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT) + fi + echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee 
--append $OSM_DOCKER_WORK_DIR/osm_ports.sh + + pushd $OSM_DOCKER_WORK_DIR + if [ -n "$INSTALL_PLA" ]; then + track deploy_osm_pla + sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME" + else + sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME" + fi + popd + + echo "Finished deployment of lightweight build" +} + +function deploy_elk() { + echo "Pulling docker images for ELK" + sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image" + sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image" + sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image" + sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image" + sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image" + echo "Finished pulling elk docker images" + $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk" + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk + remove_stack osm_elk + echo "Deploying ELK stack" + sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk" + echo "Waiting for ELK stack to be up and running" + time=0 + step=5 + timelength=40 + elk_is_up=1 + while [ $time -le $timelength ]; do + if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then + elk_is_up=0 + break + fi + sleep $step + time=$((time+step)) + done + if [ 
$elk_is_up -eq 0 ]; then + echo "ELK is up and running. Trying to create index pattern..." + #Create index pattern + curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \ + -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null + #Make it the default index + curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \ + -d"{\"value\":\"filebeat-*\"}" 2>/dev/null + else + echo "Cannot connect to Kibana to create index pattern." + echo "Once Kibana is running, you can use the following instructions to create index pattern:" + echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \ + -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"' + echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \ + -d"{\"value\":\"filebeat-*\"}"' + fi + echo "Finished deployment of ELK stack" + return 0 +} + +function add_local_k8scluster() { + /usr/bin/osm --all-projects vim-create \ + --name _system-osm-vim \ + --account_type dummy \ + --auth_url http://dummy \ + --user osm --password osm --tenant osm \ + --description "dummy" \ + --config '{management_network_name: mgmt}' + /usr/bin/osm --all-projects k8scluster-add \ + --creds ${HOME}/.kube/config \ + --vim _system-osm-vim \ + --k8s-nets '{"net1": null}' \ + --version '1.15' \ + --description "OSM Internal Cluster" \ + _system-osm-k8s +} + +function install_lightweight() { + track checkingroot + [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges." + track noroot + + if [ -n "$KUBERNETES" ]; then + [ -z "$ASSUME_YES" ] && ! 
ask_user "The installation will do the following + 1. Install and configure LXD + 2. Install juju + 3. Install docker CE + 4. Disable swap space + 5. Install and initialize Kubernetes + as pre-requirements. + Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1 + + else + [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1 + fi + track proceed + + echo "Installing lightweight build of OSM" + LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")" + trap 'rm -rf "${LWTEMPDIR}"' EXIT + DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}') + [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}') + [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0" + DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'` + [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route" + DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}') + + # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to + if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then + need_packages_lw="snapd" + echo -e "Checking required packages: $need_packages_lw" + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ + || sudo apt-get update \ + || FATAL "failed to run apt-get update" + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "Installing $need_packages_lw requires root privileges." 
\ + || sudo apt-get install -y $need_packages_lw \ + || FATAL "failed to install $need_packages_lw" + install_lxd + fi + + track prereqok + + [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce) + + echo "Creating folders for installation" + [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR + [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla + [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml + + #Installs Kubernetes + if [ -n "$KUBERNETES" ]; then + install_kube + track install_k8s + init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml + kube_config_dir + track init_k8s + if [ -n "$INSTALL_K8S_MONITOR" ]; then + # uninstall OSM MONITORING + uninstall_k8s_monitoring + track uninstall_k8s_monitoring + fi + #remove old namespace + remove_k8s_namespace $OSM_STACK_NAME + deploy_cni_provider + taint_master_node + install_helm + track install_helm + install_k8s_storageclass + track k8s_storageclass + install_k8s_metallb + track k8s_metallb + else + #install_docker_compose + [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm + track docker_swarm + fi + + [ -z "$INSTALL_NOJUJU" ] && install_juju + track juju_install + + if [ -z "$OSM_VCA_HOST" ]; then + if [ -z "$CONTROLLER_NAME" ]; then + + if [ -n "$KUBERNETES" ]; then + juju_createcontroller_k8s + juju_addlxd_cloud + else + if [ -n "$LXD_CLOUD_FILE" ]; then + [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external" + OSM_VCA_CLOUDNAME="lxd-cloud" + juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE + juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE + fi + juju_createcontroller + juju_createproxy + fi + else + 
OSM_VCA_CLOUDNAME="lxd-cloud" + if [ -n "$LXD_CLOUD_FILE" ]; then + [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external" + juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE + juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE + else + mkdir -p ~/.osm + cat << EOF > ~/.osm/lxd-cloud.yaml +clouds: + lxd-cloud: + type: lxd + auth-types: [certificate] + endpoint: "https://$DEFAULT_IP:8443" + config: + ssl-hostname-verification: false +EOF + openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org" + local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'` + local client_cert=`cat ~/.osm/client.crt | sed 's/^/ /'` + local client_key=`cat ~/.osm/client.key | sed 's/^/ /'` + cat << EOF > ~/.osm/lxd-credentials.yaml +credentials: + lxd-cloud: + lxd-cloud: + auth-type: certificate + server-cert: | +$server_cert + client-cert: | +$client_cert + client-key: | +$client_key +EOF + lxc config trust add local: ~/.osm/client.crt + juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml + juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml + fi + fi + [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'` + [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'` + [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju 
controller IP address"
+    fi
+    track juju_controller
+
+    # Obtain the juju admin secret unless the caller supplied one (-S)
+    if [ -z "$OSM_VCA_SECRET" ]; then
+        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
+        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
+        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
+    fi
+    # Public SSH key juju injects into machines it provisions
+    if [ -z "$OSM_VCA_PUBKEY" ]; then
+        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
+        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
+    fi
+    # Controller CA cert, base64-encoded on a single line for the env files
+    if [ -z "$OSM_VCA_CACERT" ]; then
+        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
+        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
+        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
+    fi
+
+    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
+    if [ -z "$KUBERNETES" ]; then
+        if [ -z "$OSM_VCA_APIPROXY" ]; then
+            OSM_VCA_APIPROXY=$DEFAULT_IP
+            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
+        fi
+        juju_createproxy
+    fi
+    track juju
+
+    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
+        OSM_DATABASE_COMMONKEY=$(generate_secret)
+        # BUGFIX: original tested the literal string "OSM_DATABASE_COMMONKEY"
+        # (missing '$'), so this FATAL guard could never trigger.
+        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
+    fi
+
+    # Deploy OSM services
+    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
+    track docker_build
+
+    if [ -n "$KUBERNETES" ]; then
+        generate_k8s_manifest_files
+    else
+        generate_docker_compose_files
+    fi
+    track manifest_files
+    generate_prometheus_grafana_files
+    generate_docker_env_files
+    track env_files
+
+    if [ -n "$KUBERNETES" ]; then
+        deploy_charmed_services
+        kube_secrets
+        update_manifest_files
+        namespace_vol
+        deploy_osm_services
+        # BUGFIX: original read '[ -n "$INSTALL_PLA"]' (no space before ']'),
+        # which makes 'test' fail with "missing ]" and silently skips the
+        # optional PLA deployment even when --pla was requested.
+        if [ -n "$INSTALL_PLA" ]; then
+            # optional PLA install
+            deploy_osm_pla_service
+            track deploy_osm_pla
+        fi
+        
track deploy_osm_services_k8s + if [ -n "$INSTALL_K8S_MONITOR" ]; then + # install OSM MONITORING + install_k8s_monitoring + track install_k8s_monitoring + fi + else + # remove old stack + remove_stack $OSM_STACK_NAME + create_docker_network + deploy_lightweight + generate_osmclient_script + track docker_deploy + install_prometheus_nodeexporter + track nodeexporter + [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu + [ -n "$INSTALL_ELK" ] && deploy_elk && track elk + fi + + [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient + track osmclient + + echo -e "Checking OSM health state..." + if [ -n "$KUBERNETES" ]; then + $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \ + echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \ + echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \ + track osm_unhealthy + else + $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \ + echo -e "OSM is not healthy, but will probably converge to a healthy state soon." 
&& \ + echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \ + track osm_unhealthy + fi + track after_healthcheck + + [ -n "$KUBERNETES" ] && add_local_k8scluster + track add_local_k8scluster + + wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null + track end + return 0 +} + +function install_to_openstack() { + + if [ -z "$2" ]; then + FATAL "OpenStack installer requires a valid external network name" + fi + + # Install Pip for Python3 + $WORKDIR_SUDO apt install -y python3-pip python3-venv + $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip + + # Create a venv to avoid conflicts with the host installation + python3 -m venv $OPENSTACK_PYTHON_VENV + + source $OPENSTACK_PYTHON_VENV/bin/activate + + # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train + python -m pip install -U wheel + python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11" + + # Install the Openstack cloud module (ansible>=2.10) + ansible-galaxy collection install openstack.cloud + + export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg" + + OSM_INSTALLER_ARGS="${REPO_ARGS[@]}" + + ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME" + + if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then + ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE" + fi + + if [ -n "$OPENSTACK_USERDATA_FILE" ]; then + ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE" + fi + + # Execute the Ansible playbook based on openrc or clouds.yaml + if [ -e "$1" ]; then + . 
$1 + ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \ + $OSM_DEVOPS/installers/openstack/site.yml + else + ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \ + -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml + fi + + # Exit from venv + deactivate + + return 0 +} + +function install_vimemu() { + echo "\nInstalling vim-emu" + EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")" + trap 'rm -rf "${EMUTEMPDIR}"' EXIT + # install prerequisites (OVS is a must for the emulator to work) + sudo apt-get install openvswitch-switch + # clone vim-emu repository (attention: branch is currently master only) + echo "Cloning vim-emu repository ..." + git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR + # build vim-emu docker + echo "Building vim-emu Docker container..." + + sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image" + # start vim-emu container as daemon + echo "Starting vim-emu Docker container 'vim-emu' ..." + if [ -n "$INSTALL_LIGHTWEIGHT" ]; then + # in lightweight mode, the emulator needs to be attached to netOSM + sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py" + else + # classic build mode + sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py" + fi + echo "Waiting for 'vim-emu' container to start ..." + sleep 5 + export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu") + echo "vim-emu running at ${VIMEMU_HOSTNAME} ..." 
+ # print vim-emu connection info + echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:" + echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}" + echo -e "To add the emulated VIM to OSM you should do:" + echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack" +} + +function install_k8s_monitoring() { + # install OSM monitoring + $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh + $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh +} + +function uninstall_k8s_monitoring() { + # uninstall OSM monitoring + $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh +} + +function dump_vars(){ + echo "DEVELOP=$DEVELOP" + echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE" + echo "UNINSTALL=$UNINSTALL" + echo "UPDATE=$UPDATE" + echo "RECONFIGURE=$RECONFIGURE" + echo "TEST_INSTALLER=$TEST_INSTALLER" + echo "INSTALL_VIMEMU=$INSTALL_VIMEMU" + echo "INSTALL_PLA=$INSTALL_PLA" + echo "INSTALL_LXD=$INSTALL_LXD" + echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT" + echo "INSTALL_ONLY=$INSTALL_ONLY" + echo "INSTALL_ELK=$INSTALL_ELK" + echo "INSTALL_NOCACHELXDIMAGES=$INSTALL_NOCACHELXDIMAGES" + #echo "INSTALL_PERFMON=$INSTALL_PERFMON" + echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK" + echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME" + echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD" + echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME" + echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE" + echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE" + echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME" + echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR" + echo "TO_REBUILD=$TO_REBUILD" + echo "INSTALL_NOLXD=$INSTALL_NOLXD" + echo "INSTALL_NODOCKER=$INSTALL_NODOCKER" + echo "INSTALL_NOJUJU=$INSTALL_NOJUJU" + echo "RELEASE=$RELEASE" + echo 
"REPOSITORY=$REPOSITORY" + echo "REPOSITORY_BASE=$REPOSITORY_BASE" + echo "REPOSITORY_KEY=$REPOSITORY_KEY" + echo "OSM_DEVOPS=$OSM_DEVOPS" + echo "OSM_VCA_HOST=$OSM_VCA_HOST" + echo "OSM_VCA_SECRET=$OSM_VCA_SECRET" + echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY" + echo "NO_HOST_PORTS=$NO_HOST_PORTS" + echo "DOCKER_NOBUILD=$DOCKER_NOBUILD" + echo "WORKDIR_SUDO=$WORKDIR_SUDO" + echo "OSM_WORK_DIR=$OSM_WORK_DIR" + echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG" + echo "DOCKER_USER=$DOCKER_USER" + echo "OSM_STACK_NAME=$OSM_STACK_NAME" + echo "PULL_IMAGES=$PULL_IMAGES" + echo "KUBERNETES=$KUBERNETES" + echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL" + echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL" + echo "SHOWOPTS=$SHOWOPTS" + echo "Install from specific refspec (-b): $COMMIT_ID" +} + +function track(){ + ctime=`date +%s` + duration=$((ctime - SESSION_ID)) + url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}" + #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}" + event_name="bin" + [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc" + [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd" + [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw" + event_name="${event_name}_$1" + url="${url}&event=${event_name}&ce_duration=${duration}" + wget -q -O /dev/null $url +} + +function parse_docker_registry_url() { + DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}') + DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}') + DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}') +} + +JUJU_VERSION=2.9 +JUJU_AGENT_VERSION=2.9.9 +UNINSTALL="" +DEVELOP="" +UPDATE="" +RECONFIGURE="" +TEST_INSTALLER="" +INSTALL_LXD="" +SHOWOPTS="" +COMMIT_ID="" +ASSUME_YES="" +INSTALL_FROM_SOURCE="" +RELEASE="ReleaseTEN" +REPOSITORY="stable" +INSTALL_VIMEMU="" +INSTALL_PLA="" 
+LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd" +LXD_REPOSITORY_PATH="" +INSTALL_LIGHTWEIGHT="y" +INSTALL_TO_OPENSTACK="" +OPENSTACK_OPENRC_FILE_OR_CLOUD="" +OPENSTACK_PUBLIC_NET_NAME="" +OPENSTACK_ATTACH_VOLUME="false" +OPENSTACK_SSH_KEY_FILE="" +OPENSTACK_USERDATA_FILE="" +OPENSTACK_VM_NAME="server-osm" +OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm" +INSTALL_ONLY="" +INSTALL_ELK="" +TO_REBUILD="" +INSTALL_NOLXD="" +INSTALL_NODOCKER="" +INSTALL_NOJUJU="" +KUBERNETES="y" +INSTALL_K8S_MONITOR="" +INSTALL_NOHOSTCLIENT="" +INSTALL_NOCACHELXDIMAGES="" +SESSION_ID=`date +%s` +OSM_DEVOPS= +OSM_VCA_HOST= +OSM_VCA_SECRET= +OSM_VCA_PUBKEY= +OSM_VCA_CLOUDNAME="localhost" +OSM_VCA_K8S_CLOUDNAME="k8scloud" +OSM_STACK_NAME=osm +NO_HOST_PORTS="" +DOCKER_NOBUILD="" +REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg" +REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian" +WORKDIR_SUDO=sudo +OSM_WORK_DIR="/etc/osm" +OSM_DOCKER_WORK_DIR="/etc/osm/docker" +OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods" +OSM_HOST_VOL="/var/lib/osm" +OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}" +OSM_DOCKER_TAG=latest +DOCKER_USER=opensourcemano +PULL_IMAGES="y" +KAFKA_TAG=2.11-1.0.2 +PROMETHEUS_TAG=v2.4.3 +GRAFANA_TAG=latest +PROMETHEUS_NODE_EXPORTER_TAG=0.18.1 +PROMETHEUS_CADVISOR_TAG=latest +KEYSTONEDB_TAG=10 +OSM_DATABASE_COMMONKEY= +ELASTIC_VERSION=6.4.2 +ELASTIC_CURATOR_VERSION=5.5.4 +POD_NETWORK_CIDR=10.244.0.0/16 +K8S_MANIFEST_DIR="/etc/kubernetes/manifests" +RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' +DOCKER_REGISTRY_URL= +DOCKER_PROXY_URL= +MODULE_DOCKER_TAG= + +while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do + case "${o}" in + b) + COMMIT_ID=${OPTARG} + PULL_IMAGES="" + ;; + r) + REPOSITORY="${OPTARG}" + REPO_ARGS+=(-r "$REPOSITORY") + ;; + c) + [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue + [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue + echo -e "Invalid 
argument for -c : ' $OPTARG'\n" >&2
+            # BUGFIX: message previously said '-i', but this branch handles '-c'
+            usage && exit 1
+            ;;
+        k)
+            REPOSITORY_KEY="${OPTARG}"
+            REPO_ARGS+=(-k "$REPOSITORY_KEY")
+            ;;
+        u)
+            REPOSITORY_BASE="${OPTARG}"
+            REPO_ARGS+=(-u "$REPOSITORY_BASE")
+            ;;
+        R)
+            RELEASE="${OPTARG}"
+            REPO_ARGS+=(-R "$RELEASE")
+            ;;
+        D)
+            OSM_DEVOPS="${OPTARG}"
+            ;;
+        o)
+            INSTALL_ONLY="y"
+            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
+            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
+            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
+            ;;
+        O)
+            INSTALL_TO_OPENSTACK="y"
+            if [ -n "${OPTARG}" ]; then
+                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
+            else
+                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
+                usage && exit 1
+            fi
+            ;;
+        f)
+            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
+            ;;
+        F)
+            OPENSTACK_USERDATA_FILE="${OPTARG}"
+            ;;
+        N)
+            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
+            ;;
+        m)
+            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
+            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
+            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
+            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
+            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
+            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
+            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
+            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
+            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
+            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
+            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
+            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
+            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
+            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
+            [ "${OPTARG}" == "NONE" ]
+            # (continuation of the "-m" arm, whose start lies before this chunk:
+            # "-m NONE" appends a sentinel to TO_REBUILD meaning "build nothing")
+            && TO_REBUILD="$TO_REBUILD NONE" && continue
+            ;;
+        H)
+            # VCA/juju host controller IP
+            OSM_VCA_HOST="${OPTARG}"
+            ;;
+        S)
+            # VCA/juju secret key
+            OSM_VCA_SECRET="${OPTARG}"
+            ;;
+        s)
+            # Stack name (swarm) or namespace (k8s). When Kubernetes is selected the
+            # value must match $RE_CHECK (a namespace-validation regex defined earlier,
+            # outside this chunk); note an invalid name exits 0, not 1 — TODO confirm intended.
+            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
+            ;;
+        w)
+            # when specifying workdir, do not use sudo for access
+            WORKDIR_SUDO=
+            OSM_WORK_DIR="${OPTARG}"
+            ;;
+        t)
+            # Docker tag for OSM images; also forwarded to sub-installers via REPO_ARGS
+            OSM_DOCKER_TAG="${OPTARG}"
+            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
+            ;;
+        U)
+            DOCKER_USER="${OPTARG}"
+            ;;
+        P)
+            # -P takes a *file path*; the file's contents become the VCA public key.
+            # NOTE(review): unquoted ${OPTARG} breaks on paths with spaces.
+            OSM_VCA_PUBKEY=$(cat ${OPTARG})
+            ;;
+        A)
+            OSM_VCA_APIPROXY="${OPTARG}"
+            ;;
+        l)
+            # LXD cloud definition YAML (for juju add-cloud)
+            LXD_CLOUD_FILE="${OPTARG}"
+            ;;
+        L)
+            # LXD credentials YAML (for juju add-credential)
+            LXD_CRED_FILE="${OPTARG}"
+            ;;
+        K)
+            # Name of an already-bootstrapped juju controller to reuse
+            CONTROLLER_NAME="${OPTARG}"
+            ;;
+        d)
+            # Private docker registry URL used instead of dockerhub
+            DOCKER_REGISTRY_URL="${OPTARG}"
+            ;;
+        p)
+            # Docker proxy (registry mirror) URL, applied during docker CE configuration
+            DOCKER_PROXY_URL="${OPTARG}"
+            ;;
+        T)
+            # Tag override for only the modules selected with -m
+            MODULE_DOCKER_TAG="${OPTARG}"
+            ;;
+        -)
+            # Long options arrive via getopts' "-" arm with the option word in OPTARG.
+            # Each match sets its flag and `continue`s to the next argument.
+            [ "${OPTARG}" == "help" ] && usage && exit 0
+            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
+            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
+            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
+            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
+            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
+            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
+            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
+            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
+            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
+            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
+            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
+            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
+            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
+            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
+            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
+            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
+            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
+            # "pullimages" is the default behavior, so it is accepted but a no-op here
+            [ "${OPTARG}" == "pullimages" ] && continue
+            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
+            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
+            # The following charmed-only long options are validated/consumed by the
+            # charmed_install.sh sub-installer (invoked later with "$@"); accepted
+            # here only so getopts does not reject them.
+            [ "${OPTARG}" == "bundle" ] && continue
+            [ "${OPTARG}" == "k8s" ] && continue
+            [ "${OPTARG}" == "lxd" ] && continue
+            [ "${OPTARG}" == "lxd-cred" ] && continue
+            [ "${OPTARG}" == "microstack" ] && continue
+            [ "${OPTARG}" == "overlay" ] && continue
+            [ "${OPTARG}" == "only-vca" ] && continue
+            [ "${OPTARG}" == "vca" ] && continue
+            [ "${OPTARG}" == "ha" ] && continue
+            [ "${OPTARG}" == "tag" ] && continue
+            [ "${OPTARG}" == "registry" ] && continue
+            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
+            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
+            [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue
+            # Anything not matched above is an unknown long option
+            echo -e "Invalid option: '--$OPTARG'\n" >&2
+            usage && exit 1
+            ;;
+        :)
+            # getopts reports a missing required argument via the ":" arm
+            echo "Option -$OPTARG requires an argument" >&2
+            usage && exit 1
+            ;;
+        \?)
+            # Body of the "\?" (unknown short option) arm opened just above
+            echo -e "Invalid option: '-$OPTARG'\n" >&2
+            usage && exit 1
+            ;;
+        h)
+            usage && exit 0
+            ;;
+        y)
+            # Non-interactive mode: ask_user prompts assume "yes"
+            ASSUME_YES="y"
+            ;;
+        *)
+            usage && exit 1
+            ;;
+    esac
+done
+
+# --- Post-parse validation and early dispatch ---------------------------------
+# parse_docker_registry_url / FATAL / dump_vars are defined earlier in the file.
+[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
+# "-m NONE" is exclusive: TO_REBUILD accumulates " NONE" per -m, so any NONE
+# mixed with other module names is rejected.
+[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
+[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
+
+# --showopts: print the chosen configuration and stop (debugging aid)
+if [ -n "$SHOWOPTS" ]; then
+    dump_vars
+    exit 0
+fi
+
+# --charmed: delegate the whole (un)install to the charmed sub-installers,
+# forwarding all original CLI arguments, then stop.
+if [ -n "$CHARMED" ]; then
+    if [ -n "$UNINSTALL" ]; then
+        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
+    else
+        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
+    fi
+    exit 0
+fi
+
+# if develop, we force master
+[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
+
+need_packages="git wget curl tar"
+
+# -O: install into OpenStack instead of locally, then stop
+[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0
+
+# Install base packages only if missing; the "|| ! echo ... || cmd" chains run
+# the privileged command only when the preceding dpkg check failed.
+echo -e "Checking required packages: $need_packages"
+dpkg -l $need_packages &>/dev/null \
+    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
+    || sudo apt-get update \
+    || FATAL "failed to run apt-get update"
+dpkg -l $need_packages &>/dev/null \
+    || ! echo -e "Installing $need_packages requires root privileges." \
+    || sudo apt-get install -y $need_packages \
+    || FATAL "failed to install $need_packages"
+# NOTE(review): jq is installed unconditionally and without error handling,
+# unlike the apt packages above — presumably intentional best-effort.
+sudo snap install jq
+
+# --- Locate or fetch the devops repository -----------------------------------
+# With --test (-D unset) reuse the repo this script lives in; otherwise clone
+# into a temp dir that is removed on exit, and pin to the latest stable tag
+# unless a refspec was given with -b.
+if [ -z "$OSM_DEVOPS" ]; then
+    if [ -n "$TEST_INSTALLER" ]; then
+        echo -e "\nUsing local devops repo for OSM installation"
+        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
+    else
+        echo -e "\nCreating temporary dir for OSM installation"
+        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
+        trap 'rm -rf "$OSM_DEVOPS"' EXIT
+
+        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
+
+        if [ -z "$COMMIT_ID" ]; then
+            echo -e "\nGuessing the current stable release"
+            # Highest version-sorted v* tag is taken as the current stable release
+            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
+            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
+
+            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
+            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
+        else
+            echo -e "\nDEVOPS Using commit $COMMIT_ID"
+        fi
+        git -C $OSM_DEVOPS checkout $COMMIT_ID
+    fi
+fi
+
+# Source shared helper functions (track, FATAL, ask_user, install_* ...)
+. $OSM_DEVOPS/common/all_funcs
+
+# Per-stack working directories; non-default stack names get their own subtree
+[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
+[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
+
+# --uninstall for the lightweight install stops here
+[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
+
+# -o: install only the requested addon(s) and stop
+[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
+#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
+[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
+[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
+[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
+
+#Installation starts here
+# The wget is a telemetry ping (download counter) — output and errors discarded
+wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README.txt &> /dev/null
+track start
+
+[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
+echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
+# Source builds are slow; ask for confirmation unless -y was given
+if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
+    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
+fi
+
+echo -e "Checking required packages: lxd"
+lxd --version &>/dev/null || FATAL "lxd not present, exiting."
+# --lxdinstall: install and preseed LXD (install_lxd defined earlier in file)
+[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
+
+# use local devops for containers
+export OSM_USE_LOCAL_DEVOPS=true
+
+#Install osmclient
+
+#Install vim-emu (optional)
+[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
+
+# Closing telemetry ping mirroring the "track start" ping above
+wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null
+track end
+echo -e "\nDONE"
diff --git a/_tmp/osm-install/gitlab.install_osm.sh b/_tmp/osm-install/gitlab.install_osm.sh
new file mode 100644
index 0000000..4e1bbb1
--- /dev/null
+++ b/_tmp/osm-install/gitlab.install_osm.sh
@@ -0,0 +1,160 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Bootstrap installer: adds the OSM apt repository, installs osm-devops,
+# then hands off to full_install_osm.sh with these defaults (overridable
+# via -r/-R/-u/-t/-D below).
+REPOSITORY_BASE=https://osm-download.etsi.org/repository/osm/debian
+RELEASE=ReleaseTEN
+REPOSITORY=stable
+DOCKER_TAG=10
+DEVOPS_PATH=/usr/share/osm-devops
+
+# Print CLI help. NOTE(review): the option placeholders (e.g. "-r <repo>")
+# appear stripped in this copy of the text — compare against the upstream
+# installer before reusing these strings.
+function usage(){
+    echo -e "usage: $0 [OPTIONS]"
+    echo -e "Install OSM from binaries or source code (by default, from binaries)"
+    echo -e " OPTIONS"
+    echo -e " -h / --help: print this help"
+    echo -e " -y: do not prompt for confirmation, assumes yes"
+    echo -e " -r : use specified repository name for osm packages"
+    echo -e " -R : use specified release for osm binaries (deb packages, lxd images, ...)"
+    echo -e " -u : use specified repository url for osm packages"
+    echo -e " -k : use specified repository public key url"
+    echo -e " -b : install OSM from source code using a specific branch (master, v2.0, ...)
or tag" + echo -e " -b master (main dev branch)" + echo -e " -b v2.0 (v2.0 branch)" + echo -e " -b tags/v1.1.0 (a specific tag)" + echo -e " ..." + echo -e " -c deploy osm services using container . Valid values are or . If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled" + echo -e " -s or user defined stack name when installed using swarm or namespace when installed using k8s, default is osm" + echo -e " -H use specific juju host controller IP" + echo -e " -S use VCA/juju secret key" + echo -e " -P use VCA/juju public key file" + echo -e " -C use VCA/juju CA certificate file" + echo -e " -A use VCA/juju API proxy" + echo -e " --vimemu: additionally deploy the VIM emulator as a docker container" + echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging" + echo -e " --pla: install the PLA module for placement support" + echo -e " -m : install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)" + echo -e " -o : ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)" + echo -e " -O : Install OSM to an OpenStack infrastructure. is required. 
If a is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/" + echo -e " -N : Public network name required to setup OSM to OpenStack" + echo -e " -D use local devops installation path" + echo -e " -w Location to store runtime installation" + echo -e " -t specify osm docker tag (default is latest)" + echo -e " -l: LXD cloud yaml file" + echo -e " -L: LXD credentials yaml file" + echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped" + echo -e " -d use docker registry URL instead of dockerhub" + echo -e " -p set docker proxy URL as part of docker CE configuration" + echo -e " -T specify docker tag for the modules specified with option -m" + echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)" + echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)" + echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)" + echo -e " --nojuju: do not juju, assumes already installed" + echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)" + echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)" + echo -e " --nohostclient: do not install the osmclient" + echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules" + echo -e " --source: install OSM from source code using the latest stable tag" + echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch" + echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano" + echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana" + echo -e " --volume: create a VM volume 
when installing to OpenStack" + echo -e " --showopts: print chosen options and exit (only for debugging)" + echo -e " --charmed: Deploy and operate OSM with Charms on k8s" + echo -e " [--bundle ]: Specify with which bundle to deploy OSM with charms (--charmed option)" + echo -e " [--k8s ]: Specify with which kubernetes to deploy OSM with charms (--charmed option)" + echo -e " [--vca ]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)" + echo -e " [--lxd ]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)" + echo -e " [--lxd-cred ]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)" + echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)" + echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)" + echo -e " [--ha]: Installs High Availability bundle. (--charmed option)" + echo -e " [--tag]: Docker image tag. (--charmed option)" + echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)" + +} + +add_repo() { + REPO_CHECK="^$1" + grep "${REPO_CHECK/\[arch=amd64\]/\\[arch=amd64\\]}" /etc/apt/sources.list > /dev/null 2>&1 + if [ $? -ne 0 ] + then + need_packages_lw="software-properties-common apt-transport-https" + echo -e "Checking required packages: $need_packages_lw" + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ + || sudo apt-get -q update \ + || ! echo "failed to run apt-get update" \ + || exit 1 + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "Installing $need_packages_lw requires root privileges." \ + || sudo apt-get install -y $need_packages_lw \ + || ! 
echo "failed to install $need_packages_lw" \ + || exit 1 + wget -qO - $REPOSITORY_BASE/$RELEASE/OSM%20ETSI%20Release%20Key.gpg | sudo apt-key add - + sudo DEBIAN_FRONTEND=noninteractive add-apt-repository -y "$1" && sudo DEBIAN_FRONTEND=noninteractive apt-get update + return 0 + fi + + return 1 +} + +clean_old_repo() { +dpkg -s 'osm-devops' &> /dev/null +if [ $? -eq 0 ]; then + # Clean the previous repos that might exist + sudo sed -i "/osm-download.etsi.org/d" /etc/apt/sources.list +fi +} + +while getopts ":b:r:c:n:k:u:R:l:L:K:p:D:o:O:m:N:H:S:s:w:t:U:P:A:d:p:f:F:-: hy" o; do + case "${o}" in + D) + DEVOPS_PATH="${OPTARG}" + ;; + r) + REPOSITORY="${OPTARG}" + ;; + R) + RELEASE="${OPTARG}" + ;; + u) + REPOSITORY_BASE="${OPTARG}" + ;; + t) + DOCKER_TAG="${OPTARG}" + ;; + -) + [ "${OPTARG}" == "help" ] && usage && exit 0 + ;; + :) + echo "Option -$OPTARG requires an argument" >&2 + usage && exit 1 + ;; + \?) + echo -e "Invalid option: '-$OPTARG'\n" >&2 + usage && exit 1 + ;; + h) + usage && exit 0 + ;; + *) + ;; + esac +done + +clean_old_repo +add_repo "deb [arch=amd64] $REPOSITORY_BASE/$RELEASE $REPOSITORY devops" +sudo DEBIAN_FRONTEND=noninteractive apt-get -q update +sudo DEBIAN_FRONTEND=noninteractive apt-get install osm-devops +$DEVOPS_PATH/installers/full_install_osm.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $DEVOPS_PATH -t $DOCKER_TAG "$@" diff --git a/_tmp/osm-install/gitweb.full_install_osm.sh b/_tmp/osm-install/gitweb.full_install_osm.sh new file mode 100644 index 0000000..03b2c57 --- /dev/null +++ b/_tmp/osm-install/gitweb.full_install_osm.sh @@ -0,0 +1,1870 @@ +#!/bin/bash +# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function usage(){ + echo -e "usage: $0 [OPTIONS]" + echo -e "Install OSM from binaries or source code (by default, from binaries)" + echo -e " OPTIONS" + echo -e " -h / --help: print this help" + echo -e " -y: do not prompt for confirmation, assumes yes" + echo -e " -r : use specified repository name for osm packages" + echo -e " -R : use specified release for osm binaries (deb packages, lxd images, ...)" + echo -e " -u : use specified repository url for osm packages" + echo -e " -k : use specified repository public key url" + echo -e " -b : install OSM from source code using a specific branch (master, v2.0, ...) or tag" + echo -e " -b master (main dev branch)" + echo -e " -b v2.0 (v2.0 branch)" + echo -e " -b tags/v1.1.0 (a specific tag)" + echo -e " ..." + echo -e " -c deploy osm services using container . Valid values are or . If -c is not used then osm will be deployed using default orchestrator. 
When used with --uninstall, osm services deployed by the orchestrator will be uninstalled" + echo -e " -s or user defined stack name when installed using swarm or namespace when installed using k8s, default is osm" + echo -e " -H use specific juju host controller IP" + echo -e " -S use VCA/juju secret key" + echo -e " -P use VCA/juju public key file" + echo -e " -C use VCA/juju CA certificate file" + echo -e " -A use VCA/juju API proxy" + echo -e " --vimemu: additionally deploy the VIM emulator as a docker container" + echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging" + echo -e " --pla: install the PLA module for placement support" + echo -e " -m : install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)" + echo -e " -o : ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)" + echo -e " -O : Install OSM to an OpenStack infrastructure. is required. 
If a is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/" + echo -e " -N : Public network name required to setup OSM to OpenStack" + echo -e " -f : Public SSH key to use to deploy OSM to OpenStack" + echo -e " -F : Cloud-Init userdata file to deploy OSM to OpenStack" + echo -e " -D use local devops installation path" + echo -e " -w Location to store runtime installation" + echo -e " -t specify osm docker tag (default is latest)" + echo -e " -l: LXD cloud yaml file" + echo -e " -L: LXD credentials yaml file" + echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped" + echo -e " -d use docker registry URL instead of dockerhub" + echo -e " -p set docker proxy URL as part of docker CE configuration" + echo -e " -T specify docker tag for the modules specified with option -m" + echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)" + echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)" + echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)" + echo -e " --nojuju: do not juju, assumes already installed" + echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)" + echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)" + echo -e " --nohostclient: do not install the osmclient" + echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules" + echo -e " --source: install OSM from source code using the latest stable tag" + echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch" + echo -e " --pullimages: pull/run osm images from 
docker.io/opensourcemano" + echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana" + echo -e " --volume: create a VM volume when installing to OpenStack" +# echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)" +# echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch" + echo -e " --showopts: print chosen options and exit (only for debugging)" + echo -e " --charmed: Deploy and operate OSM with Charms on k8s" + echo -e " [--bundle ]: Specify with which bundle to deploy OSM with charms (--charmed option)" + echo -e " [--k8s ]: Specify with which kubernetes to deploy OSM with charms (--charmed option)" + echo -e " [--vca ]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)" + echo -e " [--lxd ]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)" + echo -e " [--lxd-cred ]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)" + echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)" + echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)" + echo -e " [--ha]: Installs High Availability bundle. (--charmed option)" + echo -e " [--tag]: Docker image tag. (--charmed option)" + echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)" + +} + +# takes a juju/accounts.yaml file and returns the password specific +# for a controller. 
I wrote this using only bash tools to minimize +# additions of other packages +function parse_juju_password { + password_file="${HOME}/.local/share/juju/accounts.yaml" + local controller_name=$1 + local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file | + awk -F$fs -v controller=$controller_name '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) {if (i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; for (i=0; i/dev/null; then + echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" + echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections + echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections + sudo apt-get -yq install iptables-persistent + fi +} + +#Configure NAT rules, based on the current IP addresses of containers +function nat(){ + check_install_iptables_persistent + + echo -e "\nConfiguring NAT rules" + echo -e " Required root privileges" + sudo $OSM_DEVOPS/installers/nat_osm +} + +function FATAL(){ + echo "FATAL error: Cannot install OSM due to \"$1\"" + exit 1 +} + +function update_juju_images(){ + crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab - + ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic +} + +function install_lxd() { + # Apply sysctl production values for optimal performance + sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf + sudo sysctl --system + + # Install LXD snap + sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client + sudo snap install lxd + + # Configure LXD + sudo usermod -a -G lxd `whoami` + cat 
${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed" + sg lxd -c "lxd waitready" + DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}') + [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}') + DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}') + sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU" + sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU" + #sudo systemctl stop lxd-bridge + #sudo systemctl --system daemon-reload + #sudo systemctl enable lxd-bridge + #sudo systemctl start lxd-bridge +} + +function ask_user(){ + # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive + # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed + # Return: true(0) if user type 'yes'; false (1) if user type 'no' + read -e -p "$1" USER_CONFIRMATION + while true ; do + [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0 + [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1 + [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0 + [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1 + read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION + done +} + +function install_osmclient(){ + CLIENT_RELEASE=${RELEASE#"-R "} + CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg" + CLIENT_REPOSITORY=${REPOSITORY#"-r "} + CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "} + key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY + curl $key_location | sudo apt-key add - + sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM" + sudo apt-get update + sudo apt-get install -y python3-pip + sudo -H LC_ALL=C python3 -m pip install -U 
pip + sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs + sudo apt-get install -y python3-osm-im python3-osmclient + if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then + python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt + fi + if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then + sudo apt-get install -y libcurl4-openssl-dev libssl-dev + python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt + fi + #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc + #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc + #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc + [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'` + [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'` + echo -e "\nOSM client installed" + if [ -z "$INSTALL_LIGHTWEIGHT" ]; then + echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:" + echo " export OSM_HOSTNAME=${OSM_HOSTNAME}" + echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}" + else + echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)." + echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:" + echo " export OSM_HOSTNAME=" + fi + return 0 +} + +function install_prometheus_nodeexporter(){ + if (systemctl -q is-active node_exporter) + then + echo "Node Exporter is already running." + else + echo "Node Exporter is not active, installing..." 
+ if getent passwd node_exporter > /dev/null 2>&1; then + echo "node_exporter user exists" + else + echo "Creating user node_exporter" + sudo useradd --no-create-home --shell /bin/false node_exporter + fi + wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/ + sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz + sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin + sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter + sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64* + sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service + sudo systemctl daemon-reload + sudo systemctl restart node_exporter + sudo systemctl enable node_exporter + echo "Node Exporter has been activated in this host." + fi + return 0 +} + +function uninstall_prometheus_nodeexporter(){ + sudo systemctl stop node_exporter + sudo systemctl disable node_exporter + sudo rm /etc/systemd/system/node_exporter.service + sudo systemctl daemon-reload + sudo userdel node_exporter + sudo rm /usr/local/bin/node_exporter + return 0 +} + +function install_docker_ce() { + # installs and configures Docker CE + echo "Installing Docker CE ..." + sudo apt-get -qq update + sudo apt-get install -y apt-transport-https ca-certificates software-properties-common + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - + sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" + sudo apt-get -qq update + sudo apt-get install -y docker-ce + echo "Adding user to group 'docker'" + sudo groupadd -f docker + sudo usermod -aG docker $USER + sleep 2 + sudo service docker restart + echo "... 
restarted Docker service" + if [ -n "${DOCKER_PROXY_URL}" ]; then + echo "Configuring docker proxy ..." + if [ -f /etc/docker/daemon.json ]; then + if grep -q registry-mirrors /etc/docker/daemon.json; then + sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json + else + sudo sed -i "s|{|{\n \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json + fi + else + sudo bash -c "cat << EOF > /etc/docker/daemon.json +{ + \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] +} +EOF" + fi + sudo systemctl daemon-reload + sudo service docker restart + echo "... restarted Docker service again" + fi + sg docker -c "docker version" || FATAL "Docker installation failed" + echo "... Docker CE installation done" + return 0 +} + +function install_docker_compose() { + # installs and configures docker-compose + echo "Installing Docker Compose ..." + sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose + sudo chmod +x /usr/local/bin/docker-compose + echo "... Docker Compose installation done" +} + +function install_juju() { + echo "Installing juju" + sudo snap install juju --classic --channel=$JUJU_VERSION/stable + [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}" + [ -n "$INSTALL_NOCACHELXDIMAGES" ] || update_juju_images + echo "Finished installation of juju" + return 0 +} + +function juju_createcontroller() { + if ! 
juju show-controller $OSM_STACK_NAME &> /dev/null; then + # Not found created, create the controller + sudo usermod -a -G lxd ${USER} + sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME" + fi + [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed" + juju controller-config features=[k8s-operators] +} + +function juju_addk8s() { + cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath +} + +function juju_createcontroller_k8s(){ + cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client + juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \ + --config controller-service-type=loadbalancer \ + --agent-version=$JUJU_AGENT_VERSION +} + + +function juju_addlxd_cloud(){ + mkdir -p /tmp/.osm + OSM_VCA_CLOUDNAME="lxd-cloud" + LXDENDPOINT=$DEFAULT_IP + LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml + LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml + + cat << EOF > $LXD_CLOUD +clouds: + $OSM_VCA_CLOUDNAME: + type: lxd + auth-types: [certificate] + endpoint: "https://$LXDENDPOINT:8443" + config: + ssl-hostname-verification: false +EOF + openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org" + local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'` + local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/ /'` + local client_key=`cat /tmp/.osm/client.key | sed 's/^/ /'` + + cat << EOF > $LXD_CREDENTIALS +credentials: + $OSM_VCA_CLOUDNAME: + lxd-cloud: + auth-type: certificate + server-cert: | +$server_cert + client-cert: | +$client_cert + client-key: | +$client_key +EOF + lxc config trust add local: /tmp/.osm/client.crt + juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force + juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f 
$LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}


# Adds an iptables DNAT rule so that traffic hitting the host's default IP on
# the Juju API port (17070) is forwarded to the VCA (Juju controller) host,
# and persists the rule via netfilter-persistent. Idempotent: the -C check
# avoids adding a duplicate rule.
function juju_createproxy() {
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}

# Logs the docker CLI into the configured registry using
# DOCKER_REGISTRY_USER / DOCKER_REGISTRY_PASSWORD.
# NOTE(review): passing the password with -p exposes it in the process list;
# docker login --password-stdin would be safer — TODO confirm before changing.
function docker_login() {
    echo "Docker login"
    sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}"
}

# Pulls (or, when PULL_IMAGES is unset, builds from git) all docker images
# needed by the deployment. TO_REBUILD restricts work to the listed modules;
# an empty TO_REBUILD means "all modules".
function generate_docker_images() {
    echo "Pulling and generating docker images"
    # Only authenticate when a private registry URL is configured.
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # mariadb backs the keystone DB; NBI also depends on it being present.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        # Pre-built images: pull each OSM module image from the registry.
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
            module_lower=${module,,}
            # PLA is optional: skip it unless INSTALL_PLA was requested.
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            # A per-module tag override applies only to modules listed in TO_REBUILD.
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        # Build-from-source path: clone each module at COMMIT_ID (default
        # "latest") and docker-build it locally.
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"

        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        # osmclient is built separately with repository build args.
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
}

# Copies file1 over file2, asking the user for confirmation when file2 already
# exists with different content. No-op when the files already compare equal.
# NOTE(review): `if ! $(cmp ...)` runs cmp inside a command substitution and
# then executes its (empty) output; it only works because bash propagates the
# substitution's exit status for an empty command line. Plain
# `if ! cmp ...; then` would be the conventional form — behavior-equivalent.
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! $(cmp "${file1}" "${file2}" >/dev/null 2>&1); then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}

# Copies the docker-compose manifests (core, UI and optionally PLA) from the
# devops tree into the work dir; cp -b keeps a backup of any existing file.
function generate_docker_compose_files() {
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}

# Copies the Kubernetes pod manifests into the work dir and drops mongo.yaml
# (mongodb is deployed as a charmed service instead — see deploy_charmed_services).
function generate_k8s_manifest_files() {
    #Kubernetes resources
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
}

# Stages Prometheus, Grafana and node-exporter config files for the docker
# swarm deployment. Skipped entirely on Kubernetes installs.
function generate_prometheus_grafana_files() {
    [ -n "$KUBERNETES" ] && return
    # Prometheus files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
}

# Generates the per-module .env files consumed by the containers. Each file is
# backed up first (name~); values already present are updated in place with
# sed, missing ones are appended with tee -a, so the function is idempotent
# across re-installs.
function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # APIPROXY is only set for non-k8s installs (see install_lightweight).
    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    # The next two are written commented-out as documented defaults.
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    # NOTE(review): a fresh MYSQL_ROOT_PASSWORD is generated on every run but
    # only written when the env files do not exist yet — on re-install the new
    # value is silently discarded, which looks intentional (keep old DB creds).
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    echo "Finished generation of docker env files"
}

# Writes a one-line wrapper script that runs the osmclient sidecar container
# attached to the OSM overlay network, and marks it executable.
function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}

#installs kubernetes packages
# NOTE(review): versions are hard-pinned to 1.15.0-00 and then apt-mark held;
# 1.15 is long EOL and the apt.kubernetes.io repo has been decommissioned —
# TODO confirm whether this installer targets an archived mirror.
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
    sudo apt-mark hold kubelet kubeadm kubectl
}

#initializes kubernetes control plane
# $1: path to the kubeadm cluster-config yaml. Swap is disabled first (kubelet
# requirement) and the swap entry is commented out of /etc/fstab (with .bak).
function init_kubeadm() {
    sudo swapoff -a
    sudo sed -i.bak '/.*none.*swap/s/^\(.*\)$/#\1/g' /etc/fstab
    sudo kubeadm init --config $1
    sleep 5
}

# Copies the kubeadm admin kubeconfig into $HOME/.kube/config for the
# installing user. Aborts if K8S_MANIFEST_DIR is missing (sanity check that
# kubeadm actually laid down its manifests).
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}

# Installs OpenEBS via helm, polls up to 400s for the openebs-hostpath
# storageclass to appear, then marks it as the cluster default.
function install_k8s_storageclass() {
    echo "Installing OpenEBS"
    kubectl create ns openebs
    helm repo add openebs https://openebs.github.io/charts
    helm repo update
    helm install --namespace openebs openebs openebs/openebs --version 1.12.0
    helm ls -n openebs
    local storageclass_timeout=400
    local counter=0
    local storageclass_ready=""
    echo "Waiting for storageclass"
    while (( counter < storageclass_timeout ))
    do
        kubectl get storageclass openebs-hostpath &> /dev/null

        if [ $? -eq 0 ] ; then
            echo "Storageclass available"
            storageclass_ready="y"
            break
        else
            counter=$((counter + 15))
            sleep 15
        fi
    done
    [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}

# Applies the bundled MetalLB manifest and configures a single-address layer2
# pool consisting of the host's default IP.
function install_k8s_metallb() {
    METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
    cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
    echo "apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE" | kubectl apply -f -
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}

#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}

#taints K8s master node
# (the trailing '-' on the taint actually REMOVES the NoSchedule taint, so OSM
# pods can be scheduled on a single-node cluster)
function taint_master_node() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}

#deploys osm pods and services
function deploy_osm_services() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}

#deploy charmed services
function deploy_charmed_services() {
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
}

# Applies the optional PLA manifests, first rewriting the hostPath volume to
# OSM_NAMESPACE_VOL (mirrors what namespace_vol does for the core services).
function deploy_osm_pla_service() {
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

#Install Helm v3
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
+ curl https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz --output helm-v3.6.3.tar.gz + tar -zxvf helm-v3.6.3.tar.gz + sudo mv linux-amd64/helm /usr/local/bin/helm + rm -r linux-amd64 + rm helm-v3.6.3.tar.gz + helm repo add stable https://charts.helm.sh/stable + helm repo update + fi +} + +function parse_yaml() { + TAG=$1 + shift + services=$@ + for module in $services; do + if [ "$module" == "pla" ]; then + if [ -n "$INSTALL_PLA" ]; then + echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}" + $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml + fi + else + echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}" + $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml + fi + done +} + +function update_manifest_files() { + osm_services="nbi lcm ro pol mon ng-ui keystone pla" + list_of_services="" + for module in $osm_services; do + module_upper="${module^^}" + if ! echo $TO_REBUILD | grep -q $module_upper ; then + list_of_services="$list_of_services $module" + fi + done + if [ ! 
"$OSM_DOCKER_TAG" == "10" ]; then + parse_yaml $OSM_DOCKER_TAG $list_of_services + fi + if [ -n "$MODULE_DOCKER_TAG" ]; then + parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild + fi +} + +function namespace_vol() { + osm_services="nbi lcm ro pol mon kafka mysql prometheus" + for osm in $osm_services; do + $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml + done +} + +function init_docker_swarm() { + if [ "${DEFAULT_MTU}" != "1500" ]; then + DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s` + DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'` + sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge" + fi + sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}" + return 0 +} + +function create_docker_network() { + echo "creating network" + sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}" + echo "creating network DONE" +} + +function deploy_lightweight() { + + echo "Deploying lightweight build" + OSM_NBI_PORT=9999 + OSM_RO_PORT=9090 + OSM_KEYSTONE_PORT=5000 + OSM_UI_PORT=80 + OSM_MON_PORT=8662 + OSM_PROM_PORT=9090 + OSM_PROM_CADVISOR_PORT=8080 + OSM_PROM_HOSTPORT=9091 + OSM_GRAFANA_PORT=3000 + [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601 + #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000 + + if [ -n "$NO_HOST_PORTS" ]; then + OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT) + OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT) + OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT) + 
OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT) + OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT) + OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT) + OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT) + OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT) + #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT) + [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT) + else + OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT) + OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT) + OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT) + OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT) + OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT) + OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT) + OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT) + OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT) + #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT) + [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT) + fi + echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee 
--append $OSM_DOCKER_WORK_DIR/osm_ports.sh + + pushd $OSM_DOCKER_WORK_DIR + if [ -n "$INSTALL_PLA" ]; then + track deploy_osm_pla + sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME" + else + sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME" + fi + popd + + echo "Finished deployment of lightweight build" +} + +function deploy_elk() { + echo "Pulling docker images for ELK" + sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image" + sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image" + sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image" + sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image" + sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image" + echo "Finished pulling elk docker images" + $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk" + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk + remove_stack osm_elk + echo "Deploying ELK stack" + sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk" + echo "Waiting for ELK stack to be up and running" + time=0 + step=5 + timelength=40 + elk_is_up=1 + while [ $time -le $timelength ]; do + if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then + elk_is_up=0 + break + fi + sleep $step + time=$((time+step)) + done + if [ 
$elk_is_up -eq 0 ]; then + echo "ELK is up and running. Trying to create index pattern..." + #Create index pattern + curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \ + -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null + #Make it the default index + curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \ + -d"{\"value\":\"filebeat-*\"}" 2>/dev/null + else + echo "Cannot connect to Kibana to create index pattern." + echo "Once Kibana is running, you can use the following instructions to create index pattern:" + echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \ + -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"' + echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \ + -d"{\"value\":\"filebeat-*\"}"' + fi + echo "Finished deployment of ELK stack" + return 0 +} + +function add_local_k8scluster() { + /usr/bin/osm --all-projects vim-create \ + --name _system-osm-vim \ + --account_type dummy \ + --auth_url http://dummy \ + --user osm --password osm --tenant osm \ + --description "dummy" \ + --config '{management_network_name: mgmt}' + /usr/bin/osm --all-projects k8scluster-add \ + --creds ${HOME}/.kube/config \ + --vim _system-osm-vim \ + --k8s-nets '{"net1": null}' \ + --version '1.15' \ + --description "OSM Internal Cluster" \ + _system-osm-k8s +} + +function install_lightweight() { + track checkingroot + [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges." + track noroot + + if [ -n "$KUBERNETES" ]; then + [ -z "$ASSUME_YES" ] && ! 
ask_user "The installation will do the following + 1. Install and configure LXD + 2. Install juju + 3. Install docker CE + 4. Disable swap space + 5. Install and initialize Kubernetes + as pre-requirements. + Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1 + + else + [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1 + fi + track proceed + + echo "Installing lightweight build of OSM" + LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")" + trap 'rm -rf "${LWTEMPDIR}"' EXIT + DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}') + [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}') + [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0" + DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'` + [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route" + DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}') + + # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to + if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then + need_packages_lw="snapd" + echo -e "Checking required packages: $need_packages_lw" + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ + || sudo apt-get update \ + || FATAL "failed to run apt-get update" + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "Installing $need_packages_lw requires root privileges." 
\ + || sudo apt-get install -y $need_packages_lw \ + || FATAL "failed to install $need_packages_lw" + install_lxd + fi + + track prereqok + + [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce) + + echo "Creating folders for installation" + [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR + [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla + [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml + + #Installs Kubernetes + if [ -n "$KUBERNETES" ]; then + install_kube + track install_k8s + init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml + kube_config_dir + track init_k8s + if [ -n "$INSTALL_K8S_MONITOR" ]; then + # uninstall OSM MONITORING + uninstall_k8s_monitoring + track uninstall_k8s_monitoring + fi + #remove old namespace + remove_k8s_namespace $OSM_STACK_NAME + deploy_cni_provider + taint_master_node + install_helm + track install_helm + install_k8s_storageclass + track k8s_storageclass + install_k8s_metallb + track k8s_metallb + else + #install_docker_compose + [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm + track docker_swarm + fi + + [ -z "$INSTALL_NOJUJU" ] && install_juju + track juju_install + + if [ -z "$OSM_VCA_HOST" ]; then + if [ -z "$CONTROLLER_NAME" ]; then + + if [ -n "$KUBERNETES" ]; then + juju_createcontroller_k8s + juju_addlxd_cloud + else + if [ -n "$LXD_CLOUD_FILE" ]; then + [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external" + OSM_VCA_CLOUDNAME="lxd-cloud" + juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE + juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE + fi + juju_createcontroller + juju_createproxy + fi + else + 
OSM_VCA_CLOUDNAME="lxd-cloud" + if [ -n "$LXD_CLOUD_FILE" ]; then + [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external" + juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE + juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE + else + mkdir -p ~/.osm + cat << EOF > ~/.osm/lxd-cloud.yaml +clouds: + lxd-cloud: + type: lxd + auth-types: [certificate] + endpoint: "https://$DEFAULT_IP:8443" + config: + ssl-hostname-verification: false +EOF + openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org" + local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'` + local client_cert=`cat ~/.osm/client.crt | sed 's/^/ /'` + local client_key=`cat ~/.osm/client.key | sed 's/^/ /'` + cat << EOF > ~/.osm/lxd-credentials.yaml +credentials: + lxd-cloud: + lxd-cloud: + auth-type: certificate + server-cert: | +$server_cert + client-cert: | +$client_cert + client-key: | +$client_key +EOF + lxc config trust add local: ~/.osm/client.crt + juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml + juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml + fi + fi + [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'` + [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'` + [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju 
controller IP address" + fi + track juju_controller + + if [ -z "$OSM_VCA_SECRET" ]; then + [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME) + [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME) + [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret" + fi + if [ -z "$OSM_VCA_PUBKEY" ]; then + OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub) + [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key" + fi + if [ -z "$OSM_VCA_CACERT" ]; then + [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n) + [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n) + [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate" + fi + + # Set OSM_VCA_APIPROXY only when it is not a k8s installation + if [ -z "$KUBERNETES" ]; then + if [ -z "$OSM_VCA_APIPROXY" ]; then + OSM_VCA_APIPROXY=$DEFAULT_IP + [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy" + fi + juju_createproxy + fi + track juju + + if [ -z "$OSM_DATABASE_COMMONKEY" ]; then + OSM_DATABASE_COMMONKEY=$(generate_secret) + [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret" + fi + + # Deploy OSM services + [ -z "$DOCKER_NOBUILD" ] && generate_docker_images + track docker_build + + if [ -n "$KUBERNETES" ]; then + generate_k8s_manifest_files + else + generate_docker_compose_files + fi + track manifest_files + generate_prometheus_grafana_files + generate_docker_env_files + track env_files + + if [ -n "$KUBERNETES" ]; then + deploy_charmed_services + kube_secrets + update_manifest_files + namespace_vol + deploy_osm_services + if [ -n "$INSTALL_PLA"]; then + # optional PLA install + deploy_osm_pla_service + track deploy_osm_pla + fi + 
track deploy_osm_services_k8s + if [ -n "$INSTALL_K8S_MONITOR" ]; then + # install OSM MONITORING + install_k8s_monitoring + track install_k8s_monitoring + fi + else + # remove old stack + remove_stack $OSM_STACK_NAME + create_docker_network + deploy_lightweight + generate_osmclient_script + track docker_deploy + install_prometheus_nodeexporter + track nodeexporter + [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu + [ -n "$INSTALL_ELK" ] && deploy_elk && track elk + fi + + [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient + track osmclient + + echo -e "Checking OSM health state..." + if [ -n "$KUBERNETES" ]; then + $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \ + echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \ + echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \ + track osm_unhealthy + else + $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \ + echo -e "OSM is not healthy, but will probably converge to a healthy state soon." 
&& \ + echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \ + track osm_unhealthy + fi + track after_healthcheck + + [ -n "$KUBERNETES" ] && add_local_k8scluster + track add_local_k8scluster + + wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null + track end + return 0 +} + +function install_to_openstack() { + + if [ -z "$2" ]; then + FATAL "OpenStack installer requires a valid external network name" + fi + + # Install Pip for Python3 + $WORKDIR_SUDO apt install -y python3-pip python3-venv + $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip + + # Create a venv to avoid conflicts with the host installation + python3 -m venv $OPENSTACK_PYTHON_VENV + + source $OPENSTACK_PYTHON_VENV/bin/activate + + # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train + python -m pip install -U wheel + python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11" + + # Install the Openstack cloud module (ansible>=2.10) + ansible-galaxy collection install openstack.cloud + + export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg" + + OSM_INSTALLER_ARGS="${REPO_ARGS[@]}" + + ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME" + + if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then + ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE" + fi + + if [ -n "$OPENSTACK_USERDATA_FILE" ]; then + ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE" + fi + + # Execute the Ansible playbook based on openrc or clouds.yaml + if [ -e "$1" ]; then + . 
$1 + ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \ + $OSM_DEVOPS/installers/openstack/site.yml + else + ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \ + -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml + fi + + # Exit from venv + deactivate + + return 0 +} + +function install_vimemu() { + echo "\nInstalling vim-emu" + EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")" + trap 'rm -rf "${EMUTEMPDIR}"' EXIT + # install prerequisites (OVS is a must for the emulator to work) + sudo apt-get install openvswitch-switch + # clone vim-emu repository (attention: branch is currently master only) + echo "Cloning vim-emu repository ..." + git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR + # build vim-emu docker + echo "Building vim-emu Docker container..." + + sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image" + # start vim-emu container as daemon + echo "Starting vim-emu Docker container 'vim-emu' ..." + if [ -n "$INSTALL_LIGHTWEIGHT" ]; then + # in lightweight mode, the emulator needs to be attached to netOSM + sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py" + else + # classic build mode + sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py" + fi + echo "Waiting for 'vim-emu' container to start ..." + sleep 5 + export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu") + echo "vim-emu running at ${VIMEMU_HOSTNAME} ..." 
+ # print vim-emu connection info + echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:" + echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}" + echo -e "To add the emulated VIM to OSM you should do:" + echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack" +} + +function install_k8s_monitoring() { + # install OSM monitoring + $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh + $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh +} + +function uninstall_k8s_monitoring() { + # uninstall OSM monitoring + $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh +} + +function dump_vars(){ + echo "DEVELOP=$DEVELOP" + echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE" + echo "UNINSTALL=$UNINSTALL" + echo "UPDATE=$UPDATE" + echo "RECONFIGURE=$RECONFIGURE" + echo "TEST_INSTALLER=$TEST_INSTALLER" + echo "INSTALL_VIMEMU=$INSTALL_VIMEMU" + echo "INSTALL_PLA=$INSTALL_PLA" + echo "INSTALL_LXD=$INSTALL_LXD" + echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT" + echo "INSTALL_ONLY=$INSTALL_ONLY" + echo "INSTALL_ELK=$INSTALL_ELK" + echo "INSTALL_NOCACHELXDIMAGES=$INSTALL_NOCACHELXDIMAGES" + #echo "INSTALL_PERFMON=$INSTALL_PERFMON" + echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK" + echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME" + echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD" + echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME" + echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE" + echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE" + echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME" + echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR" + echo "TO_REBUILD=$TO_REBUILD" + echo "INSTALL_NOLXD=$INSTALL_NOLXD" + echo "INSTALL_NODOCKER=$INSTALL_NODOCKER" + echo "INSTALL_NOJUJU=$INSTALL_NOJUJU" + echo "RELEASE=$RELEASE" + echo 
"REPOSITORY=$REPOSITORY" + echo "REPOSITORY_BASE=$REPOSITORY_BASE" + echo "REPOSITORY_KEY=$REPOSITORY_KEY" + echo "OSM_DEVOPS=$OSM_DEVOPS" + echo "OSM_VCA_HOST=$OSM_VCA_HOST" + echo "OSM_VCA_SECRET=$OSM_VCA_SECRET" + echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY" + echo "NO_HOST_PORTS=$NO_HOST_PORTS" + echo "DOCKER_NOBUILD=$DOCKER_NOBUILD" + echo "WORKDIR_SUDO=$WORKDIR_SUDO" + echo "OSM_WORK_DIR=$OSM_WORK_DIR" + echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG" + echo "DOCKER_USER=$DOCKER_USER" + echo "OSM_STACK_NAME=$OSM_STACK_NAME" + echo "PULL_IMAGES=$PULL_IMAGES" + echo "KUBERNETES=$KUBERNETES" + echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL" + echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL" + echo "SHOWOPTS=$SHOWOPTS" + echo "Install from specific refspec (-b): $COMMIT_ID" +} + +function track(){ + ctime=`date +%s` + duration=$((ctime - SESSION_ID)) + url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}" + #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}" + event_name="bin" + [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc" + [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd" + [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw" + event_name="${event_name}_$1" + url="${url}&event=${event_name}&ce_duration=${duration}" + wget -q -O /dev/null $url +} + +function parse_docker_registry_url() { + DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}') + DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}') + DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}') +} + +JUJU_VERSION=2.9 +JUJU_AGENT_VERSION=2.9.9 +UNINSTALL="" +DEVELOP="" +UPDATE="" +RECONFIGURE="" +TEST_INSTALLER="" +INSTALL_LXD="" +SHOWOPTS="" +COMMIT_ID="" +ASSUME_YES="" +INSTALL_FROM_SOURCE="" +RELEASE="ReleaseTEN" +REPOSITORY="stable" +INSTALL_VIMEMU="" +INSTALL_PLA="" 
+LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd" +LXD_REPOSITORY_PATH="" +INSTALL_LIGHTWEIGHT="y" +INSTALL_TO_OPENSTACK="" +OPENSTACK_OPENRC_FILE_OR_CLOUD="" +OPENSTACK_PUBLIC_NET_NAME="" +OPENSTACK_ATTACH_VOLUME="false" +OPENSTACK_SSH_KEY_FILE="" +OPENSTACK_USERDATA_FILE="" +OPENSTACK_VM_NAME="server-osm" +OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm" +INSTALL_ONLY="" +INSTALL_ELK="" +TO_REBUILD="" +INSTALL_NOLXD="" +INSTALL_NODOCKER="" +INSTALL_NOJUJU="" +KUBERNETES="y" +INSTALL_K8S_MONITOR="" +INSTALL_NOHOSTCLIENT="" +INSTALL_NOCACHELXDIMAGES="" +SESSION_ID=`date +%s` +OSM_DEVOPS= +OSM_VCA_HOST= +OSM_VCA_SECRET= +OSM_VCA_PUBKEY= +OSM_VCA_CLOUDNAME="localhost" +OSM_VCA_K8S_CLOUDNAME="k8scloud" +OSM_STACK_NAME=osm +NO_HOST_PORTS="" +DOCKER_NOBUILD="" +REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg" +REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian" +WORKDIR_SUDO=sudo +OSM_WORK_DIR="/etc/osm" +OSM_DOCKER_WORK_DIR="/etc/osm/docker" +OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods" +OSM_HOST_VOL="/var/lib/osm" +OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}" +OSM_DOCKER_TAG=latest +DOCKER_USER=opensourcemano +PULL_IMAGES="y" +KAFKA_TAG=2.11-1.0.2 +PROMETHEUS_TAG=v2.4.3 +GRAFANA_TAG=latest +PROMETHEUS_NODE_EXPORTER_TAG=0.18.1 +PROMETHEUS_CADVISOR_TAG=latest +KEYSTONEDB_TAG=10 +OSM_DATABASE_COMMONKEY= +ELASTIC_VERSION=6.4.2 +ELASTIC_CURATOR_VERSION=5.5.4 +POD_NETWORK_CIDR=10.244.0.0/16 +K8S_MANIFEST_DIR="/etc/kubernetes/manifests" +RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' +DOCKER_REGISTRY_URL= +DOCKER_PROXY_URL= +MODULE_DOCKER_TAG= + +while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do + case "${o}" in + b) + COMMIT_ID=${OPTARG} + PULL_IMAGES="" + ;; + r) + REPOSITORY="${OPTARG}" + REPO_ARGS+=(-r "$REPOSITORY") + ;; + c) + [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue + [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue + echo -e "Invalid 
argument for -i : ' $OPTARG'\n" >&2 + usage && exit 1 + ;; + k) + REPOSITORY_KEY="${OPTARG}" + REPO_ARGS+=(-k "$REPOSITORY_KEY") + ;; + u) + REPOSITORY_BASE="${OPTARG}" + REPO_ARGS+=(-u "$REPOSITORY_BASE") + ;; + R) + RELEASE="${OPTARG}" + REPO_ARGS+=(-R "$RELEASE") + ;; + D) + OSM_DEVOPS="${OPTARG}" + ;; + o) + INSTALL_ONLY="y" + [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue + [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue + [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue + ;; + O) + INSTALL_TO_OPENSTACK="y" + if [ -n "${OPTARG}" ]; then + OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}" + else + echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2 + usage && exit 1 + fi + ;; + f) + OPENSTACK_SSH_KEY_FILE="${OPTARG}" + ;; + F) + OPENSTACK_USERDATA_FILE="${OPTARG}" + ;; + N) + OPENSTACK_PUBLIC_NET_NAME="${OPTARG}" + ;; + m) + [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue + [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue + [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue + [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue + [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue + [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue + [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue + [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue + [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue + [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue + [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue + [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue + [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue + [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue + [ "${OPTARG}" == "NONE" ] 
&& TO_REBUILD="$TO_REBUILD NONE" && continue + ;; + H) + OSM_VCA_HOST="${OPTARG}" + ;; + S) + OSM_VCA_SECRET="${OPTARG}" + ;; + s) + OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0 + ;; + w) + # when specifying workdir, do not use sudo for access + WORKDIR_SUDO= + OSM_WORK_DIR="${OPTARG}" + ;; + t) + OSM_DOCKER_TAG="${OPTARG}" + REPO_ARGS+=(-t "$OSM_DOCKER_TAG") + ;; + U) + DOCKER_USER="${OPTARG}" + ;; + P) + OSM_VCA_PUBKEY=$(cat ${OPTARG}) + ;; + A) + OSM_VCA_APIPROXY="${OPTARG}" + ;; + l) + LXD_CLOUD_FILE="${OPTARG}" + ;; + L) + LXD_CRED_FILE="${OPTARG}" + ;; + K) + CONTROLLER_NAME="${OPTARG}" + ;; + d) + DOCKER_REGISTRY_URL="${OPTARG}" + ;; + p) + DOCKER_PROXY_URL="${OPTARG}" + ;; + T) + MODULE_DOCKER_TAG="${OPTARG}" + ;; + -) + [ "${OPTARG}" == "help" ] && usage && exit 0 + [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue + [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue + [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue + [ "${OPTARG}" == "update" ] && UPDATE="y" && continue + [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue + [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue + [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue + [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue + [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue + [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue + [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue + [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue + [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue + [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue + [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue + [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue + [ "${OPTARG}" == "nohostclient" 
] && INSTALL_NOHOSTCLIENT="y" && continue + [ "${OPTARG}" == "pullimages" ] && continue + [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue + [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue + [ "${OPTARG}" == "bundle" ] && continue + [ "${OPTARG}" == "k8s" ] && continue + [ "${OPTARG}" == "lxd" ] && continue + [ "${OPTARG}" == "lxd-cred" ] && continue + [ "${OPTARG}" == "microstack" ] && continue + [ "${OPTARG}" == "overlay" ] && continue + [ "${OPTARG}" == "only-vca" ] && continue + [ "${OPTARG}" == "vca" ] && continue + [ "${OPTARG}" == "ha" ] && continue + [ "${OPTARG}" == "tag" ] && continue + [ "${OPTARG}" == "registry" ] && continue + [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue + [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue + [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue + echo -e "Invalid option: '--$OPTARG'\n" >&2 + usage && exit 1 + ;; + :) + echo "Option -$OPTARG requires an argument" >&2 + usage && exit 1 + ;; + \?) 
+ echo -e "Invalid option: '-$OPTARG'\n" >&2 + usage && exit 1 + ;; + h) + usage && exit 0 + ;; + y) + ASSUME_YES="y" + ;; + *) + usage && exit 1 + ;; + esac +done + +[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url +[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options" +[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option" + +if [ -n "$SHOWOPTS" ]; then + dump_vars + exit 0 +fi + +if [ -n "$CHARMED" ]; then + if [ -n "$UNINSTALL" ]; then + ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@" + else + ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@" + fi + + exit 0 +fi + +# if develop, we force master +[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master" + +need_packages="git wget curl tar" + +[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0 + +echo -e "Checking required packages: $need_packages" +dpkg -l $need_packages &>/dev/null \ + || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ + || sudo apt-get update \ + || FATAL "failed to run apt-get update" +dpkg -l $need_packages &>/dev/null \ + || ! echo -e "Installing $need_packages requires root privileges." 
\ + || sudo apt-get install -y $need_packages \ + || FATAL "failed to install $need_packages" +sudo snap install jq +if [ -z "$OSM_DEVOPS" ]; then + if [ -n "$TEST_INSTALLER" ]; then + echo -e "\nUsing local devops repo for OSM installation" + OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))" + else + echo -e "\nCreating temporary dir for OSM installation" + OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")" + trap 'rm -rf "$OSM_DEVOPS"' EXIT + + git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS + + if [ -z "$COMMIT_ID" ]; then + echo -e "\nGuessing the current stable release" + LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1` + [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0 + + echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS" + COMMIT_ID="tags/$LATEST_STABLE_DEVOPS" + else + echo -e "\nDEVOPS Using commit $COMMIT_ID" + fi + git -C $OSM_DEVOPS checkout $COMMIT_ID + fi +fi + +. 
$OSM_DEVOPS/common/all_funcs + +[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME" +[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}" +[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0 +[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk +#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon +[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu +[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring +[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0 + +#Installation starts here +wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README.txt &> /dev/null +track start + +[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0 +echo -e "\nInstalling OSM from refspec: $COMMIT_ID" +if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then + ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1 +fi + +echo -e "Checking required packages: lxd" +lxd --version &>/dev/null || FATAL "lxd not present, exiting." 
+[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd + +# use local devops for containers +export OSM_USE_LOCAL_DEVOPS=true + +#Install osmclient + +#Install vim-emu (optional) +[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu + +wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null +track end +echo -e "\nDONE" \ No newline at end of file diff --git a/_tmp/osm-install/gitweb.install_osm.sh b/_tmp/osm-install/gitweb.install_osm.sh new file mode 100644 index 0000000..a581d43 --- /dev/null +++ b/_tmp/osm-install/gitweb.install_osm.sh @@ -0,0 +1,160 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +REPOSITORY_BASE=https://osm-download.etsi.org/repository/osm/debian +RELEASE=ReleaseTEN +REPOSITORY=stable +DOCKER_TAG=10 +DEVOPS_PATH=/usr/share/osm-devops + +function usage(){ + echo -e "usage: $0 [OPTIONS]" + echo -e "Install OSM from binaries or source code (by default, from binaries)" + echo -e " OPTIONS" + echo -e " -h / --help: print this help" + echo -e " -y: do not prompt for confirmation, assumes yes" + echo -e " -r : use specified repository name for osm packages" + echo -e " -R : use specified release for osm binaries (deb packages, lxd images, ...)" + echo -e " -u : use specified repository url for osm packages" + echo -e " -k : use specified repository public key url" + echo -e " -b : install OSM from source code using a specific branch (master, v2.0, ...) 
or tag" + echo -e " -b master (main dev branch)" + echo -e " -b v2.0 (v2.0 branch)" + echo -e " -b tags/v1.1.0 (a specific tag)" + echo -e " ..." + echo -e " -c deploy osm services using container . Valid values are or . If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled" + echo -e " -s or user defined stack name when installed using swarm or namespace when installed using k8s, default is osm" + echo -e " -H use specific juju host controller IP" + echo -e " -S use VCA/juju secret key" + echo -e " -P use VCA/juju public key file" + echo -e " -C use VCA/juju CA certificate file" + echo -e " -A use VCA/juju API proxy" + echo -e " --vimemu: additionally deploy the VIM emulator as a docker container" + echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging" + echo -e " --pla: install the PLA module for placement support" + echo -e " -m : install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)" + echo -e " -o : ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)" + echo -e " -O : Install OSM to an OpenStack infrastructure. is required. 
If a is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/" + echo -e " -N : Public network name required to setup OSM to OpenStack" + echo -e " -D use local devops installation path" + echo -e " -w Location to store runtime installation" + echo -e " -t specify osm docker tag (default is latest)" + echo -e " -l: LXD cloud yaml file" + echo -e " -L: LXD credentials yaml file" + echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped" + echo -e " -d use docker registry URL instead of dockerhub" + echo -e " -p set docker proxy URL as part of docker CE configuration" + echo -e " -T specify docker tag for the modules specified with option -m" + echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)" + echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)" + echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)" + echo -e " --nojuju: do not juju, assumes already installed" + echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)" + echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)" + echo -e " --nohostclient: do not install the osmclient" + echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules" + echo -e " --source: install OSM from source code using the latest stable tag" + echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch" + echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano" + echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana" + echo -e " --volume: create a VM volume 
when installing to OpenStack" + echo -e " --showopts: print chosen options and exit (only for debugging)" + echo -e " --charmed: Deploy and operate OSM with Charms on k8s" + echo -e " [--bundle ]: Specify with which bundle to deploy OSM with charms (--charmed option)" + echo -e " [--k8s ]: Specify with which kubernetes to deploy OSM with charms (--charmed option)" + echo -e " [--vca ]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)" + echo -e " [--lxd ]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)" + echo -e " [--lxd-cred ]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)" + echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)" + echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)" + echo -e " [--ha]: Installs High Availability bundle. (--charmed option)" + echo -e " [--tag]: Docker image tag. (--charmed option)" + echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)" + +} + +add_repo() { + REPO_CHECK="^$1" + grep "${REPO_CHECK/\[arch=amd64\]/\\[arch=amd64\\]}" /etc/apt/sources.list > /dev/null 2>&1 + if [ $? -ne 0 ] + then + need_packages_lw="software-properties-common apt-transport-https" + echo -e "Checking required packages: $need_packages_lw" + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ + || sudo apt-get -q update \ + || ! echo "failed to run apt-get update" \ + || exit 1 + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "Installing $need_packages_lw requires root privileges." \ + || sudo apt-get install -y $need_packages_lw \ + || ! 
echo "failed to install $need_packages_lw" \ + || exit 1 + wget -qO - $REPOSITORY_BASE/$RELEASE/OSM%20ETSI%20Release%20Key.gpg | sudo apt-key add - + sudo DEBIAN_FRONTEND=noninteractive add-apt-repository -y "$1" && sudo DEBIAN_FRONTEND=noninteractive apt-get update + return 0 + fi + + return 1 +} + +clean_old_repo() { +dpkg -s 'osm-devops' &> /dev/null +if [ $? -eq 0 ]; then + # Clean the previous repos that might exist + sudo sed -i "/osm-download.etsi.org/d" /etc/apt/sources.list +fi +} + +while getopts ":b:r:c:n:k:u:R:l:L:K:p:D:o:O:m:N:H:S:s:w:t:U:P:A:d:p:f:F:-: hy" o; do + case "${o}" in + D) + DEVOPS_PATH="${OPTARG}" + ;; + r) + REPOSITORY="${OPTARG}" + ;; + R) + RELEASE="${OPTARG}" + ;; + u) + REPOSITORY_BASE="${OPTARG}" + ;; + t) + DOCKER_TAG="${OPTARG}" + ;; + -) + [ "${OPTARG}" == "help" ] && usage && exit 0 + ;; + :) + echo "Option -$OPTARG requires an argument" >&2 + usage && exit 1 + ;; + \?) + echo -e "Invalid option: '-$OPTARG'\n" >&2 + usage && exit 1 + ;; + h) + usage && exit 0 + ;; + *) + ;; + esac +done + +clean_old_repo +add_repo "deb [arch=amd64] $REPOSITORY_BASE/$RELEASE $REPOSITORY devops" +sudo DEBIAN_FRONTEND=noninteractive apt-get -q update +sudo DEBIAN_FRONTEND=noninteractive apt-get install osm-devops +$DEVOPS_PATH/installers/full_install_osm.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $DEVOPS_PATH -t $DOCKER_TAG "$@" \ No newline at end of file diff --git a/_tmp/osm-install/install.log b/_tmp/osm-install/install.log new file mode 100644 index 0000000..5074684 --- /dev/null +++ b/_tmp/osm-install/install.log @@ -0,0 +1,3047 @@ +Checking required packages: software-properties-common apt-transport-https +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Get:2 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB] +Get:3 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease [4070 B] +Get:4 
http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB] +Get:5 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB] +Get:6 http://archive.ubuntu.com/ubuntu bionic/universe amd64 Packages [8570 kB] +Get:7 http://archive.ubuntu.com/ubuntu bionic/universe Translation-en [4941 kB] +Get:8 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/devops amd64 Packages [479 B] +Get:9 http://archive.ubuntu.com/ubuntu bionic/multiverse amd64 Packages [151 kB] +Get:10 http://archive.ubuntu.com/ubuntu bionic/multiverse Translation-en [108 kB] +Get:11 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [2192 kB] +Get:12 http://archive.ubuntu.com/ubuntu bionic-updates/main Translation-en [430 kB] +Get:13 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [1748 kB] +Get:14 http://archive.ubuntu.com/ubuntu bionic-updates/universe Translation-en [375 kB] +Get:15 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse amd64 Packages [27.3 kB] +Get:16 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse Translation-en [6808 B] +Get:17 http://archive.ubuntu.com/ubuntu bionic-backports/main amd64 Packages [10.0 kB] +Get:18 http://archive.ubuntu.com/ubuntu bionic-backports/main Translation-en [4764 B] +Get:19 http://archive.ubuntu.com/ubuntu bionic-backports/universe amd64 Packages [10.3 kB] +Get:20 http://archive.ubuntu.com/ubuntu bionic-backports/universe Translation-en [4588 B] +Get:21 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [1846 kB] +Get:22 http://security.ubuntu.com/ubuntu bionic-security/main Translation-en [338 kB] +Get:23 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [1137 kB] +Get:24 http://security.ubuntu.com/ubuntu bionic-security/universe Translation-en [259 kB] +Get:25 http://security.ubuntu.com/ubuntu bionic-security/multiverse amd64 Packages [20.9 kB] +Get:26 http://security.ubuntu.com/ubuntu bionic-security/multiverse 
Translation-en [4732 B] +Fetched 22.4 MB in 5s (4500 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:5 http://security.ubuntu.com/ubuntu bionic-security InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:4 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:5 http://security.ubuntu.com/ubuntu bionic-security InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following NEW packages will be installed: + osm-devops +0 upgraded, 1 newly installed, 0 to remove and 4 not upgraded. +Need to get 824 kB of archives. +After this operation, 9116 kB of additional disk space will be used. +Get:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/devops amd64 osm-devops all 10.0.1-1 [824 kB] +Fetched 824 kB in 0s (2189 kB/s) +Selecting previously unselected package osm-devops. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 
40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 60392 files and directories currently installed.) +Preparing to unpack .../osm-devops_10.0.1-1_all.deb ... +Unpacking osm-devops (10.0.1-1) ... +Setting up osm-devops (10.0.1-1) ... +Checking required packages: git wget curl tar + Ensure prerequisites for "jq" are available / Ensure prerequisites for "jq" are available - Ensure prerequisites for "jq" are available \ Download snap "core" (11606) from channel "stable" | Download snap "core" (11606) from channel "stable" / Download snap "core" (11606) from channel "stable" - Download snap "core" (11606) from channel "stable" \ Download snap "core" (11606) from channel "stable" | Download snap "core" (11606) from channel "stable" /[?25l Download snap "core" (11606) from channel "stable" 0% 0B/s ages! Download snap "core" (11606) from channel "stable" 0% 0B/s ages! 
Download snap "core" (11606) from channel "stable" 0% 1.34MB/s 1m18s Download snap "core" (11606) from channel "stable" 1% 2.02MB/s 51.2s Download snap "core" (11606) from channel "stable" 2% 3.65MB/s 28.0s Download snap "core" (11606) from channel "stable" 4% 6.76MB/s 14.8s Download snap "core" (11606) from channel "stable" 7% 10.1MB/s 9.54s Download snap "core" (11606) from channel "stable" 10% 12.9MB/s 7.21s Download snap "core" (11606) from channel "stable" 14% 15.1MB/s 5.97s Download snap "core" (11606) from channel "stable" 16% 16.3MB/s 5.33s Download snap "core" (11606) from channel "stable" 22% 18.5MB/s 4.41s Download snap "core" (11606) from channel "stable" 25% 19.0MB/s 4.12s Download snap "core" (11606) from channel "stable" 28% 20.0MB/s 3.75s Download snap "core" (11606) from channel "stable"  31% 20.9MB/s 3.43s Download snap "core" (11606) from channel "stable"  34% 21.5MB/s 3.18s Download snap "core" (11606) from channel "stable"  37% 22.0MB/s 2.96s Download snap "core" (11606) from channel "stable"  40% 22.5MB/s 2.76s Download snap "core" (11606) from channel "stable"  44% 23.0MB/s 2.55s Download snap "core" (11606) from channel "stable"  47% 23.4MB/s 2.38s Download snap "core" (11606) from channel "stable"  50% 23.7MB/s 2.21s Download snap "core" (11606) from channel "stable"  53% 24.1MB/s 2.03s Download snap "core" (11606) from channel "stable"  56% 24.5MB/s 1.86s Download snap "core" (11606) from channel "stable"  59% 24.6MB/s 1.71s Download snap "core" (11606) from channel "stable"  63% 25.0MB/s 1.56s Download snap "core" (11606) from channel "stable"  66% 25.1MB/s 1.42s Download snap "core" (11606) from channel "stable"  69% 25.4MB/s 1.27s Download snap "core" (11606) from channel "stable"  73% 25.7MB/s 1.11s Download snap "core" (11606) from channel "stable"  76% 25.8MB/s 985ms Download snap "core" (11606) from channel "stable"  79% 26.0MB/s 857ms Download snap "core" (11606) from channel "stable"  82% 26.3MB/s 711ms Download snap "core" 
(11606) from channel "stable"  85% 26.4MB/s 587ms Download snap "core" (11606) from channel "stable"  88% 26.4MB/s 483ms Download snap "core" (11606) from channel "stable" 91% 26.4MB/s 370ms Download snap "core" (11606) from channel "stable" 94% 26.5MB/s 248ms Download snap "core" (11606) from channel "stable" 97% 26.6MB/s 136ms Download snap "core" (11606) from channel "stable" 100% 26.7MB/s 8.0ms Download snap "core" (11606) from channel "stable" 100% 26.1MB/s 0.0ns Fetch and check assertions for snap "core" (11606) - Fetch and check assertions for snap "core" (11606) \ Fetch and check assertions for snap "core" (11606) | Mount snap "core" (11606) / Mount snap "core" (11606) - Mount snap "core" (11606) \ Mount snap "core" (11606) | Mount snap "core" (11606) / Mount snap "core" (11606) - Mount snap "core" (11606) \ Mount snap "core" (11606) | Mount snap "core" (11606) / Mount snap "core" (11606) - Setup snap "core" (11606) security profiles \ Setup snap "core" (11606) security profiles | Setup snap "core" (11606) security profiles / Setup snap "core" (11606) security profiles - Setup snap "core" (11606) security profiles \ Setup snap "core" (11606) security profiles | Setup snap "core" (11606) security profiles / Set automatic aliases for snap "core" - Download snap "jq" (6) from channel "stable" \ Download snap "jq" (6) from channel "stable" | Download snap "jq" (6) from channel "stable" / Download snap "jq" (6) from channel "stable" - Download snap "jq" (6) from channel "stable" \ Download snap "jq" (6) from channel "stable" |[?25l Download snap "jq" (6) from channel "stable" 19% 456kB/s 435ms Download snap "jq" (6) from channel "stable"  39% 464kB/s 323ms Download snap "jq" (6) from channel "stable" 99% 784kB/s 3.4ms Fetch and check assertions for snap "jq" (6) / Mount snap "jq" (6) - Mount snap "jq" (6) \ Mount snap "jq" (6) | Mount snap "jq" (6) / Setup snap "jq" (6) security profiles - Setup snap "jq" (6) security profiles \ [?25hjq 1.5+dfsg-1 from 
Canonical* installed +## Mon Sep 6 20:07:14 CEST 2021 source: logging sourced +## Mon Sep 6 20:07:14 CEST 2021 source: config sourced +## Mon Sep 6 20:07:14 CEST 2021 source: container sourced +## Mon Sep 6 20:07:14 CEST 2021 source: git_functions sourced +The installation will do the following + 1. Install and configure LXD + 2. Install juju + 3. Install docker CE + 4. Disable swap space + 5. Install and initialize Kubernetes + as pre-requirements. + Do you want to proceed (Y/n)? y +Installing lightweight build of OSM +Checking required packages: snapd +* Applying /etc/sysctl.d/10-console-messages.conf ... +kernel.printk = 4 4 1 7 +* Applying /etc/sysctl.d/10-ipv6-privacy.conf ... +net.ipv6.conf.all.use_tempaddr = 2 +net.ipv6.conf.default.use_tempaddr = 2 +* Applying /etc/sysctl.d/10-kernel-hardening.conf ... +kernel.kptr_restrict = 1 +* Applying /etc/sysctl.d/10-link-restrictions.conf ... +fs.protected_hardlinks = 1 +fs.protected_symlinks = 1 +* Applying /etc/sysctl.d/10-lxd-inotify.conf ... +fs.inotify.max_user_instances = 1024 +* Applying /etc/sysctl.d/10-magic-sysrq.conf ... +kernel.sysrq = 176 +* Applying /etc/sysctl.d/10-network-security.conf ... +net.ipv4.conf.default.rp_filter = 1 +net.ipv4.conf.all.rp_filter = 1 +net.ipv4.tcp_syncookies = 1 +* Applying /etc/sysctl.d/10-ptrace.conf ... +kernel.yama.ptrace_scope = 1 +* Applying /etc/sysctl.d/10-zeropage.conf ... +vm.mmap_min_addr = 65536 +* Applying /usr/lib/sysctl.d/50-default.conf ... +net.ipv4.conf.all.promote_secondaries = 1 +net.core.default_qdisc = fq_codel +* Applying /etc/sysctl.d/60-lxd-production.conf ... 
+fs.inotify.max_queued_events = 1048576 +fs.inotify.max_user_instances = 1048576 +fs.inotify.max_user_watches = 1048576 +vm.max_map_count = 262144 +kernel.dmesg_restrict = 1 +net.ipv4.neigh.default.gc_thresh3 = 8192 +net.ipv6.neigh.default.gc_thresh3 = 8192 +net.core.bpf_jit_limit = 3000000000 +kernel.keys.maxkeys = 2000 +kernel.keys.maxbytes = 2000000 +* Applying /etc/sysctl.d/99-cloudimg-ipv6.conf ... +net.ipv6.conf.all.use_tempaddr = 0 +net.ipv6.conf.default.use_tempaddr = 0 +* Applying /etc/sysctl.d/99-sysctl.conf ... +* Applying /etc/sysctl.conf ... +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following packages will be REMOVED: + liblxc-common* liblxc1* lxcfs* lxd* lxd-client* +0 upgraded, 0 newly installed, 5 to remove and 4 not upgraded. +After this operation, 34.1 MB disk space will be freed. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 61700 files and directories currently installed.) +Removing lxd (3.0.3-0ubuntu1~18.04.1) ... +Removing lxd dnsmasq configuration +Removing lxcfs (3.0.3-0ubuntu1~18.04.2) ... +Removing lxd-client (3.0.3-0ubuntu1~18.04.1) ... +Removing liblxc-common (3.0.3-0ubuntu1~18.04.1) ... +Removing liblxc1 (3.0.3-0ubuntu1~18.04.1) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... 
+Processing triggers for libc-bin (2.27-3ubuntu1.4) ... +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 61454 files and directories currently installed.) +Purging configuration files for liblxc-common (3.0.3-0ubuntu1~18.04.1) ... +Purging configuration files for lxd (3.0.3-0ubuntu1~18.04.1) ... +Purging configuration files for lxcfs (3.0.3-0ubuntu1~18.04.2) ... +Processing triggers for systemd (237-3ubuntu10.51) ... +Processing triggers for ureadahead (0.100.0-21) ... + Ensure prerequisites for "lxd" are available / Ensure prerequisites for "core20" are available - Download snap "core20" (1081) from channel "stable" \ Download snap "core20" (1081) from channel "stable" | Download snap "core20" (1081) from channel "stable" / Download snap "core20" (1081) from channel "stable" - Download snap "core20" (1081) from channel "stable" \ Download snap "core20" (1081) from channel "stable" | Download snap "core20" (1081) from channel "stable" /[?25l Download snap "core20" (1081) from channel "stable" 0% 0B/s ages! 
Download snap "core20" (1081) from channel "stable" 0% 704kB/s 1m32s Download snap "core20" (1081) from channel "stable" 0% 888kB/s 1m13s Download snap "core20" (1081) from channel "stable" 1% 1.62MB/s 39.6s Download snap "core20" (1081) from channel "stable" 2% 3.06MB/s 20.7s Download snap "core20" (1081) from channel "stable" 5% 5.29MB/s 11.6s Download snap "core20" (1081) from channel "stable" 9% 8.16MB/s 7.22s Download snap "core20" (1081) from channel "stable" 14% 10.8MB/s 5.19s Download snap "core20" (1081) from channel "stable" 18% 12.8MB/s 4.13s Download snap "core20" (1081) from channel "stable" 23% 14.5MB/s 3.42s Download snap "core20" (1081) from channel "stable" 28% 15.9MB/s 2.95s Download snap "core20" (1081) from channel "stable"  32% 16.9MB/s 2.58s Download snap "core20" (1081) from channel "stable"  37% 18.0MB/s 2.26s Download snap "core20" (1081) from channel "stable"  42% 18.7MB/s 2.02s Download snap "core20" (1081) from channel "stable"  46% 19.4MB/s 1.79s Download snap "core20" (1081) from channel "stable"  51% 20.0MB/s 1.59s Download snap "core20" (1081) from channel "stable"  56% 20.6MB/s 1.39s Download snap "core20" (1081) from channel "stable"  60% 20.7MB/s 1.25s Download snap "core20" (1081) from channel "stable"  65% 21.2MB/s 1.07s Download snap "core20" (1081) from channel "stable"  70% 21.6MB/s 900ms Download snap "core20" (1081) from channel "stable"  74% 21.9MB/s 756ms Download snap "core20" (1081) from channel "stable"  79% 22.2MB/s 610ms Download snap "core20" (1081) from channel "stable"  83% 22.5MB/s 476ms Download snap "core20" (1081) from channel "stable"  88% 22.8MB/s 338ms Download snap "core20" (1081) from channel "stable" 93% 23.0MB/s 210ms Download snap "core20" (1081) from channel "stable" 97% 23.2MB/s 79ms Download snap "core20" (1081) from channel "stable" 100% 22.5MB/s 0.0ns Download snap "core20" (1081) from channel "stable" 100% 21.6MB/s 0.0ns Fetch and check assertions for snap "core20" (1081) - Fetch and check 
assertions for snap "core20" (1081) \ Mount snap "core20" (1081) | Mount snap "core20" (1081) / Mount snap "core20" (1081) - Mount snap "core20" (1081) \ Mount snap "core20" (1081) | Mount snap "core20" (1081) / Mount snap "core20" (1081) - Mount snap "core20" (1081) \ Mount snap "core20" (1081) | Download snap "lxd" (21390) from channel "stable" / Download snap "lxd" (21390) from channel "stable" - Download snap "lxd" (21390) from channel "stable" \ Download snap "lxd" (21390) from channel "stable" |[?25l Download snap "lxd" (21390) from channel "stable" 1% 5.63MB/s 12.7s Download snap "lxd" (21390) from channel "stable" 6% 21.9MB/s 3.10s Download snap "lxd" (21390) from channel "stable" 13% 30.1MB/s 2.09s Download snap "lxd" (21390) from channel "stable" 22% 37.4MB/s 1.52s Download snap "lxd" (21390) from channel "stable"  32% 42.5MB/s 1.16s Download snap "lxd" (21390) from channel "stable"  39% 43.1MB/s 1.03s Download snap "lxd" (21390) from channel "stable"  46% 43.7MB/s 899ms Download snap "lxd" (21390) from channel "stable"  53% 43.0MB/s 791ms Download snap "lxd" (21390) from channel "stable"  63% 45.0MB/s 602ms Download snap "lxd" (21390) from channel "stable"  69% 44.9MB/s 502ms Download snap "lxd" (21390) from channel "stable"  76% 45.4MB/s 380ms Download snap "lxd" (21390) from channel "stable"  83% 45.7MB/s 265ms Download snap "lxd" (21390) from channel "stable" 90% 46.0MB/s 152ms Download snap "lxd" (21390) from channel "stable" 99% 46.7MB/s 21ms Download snap "lxd" (21390) from channel "stable" 100% 44.3MB/s 0.0ns Download snap "lxd" (21390) from channel "stable" 100% 41.5MB/s 0.0ns Fetch and check assertions for snap "lxd" (21390) / Mount snap "lxd" (21390) - Mount snap "lxd" (21390) \ Mount snap "lxd" (21390) | Mount snap "lxd" (21390) / Mount snap "lxd" (21390) - Setup snap "lxd" (21390) security profiles \ Setup snap "lxd" (21390) security profiles | Setup snap "lxd" (21390) security profiles / Setup snap "lxd" (21390) security profiles - Setup 
snap "lxd" (21390) security profiles \ Setup snap "lxd" (21390) security profiles | Setup snap "lxd" (21390) security profiles / Setup snap "lxd" (21390) security profiles - Setup snap "lxd" (21390) security profiles \ Setup snap "lxd" (21390) security profiles | Make snap "lxd" (21390) available to the system / Connect lxd:network to snapd:network - Setup snap "lxd" (21390) security profiles for auto-connections \ Setup snap "lxd" (21390) security profiles for auto-connections | Setup snap "lxd" (21390) security profiles for auto-connections / Setup snap "lxd" (21390) security profiles for auto-connections - Setup snap "lxd" (21390) security profiles for auto-connections \ Setup snap "lxd" (21390) security profiles for auto-connections | Setup snap "lxd" (21390) security profiles for auto-connections / Setup snap "lxd" (21390) security profiles for auto-connections - Setup snap "lxd" (21390) security profiles for auto-connections \ Setup snap "lxd" (21390) security profiles for auto-connections | Run install hook of "lxd" snap if present / Run install hook of "lxd" snap if present - Run install hook of "lxd" snap if present \ Run install hook of "lxd" snap if present | Start snap "lxd" (21390) services / Start snap "lxd" (21390) services - Start snap "lxd" (21390) services \ Start snap "lxd" (21390) services | Start snap "lxd" (21390) services / Start snap "lxd" (21390) services - Start snap "lxd" (21390) services \ Run configure hook of "lxd" snap if present | [?25hlxd 4.17 from Canonical* installed +To start your first instance, try: lxc launch ubuntu:18.04 + +Installing Docker CE ... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +ca-certificates is already the newest version (20210119~18.04.1). +ca-certificates set to manually installed. 
+software-properties-common is already the newest version (0.96.24.32.14). +software-properties-common set to manually installed. +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following NEW packages will be installed: + apt-transport-https +0 upgraded, 1 newly installed, 0 to remove and 4 not upgraded. +Need to get 4348 B of archives. +After this operation, 154 kB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 apt-transport-https all 1.6.14 [4348 B] +Fetched 4348 B in 0s (71.3 kB/s) +Selecting previously unselected package apt-transport-https. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 61437 files and directories currently installed.) +Preparing to unpack .../apt-transport-https_1.6.14_all.deb ... +Unpacking apt-transport-https (1.6.14) ... +Setting up apt-transport-https (1.6.14) ... 
+Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Get:2 https://download.docker.com/linux/ubuntu bionic InRelease [64.4 kB] +Hit:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:6 http://security.ubuntu.com/ubuntu bionic-security InRelease +Get:7 https://download.docker.com/linux/ubuntu bionic/stable amd64 Packages [19.8 kB] +Fetched 84.3 kB in 1s (124 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following additional packages will be installed: + containerd.io docker-ce-cli docker-ce-rootless-extras docker-scan-plugin + libltdl7 pigz +Suggested packages: + aufs-tools cgroupfs-mount | cgroup-lite +Recommended packages: + slirp4netns +The following NEW packages will be installed: + containerd.io docker-ce docker-ce-cli docker-ce-rootless-extras + docker-scan-plugin libltdl7 pigz +0 upgraded, 7 newly installed, 0 to remove and 4 not upgraded. +Need to get 96.7 MB of archives. +After this operation, 407 MB of additional disk space will be used. 
+Get:1 http://archive.ubuntu.com/ubuntu bionic/universe amd64 pigz amd64 2.4-1 [57.4 kB] +Get:2 https://download.docker.com/linux/ubuntu bionic/stable amd64 containerd.io amd64 1.4.9-1 [24.7 MB] +Get:3 http://archive.ubuntu.com/ubuntu bionic/main amd64 libltdl7 amd64 2.4.6-2 [38.8 kB] +Get:4 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-ce-cli amd64 5:20.10.8~3-0~ubuntu-bionic [38.8 MB] +Get:5 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-ce amd64 5:20.10.8~3-0~ubuntu-bionic [21.2 MB] +Get:6 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-ce-rootless-extras amd64 5:20.10.8~3-0~ubuntu-bionic [7911 kB] +Get:7 https://download.docker.com/linux/ubuntu bionic/stable amd64 docker-scan-plugin amd64 0.8.0~ubuntu-bionic [3888 kB] +Fetched 96.7 MB in 2s (40.3 MB/s) +Selecting previously unselected package pigz. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 61441 files and directories currently installed.) +Preparing to unpack .../0-pigz_2.4-1_amd64.deb ... +Unpacking pigz (2.4-1) ... +Selecting previously unselected package containerd.io. +Preparing to unpack .../1-containerd.io_1.4.9-1_amd64.deb ... +Unpacking containerd.io (1.4.9-1) ... +Selecting previously unselected package docker-ce-cli. +Preparing to unpack .../2-docker-ce-cli_5%3a20.10.8~3-0~ubuntu-bionic_amd64.deb ... +Unpacking docker-ce-cli (5:20.10.8~3-0~ubuntu-bionic) ... +Selecting previously unselected package docker-ce. 
+Preparing to unpack .../3-docker-ce_5%3a20.10.8~3-0~ubuntu-bionic_amd64.deb ... +Unpacking docker-ce (5:20.10.8~3-0~ubuntu-bionic) ... +Selecting previously unselected package docker-ce-rootless-extras. +Preparing to unpack .../4-docker-ce-rootless-extras_5%3a20.10.8~3-0~ubuntu-bionic_amd64.deb ... +Unpacking docker-ce-rootless-extras (5:20.10.8~3-0~ubuntu-bionic) ... +Selecting previously unselected package docker-scan-plugin. +Preparing to unpack .../5-docker-scan-plugin_0.8.0~ubuntu-bionic_amd64.deb ... +Unpacking docker-scan-plugin (0.8.0~ubuntu-bionic) ... +Selecting previously unselected package libltdl7:amd64. +Preparing to unpack .../6-libltdl7_2.4.6-2_amd64.deb ... +Unpacking libltdl7:amd64 (2.4.6-2) ... +Setting up containerd.io (1.4.9-1) ... +Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /lib/systemd/system/containerd.service. +Setting up docker-ce-rootless-extras (5:20.10.8~3-0~ubuntu-bionic) ... +Setting up docker-scan-plugin (0.8.0~ubuntu-bionic) ... +Setting up libltdl7:amd64 (2.4.6-2) ... +Setting up docker-ce-cli (5:20.10.8~3-0~ubuntu-bionic) ... +Setting up pigz (2.4-1) ... +Setting up docker-ce (5:20.10.8~3-0~ubuntu-bionic) ... +Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /lib/systemd/system/docker.service. +Created symlink /etc/systemd/system/sockets.target.wants/docker.socket → /lib/systemd/system/docker.socket. +Processing triggers for libc-bin (2.27-3ubuntu1.4) ... +Processing triggers for systemd (237-3ubuntu10.51) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +Processing triggers for ureadahead (0.100.0-21) ... +Adding user to group 'docker' +... 
restarted Docker service +Client: Docker Engine - Community + Version: 20.10.8 + API version: 1.41 + Go version: go1.16.6 + Git commit: 3967b7d + Built: Fri Jul 30 19:54:08 2021 + OS/Arch: linux/amd64 + Context: default + Experimental: true + +Server: Docker Engine - Community + Engine: + Version: 20.10.8 + API version: 1.41 (minimum version 1.12) + Go version: go1.16.6 + Git commit: 75249d8 + Built: Fri Jul 30 19:52:16 2021 + OS/Arch: linux/amd64 + Experimental: false + containerd: + Version: 1.4.9 + GitCommit: e25210fe30a0a703442421b0f60afac609f950a3 + runc: + Version: 1.0.1 + GitCommit: v1.0.1-0-g4144b63 + docker-init: + Version: 0.19.0 + GitCommit: de40ad0 +... Docker CE installation done +Creating folders for installation +Hit:1 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:6 http://security.ubuntu.com/ubuntu bionic-security InRelease +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +apt-transport-https is already the newest version (1.6.14). +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base ebtables libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +0 upgraded, 0 newly installed, 0 to remove and 4 not upgraded. 
+Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:7 http://security.ubuntu.com/ubuntu bionic-security InRelease +Get:6 https://packages.cloud.google.com/apt kubernetes-xenial InRelease [9383 B] +Get:8 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 Packages [49.4 kB] +Fetched 58.8 kB in 1s (57.4 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Hit:2 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease +Hit:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:7 http://security.ubuntu.com/ubuntu bionic-security InRelease +Get:6 https://packages.cloud.google.com/apt kubernetes-xenial InRelease [9383 B] +Fetched 9383 B in 1s (11.0 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Installing Kubernetes Packages ... +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. 
+The following additional packages will be installed: + conntrack cri-tools kubernetes-cni socat +The following NEW packages will be installed: + conntrack cri-tools kubeadm kubectl kubelet kubernetes-cni socat +0 upgraded, 7 newly installed, 0 to remove and 4 not upgraded. +Need to get 71.4 MB of archives. +After this operation, 302 MB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu bionic/main amd64 conntrack amd64 1:1.4.4+snapshot20161117-6ubuntu2 [30.6 kB] +Get:2 http://archive.ubuntu.com/ubuntu bionic/main amd64 socat amd64 1.7.3.2-2ubuntu2 [342 kB] +Get:3 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 cri-tools amd64 1.13.0-01 [8775 kB] +Get:4 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubernetes-cni amd64 0.8.7-00 [25.0 MB] +Get:5 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubelet amd64 1.15.0-00 [20.2 MB] +Get:6 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubectl amd64 1.15.0-00 [8763 kB] +Get:7 https://packages.cloud.google.com/apt kubernetes-xenial/main amd64 kubeadm amd64 1.15.0-00 [8246 kB] +Fetched 71.4 MB in 3s (25.4 MB/s) +Selecting previously unselected package conntrack. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 61694 files and directories currently installed.) +Preparing to unpack .../0-conntrack_1%3a1.4.4+snapshot20161117-6ubuntu2_amd64.deb ... +Unpacking conntrack (1:1.4.4+snapshot20161117-6ubuntu2) ... 
+Selecting previously unselected package cri-tools. +Preparing to unpack .../1-cri-tools_1.13.0-01_amd64.deb ... +Unpacking cri-tools (1.13.0-01) ... +Selecting previously unselected package kubernetes-cni. +Preparing to unpack .../2-kubernetes-cni_0.8.7-00_amd64.deb ... +Unpacking kubernetes-cni (0.8.7-00) ... +Selecting previously unselected package socat. +Preparing to unpack .../3-socat_1.7.3.2-2ubuntu2_amd64.deb ... +Unpacking socat (1.7.3.2-2ubuntu2) ... +Selecting previously unselected package kubelet. +Preparing to unpack .../4-kubelet_1.15.0-00_amd64.deb ... +Unpacking kubelet (1.15.0-00) ... +Selecting previously unselected package kubectl. +Preparing to unpack .../5-kubectl_1.15.0-00_amd64.deb ... +Unpacking kubectl (1.15.0-00) ... +Selecting previously unselected package kubeadm. +Preparing to unpack .../6-kubeadm_1.15.0-00_amd64.deb ... +Unpacking kubeadm (1.15.0-00) ... +Setting up conntrack (1:1.4.4+snapshot20161117-6ubuntu2) ... +Setting up kubernetes-cni (0.8.7-00) ... +Setting up cri-tools (1.13.0-01) ... +Setting up socat (1.7.3.2-2ubuntu2) ... +Setting up kubelet (1.15.0-00) ... +Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /lib/systemd/system/kubelet.service. +Setting up kubectl (1.15.0-00) ... +Setting up kubeadm (1.15.0-00) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +kubelet set on hold. +kubeadm set on hold. +kubectl set on hold. +I0906 20:08:56.355058 11015 version.go:248] remote version is much newer: v1.22.1; falling back to: stable-1.15 +[init] Using Kubernetes version: v1.15.12 +[preflight] Running pre-flight checks + [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/ + [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.8. 
Latest validated version: 18.09 +[preflight] Pulling images required for setting up a Kubernetes cluster +[preflight] This might take a minute or two, depending on the speed of your internet connection +[preflight] You can also perform this action in beforehand using 'kubeadm config images pull' +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Activating the kubelet service +[certs] Using certificateDir folder "/etc/kubernetes/pki" +[certs] Generating "etcd/ca" certificate and key +[certs] Generating "etcd/healthcheck-client" certificate and key +[certs] Generating "apiserver-etcd-client" certificate and key +[certs] Generating "etcd/server" certificate and key +[certs] etcd/server serving cert is signed for DNS names [osm localhost] and IPs [192.168.64.19 127.0.0.1 ::1] +[certs] Generating "etcd/peer" certificate and key +[certs] etcd/peer serving cert is signed for DNS names [osm localhost] and IPs [192.168.64.19 127.0.0.1 ::1] +[certs] Generating "ca" certificate and key +[certs] Generating "apiserver" certificate and key +[certs] apiserver serving cert is signed for DNS names [osm kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.64.19] +[certs] Generating "apiserver-kubelet-client" certificate and key +[certs] Generating "front-proxy-ca" certificate and key +[certs] Generating "front-proxy-client" certificate and key +[certs] Generating "sa" key and public key +[kubeconfig] Using kubeconfig folder "/etc/kubernetes" +[kubeconfig] Writing "admin.conf" kubeconfig file +[kubeconfig] Writing "kubelet.conf" kubeconfig file +[kubeconfig] Writing "controller-manager.conf" kubeconfig file +[kubeconfig] Writing "scheduler.conf" kubeconfig file +[control-plane] Using manifest folder "/etc/kubernetes/manifests" +[control-plane] Creating static Pod 
manifest for "kube-apiserver" +[control-plane] Creating static Pod manifest for "kube-controller-manager" +[control-plane] Creating static Pod manifest for "kube-scheduler" +[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" +[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s +[apiclient] All control plane components are healthy after 27.504946 seconds +[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace +[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster +[upload-certs] Skipping phase. Please see --upload-certs +[mark-control-plane] Marking the node osm as control-plane by adding the label "node-role.kubernetes.io/master=''" +[mark-control-plane] Marking the node osm as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] +[bootstrap-token] Using token: kn0vr5.xws6ut9lw1oau25w +[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles +[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials +[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token +[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster +[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace +[addons] Applied essential addon: CoreDNS +[addons] Applied essential addon: kube-proxy + +Your Kubernetes control-plane has initialized successfully! 
+ +To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + +You should now deploy a pod network to the cluster. +Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + https://kubernetes.io/docs/concepts/cluster-administration/addons/ + +Then you can join any number of worker nodes by running the following on each as root: + +kubeadm join 192.168.64.19:6443 --token kn0vr5.xws6ut9lw1oau25w \ + --discovery-token-ca-cert-hash sha256:74d6e6d0a214f8fcb18ad2ca37b5cb087c47f668a32b63a13f871bd68e24d16f +Error from server (NotFound): namespaces "osm" not found +podsecuritypolicy.policy/psp.flannel.unprivileged created +clusterrole.rbac.authorization.k8s.io/flannel created +clusterrolebinding.rbac.authorization.k8s.io/flannel created +serviceaccount/flannel created +configmap/kube-flannel-cfg created +daemonset.apps/kube-flannel-ds created +node/osm untainted +Helm is not installed, installing ... + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 57 13.0M 57 7744k 0 0 12.2M 0 0:00:01 --:--:-- 0:00:01 12.2M 100 13.0M 100 13.0M 0 0 13.3M 0 --:--:-- --:--:-- --:--:-- 13.3M +linux-amd64/ +linux-amd64/helm +linux-amd64/LICENSE +linux-amd64/README.md +"stable" has been added to your repositories +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "stable" chart repository +Update Complete. ⎈Happy Helming!⎈ +Installing OpenEBS +namespace/openebs created +"openebs" has been added to your repositories +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "openebs" chart repository +...Successfully got an update from the "stable" chart repository +Update Complete. 
⎈Happy Helming!⎈ +NAME: openebs +LAST DEPLOYED: Mon Sep 6 20:10:27 2021 +NAMESPACE: openebs +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +The OpenEBS has been installed. Check its status by running: +$ kubectl get pods -n openebs + +For dynamically creating OpenEBS Volumes, you can either create a new StorageClass or +use one of the default storage classes provided by OpenEBS. + +Use `kubectl get sc` to see the list of installed OpenEBS StorageClasses. A sample +PVC spec using `openebs-jiva-default` StorageClass is given below:" + +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: demo-vol-claim +spec: + storageClassName: openebs-jiva-default + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5G +--- + +Please note that, OpenEBS uses iSCSI for connecting applications with the +OpenEBS Volumes and your nodes should have the iSCSI initiator installed. + +For more information, visit our Slack at https://openebs.io/community or view the documentation online at http://docs.openebs.io/. 
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +openebs openebs 1 2021-09-06 20:10:27.068034231 +0200 CEST deployed openebs-1.12.0 1.12.0 +Waiting for storageclass +Storageclass available +storageclass.storage.k8s.io/openebs-hostpath patched +namespace/metallb-system created +serviceaccount/controller created +serviceaccount/speaker created +clusterrole.rbac.authorization.k8s.io/metallb-system:controller created +clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created +role.rbac.authorization.k8s.io/leader-election created +role.rbac.authorization.k8s.io/config-watcher created +clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created +clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created +rolebinding.rbac.authorization.k8s.io/config-watcher created +rolebinding.rbac.authorization.k8s.io/leader-election created +daemonset.apps/speaker created +deployment.apps/controller created +configmap/config created +Installing juju + Ensure prerequisites for "juju" are available / Ensure prerequisites for "juju" are available - Ensure prerequisites for "core18" are available \ Download snap "core18" (2128) from channel "stable" | Download snap "core18" (2128) from channel "stable" / Download snap "core18" (2128) from channel "stable" - Download snap "core18" (2128) from channel "stable" \ Download snap "core18" (2128) from channel "stable" |[?25l Download snap "core18" (2128) from channel "stable" 5% 26.9MB/s 2.05s Download snap "core18" (2128) from channel "stable" 14% 39.7MB/s 1.25s Download snap "core18" (2128) from channel "stable" 24% 44.0MB/s 1.01s Download snap "core18" (2128) from channel "stable"  30% 41.4MB/s 984ms Download snap "core18" (2128) from channel "stable"  39% 43.4MB/s 816ms Download snap "core18" (2128) from channel "stable"  46% 42.0MB/s 747ms Download snap "core18" (2128) from channel "stable"  55% 42.4MB/s 622ms Download snap "core18" (2128) from channel "stable"  64% 43.4MB/s 485ms 
Download snap "core18" (2128) from channel "stable"  73% 44.1MB/s 362ms Download snap "core18" (2128) from channel "stable"  83% 45.6MB/s 216ms Download snap "core18" (2128) from channel "stable" 91% 44.9MB/s 114ms Download snap "core18" (2128) from channel "stable" 100% 44.3MB/s 0.0ns Download snap "core18" (2128) from channel "stable" 100% 40.7MB/s 0.0ns Download snap "core18" (2128) from channel "stable" 100% 38.0MB/s 0.0ns Download snap "core18" (2128) from channel "stable" 100% 35.6MB/s 0.0ns Download snap "core18" (2128) from channel "stable" 100% 33.4MB/s 0.0ns Download snap "core18" (2128) from channel "stable" 100% 31.5MB/s 0.0ns Download snap "core18" (2128) from channel "stable" 100% 29.8MB/s 0.0ns Download snap "core18" (2128) from channel "stable" 100% 28.3MB/s 0.0ns Fetch and check assertions for snap "core18" (2128) / Fetch and check assertions for snap "core18" (2128) - Fetch and check assertions for snap "core18" (2128) \ Mount snap "core18" (2128) | Mount snap "core18" (2128) / Mount snap "core18" (2128) - Mount snap "core18" (2128) \ Mount snap "core18" (2128) | Mount snap "core18" (2128) / Mount snap "core18" (2128) - Mount snap "core18" (2128) \ Mount snap "core18" (2128) | Mount snap "core18" (2128) / Mount snap "core18" (2128) - Mount snap "core18" (2128) \ Mount snap "core18" (2128) | Mount snap "core18" (2128) / Mount snap "core18" (2128) - Mount snap "core18" (2128) \ Mount snap "core18" (2128) | Mount snap "core18" (2128) / Mount snap "core18" (2128) - Mount snap "core18" (2128) \ Mount snap "core18" (2128) | Mount snap "core18" (2128) / Mount snap "core18" (2128) - Mount snap "core18" (2128) \ Mount snap "core18" (2128) | Mount snap "core18" (2128) / Mount snap "core18" (2128) - Mount snap "core18" (2128) \ Mount snap "core18" (2128) | Mount snap "core18" (2128) / Mount snap "core18" (2128) - Mount snap "core18" (2128) \ Setup snap "core18" (2128) security profiles | Set automatic aliases for snap "core18" / Download snap "juju" (17062) 
from channel "2.9/stable" - Download snap "juju" (17062) from channel "2.9/stable" \ Download snap "juju" (17062) from channel "2.9/stable" |[?25l Download snap "juju" (17062) from channel "2.9/stable" 0% 0B/s ages! Download snap "juju" (17062) from channel "2.9/stable" 0% 0B/s ages! Download snap "juju" (17062) from channel "2.9/stable" 0% 559kB/s 3m06s Download snap "juju" (17062) from channel "2.9/stable" 0% 873kB/s 1m59s Download snap "juju" (17062) from channel "2.9/stable" 1% 1.05MB/s 1m39s Download snap "juju" (17062) from channel "2.9/stable" 1% 1.20MB/s 1m26s Download snap "juju" (17062) from channel "2.9/stable" 1% 1.30MB/s 1m19s Download snap "juju" (17062) from channel "2.9/stable" 1% 1.59MB/s 1m04s Download snap "juju" (17062) from channel "2.9/stable" 2% 1.84MB/s 55.5s Download snap "juju" (17062) from channel "2.9/stable" 2% 2.22MB/s 45.7s Download snap "juju" (17062) from channel "2.9/stable" 3% 2.55MB/s 39.6s Download snap "juju" (17062) from channel "2.9/stable" 4% 2.98MB/s 33.6s Download snap "juju" (17062) from channel "2.9/stable" 5% 3.49MB/s 28.4s Download snap "juju" (17062) from channel "2.9/stable" 6% 4.07MB/s 24.0s Download snap "juju" (17062) from channel "2.9/stable" 8% 4.83MB/s 19.9s Download snap "juju" (17062) from channel "2.9/stable" 9% 5.25MB/s 18.1s Download snap "juju" (17062) from channel "2.9/stable" 12% 6.76MB/s 13.5s Download snap "juju" (17062) from channel "2.9/stable" 14% 7.17MB/s 12.5s Download snap "juju" (17062) from channel "2.9/stable" 16% 8.02MB/s 10.9s Download snap "juju" (17062) from channel "2.9/stable" 19% 8.97MB/s 9.40s Download snap "juju" (17062) from channel "2.9/stable" 23% 10.1MB/s 7.98s Download snap "juju" (17062) from channel "2.9/stable" 26% 11.0MB/s 7.05s Download snap "juju" (17062) from channel "2.9/stable" 28% 11.7MB/s 6.37s Download snap "juju" (17062) from channel "2.9/stable" 31% 12.1MB/s 5.96s Download snap "juju" (17062) from channel "2.9/stable"  34% 13.0MB/s 5.29s Download snap "juju" 
(17062) from channel "2.9/stable"  37% 13.7MB/s 4.77s Download snap "juju" (17062) from channel "2.9/stable"  41% 14.3MB/s 4.32s Download snap "juju" (17062) from channel "2.9/stable"  45% 15.2MB/s 3.80s Download snap "juju" (17062) from channel "2.9/stable"  48% 15.8MB/s 3.42s Download snap "juju" (17062) from channel "2.9/stable"  53% 16.6MB/s 2.98s Download snap "juju" (17062) from channel "2.9/stable"  58% 17.7MB/s 2.50s Download snap "juju" (17062) from channel "2.9/stable"  61% 18.2MB/s 2.22s Download snap "juju" (17062) from channel "2.9/stable"  65% 18.8MB/s 1.95s Download snap "juju" (17062) from channel "2.9/stable"  69% 19.3MB/s 1.68s Download snap "juju" (17062) from channel "2.9/stable"  73% 19.9MB/s 1.42s Download snap "juju" (17062) from channel "2.9/stable"  76% 20.2MB/s 1.24s Download snap "juju" (17062) from channel "2.9/stable"  79% 20.5MB/s 1.06s Download snap "juju" (17062) from channel "2.9/stable"  83% 21.0MB/s 855ms Download snap "juju" (17062) from channel "2.9/stable"  86% 21.3MB/s 660ms Download snap "juju" (17062) from channel "2.9/stable" 91% 21.8MB/s 451ms Download snap "juju" (17062) from channel "2.9/stable" 95% 22.3MB/s 242ms Download snap "juju" (17062) from channel "2.9/stable" 99% 22.8MB/s 41ms Download snap "juju" (17062) from channel "2.9/stable" 100% 22.5MB/s 0.0ns Download snap "juju" (17062) from channel "2.9/stable" 100% 22.0MB/s 0.0ns Download snap "juju" (17062) from channel "2.9/stable" 100% 21.5MB/s 0.0ns Download snap "juju" (17062) from channel "2.9/stable" 100% 21.0MB/s 0.0ns Download snap "juju" (17062) from channel "2.9/stable" 100% 20.3MB/s 0.0ns Download snap "juju" (17062) from channel "2.9/stable" 100% 19.8MB/s 0.0ns Download snap "juju" (17062) from channel "2.9/stable" 100% 19.4MB/s 0.0ns Download snap "juju" (17062) from channel "2.9/stable" 100% 19.0MB/s 0.0ns Download snap "juju" (17062) from channel "2.9/stable" 100% 18.6MB/s 0.0ns Fetch and check assertions for snap "juju" (17062) / Fetch and check 
assertions for snap "juju" (17062) - Fetch and check assertions for snap "juju" (17062) \ Mount snap "juju" (17062) | Mount snap "juju" (17062) / Mount snap "juju" (17062) - Mount snap "juju" (17062) \ Mount snap "juju" (17062) | Mount snap "juju" (17062) / Mount snap "juju" (17062) - Setup snap "juju" (17062) security profiles \ Setup snap "juju" (17062) security profiles | Start snap "juju" (17062) services / Start snap "juju" (17062) services - Start snap "juju" (17062) services \ Start snap "juju" (17062) services | Run configure hook of "juju" snap if present / [?25hjuju (2.9/stable) 2.9.12 from Canonical* installed +no crontab for ubuntu ++ LAYER_BASIC='gcc build-essential python3-pip python3-setuptools python3-yaml' ++ TRUSTY_PACKAGES=python-virtualenv ++ XENIAL_PACKAGES=virtualenv ++ BIONIC_PACKAGES=virtualenv ++ DOWNLOAD_PACKAGES= ++ CLOUD_INIT_PACKAGES='curl cpu-checker bridge-utils cloud-utils tmux ubuntu-fan' ++ PACKAGES='gcc build-essential python3-pip python3-setuptools python3-yaml ' +++ juju version +Since Juju 2 is being run for the first time, downloaded the latest public cloud information. 
++ JUJU_FULL_VERSION=2.9.12-ubuntu-amd64 +++ echo 2.9.12-ubuntu-amd64 +++ awk -F- '{print $1}' ++ JUJU_VERSION=2.9.12 +++ echo 2.9.12-ubuntu-amd64 +++ awk -F- '{print $2}' ++ OS_VERSION=ubuntu +++ echo 2.9.12-ubuntu-amd64 +++ awk -F- '{print $3}' ++ ARCH=amd64 ++ '[' 1 == 0 ']' ++ '[' 0 == 1 ']' ++ '[' 1 == 1 ']' ++ cache xenial virtualenv ++ series=xenial ++ container=juju-xenial-base ++ alias=juju/xenial/amd64 ++ lxc delete juju-xenial-base -f +Error: not found ++ true ++ lxc image copy ubuntu:xenial local: --alias clean-xenial + Copying the image: metadata: 100% (1.18GB/s) Copying the image: rootfs: 1% (2.26MB/s) Copying the image: rootfs: 2% (3.95MB/s) Copying the image: rootfs: 3% (5.49MB/s) Copying the image: rootfs: 4% (7.00MB/s) Copying the image: rootfs: 5% (8.18MB/s) Copying the image: rootfs: 6% (9.16MB/s) Copying the image: rootfs: 7% (10.25MB/s) Copying the image: rootfs: 8% (11.12MB/s) Copying the image: rootfs: 9% (12.00MB/s) Copying the image: rootfs: 10% (12.70MB/s) Copying the image: rootfs: 10% (13.50MB/s) Copying the image: rootfs: 11% (13.83MB/s) Copying the image: rootfs: 12% (14.25MB/s) Copying the image: rootfs: 13% (14.87MB/s) Copying the image: rootfs: 14% (15.06MB/s) Copying the image: rootfs: 15% (15.83MB/s) Copying the image: rootfs: 16% (15.91MB/s) Copying the image: rootfs: 17% (15.76MB/s) Copying the image: rootfs: 18% (15.82MB/s) Copying the image: rootfs: 19% (16.37MB/s) Copying the image: rootfs: 20% (16.34MB/s) Copying the image: rootfs: 20% (16.47MB/s) Copying the image: rootfs: 21% (16.50MB/s) Copying the image: rootfs: 22% (16.61MB/s) Copying the image: rootfs: 23% (17.01MB/s) Copying the image: rootfs: 24% (16.11MB/s) Copying the image: rootfs: 25% (16.43MB/s) Copying the image: rootfs: 26% (16.58MB/s) Copying the image: rootfs: 27% (16.79MB/s) Copying the image: rootfs: 28% (17.09MB/s) Copying the image: rootfs: 29% (17.27MB/s) Copying the image: rootfs: 30% (17.27MB/s) Copying the image: rootfs: 31% (17.61MB/s) Copying the 
image: rootfs: 31% (17.67MB/s) Copying the image: rootfs: 32% (17.60MB/s) Copying the image: rootfs: 33% (17.53MB/s) Copying the image: rootfs: 34% (17.40MB/s) Copying the image: rootfs: 35% (17.35MB/s) Copying the image: rootfs: 36% (17.59MB/s) Copying the image: rootfs: 37% (17.68MB/s) Copying the image: rootfs: 38% (17.62MB/s) Copying the image: rootfs: 39% (17.81MB/s) Copying the image: rootfs: 40% (17.90MB/s) Copying the image: rootfs: 41% (17.97MB/s) Copying the image: rootfs: 41% (18.01MB/s) Copying the image: rootfs: 42% (18.19MB/s) Copying the image: rootfs: 43% (18.31MB/s) Copying the image: rootfs: 44% (18.47MB/s) Copying the image: rootfs: 45% (18.55MB/s) Copying the image: rootfs: 46% (18.74MB/s) Copying the image: rootfs: 47% (18.80MB/s) Copying the image: rootfs: 48% (18.99MB/s) Copying the image: rootfs: 49% (19.06MB/s) Copying the image: rootfs: 50% (19.22MB/s) Copying the image: rootfs: 51% (19.07MB/s) Copying the image: rootfs: 51% (19.27MB/s) Copying the image: rootfs: 52% (19.33MB/s) Copying the image: rootfs: 53% (19.41MB/s) Copying the image: rootfs: 54% (19.49MB/s) Copying the image: rootfs: 55% (19.63MB/s) Copying the image: rootfs: 56% (19.70MB/s) Copying the image: rootfs: 57% (19.81MB/s) Copying the image: rootfs: 58% (19.86MB/s) Copying the image: rootfs: 59% (19.99MB/s) Copying the image: rootfs: 60% (20.08MB/s) Copying the image: rootfs: 61% (20.11MB/s) Copying the image: rootfs: 61% (20.23MB/s) Copying the image: rootfs: 62% (20.28MB/s) Copying the image: rootfs: 63% (20.39MB/s) Copying the image: rootfs: 64% (20.45MB/s) Copying the image: rootfs: 65% (20.51MB/s) Copying the image: rootfs: 66% (20.61MB/s) Copying the image: rootfs: 67% (20.69MB/s) Copying the image: rootfs: 68% (20.68MB/s) Copying the image: rootfs: 69% (20.65MB/s) Copying the image: rootfs: 70% (20.80MB/s) Copying the image: rootfs: 71% (20.73MB/s) Copying the image: rootfs: 72% (20.79MB/s) Copying the image: rootfs: 72% (20.88MB/s) Copying the image: rootfs: 73% 
(20.92MB/s) Copying the image: rootfs: 74% (20.98MB/s) Copying the image: rootfs: 75% (21.03MB/s) Copying the image: rootfs: 76% (21.06MB/s) Copying the image: rootfs: 77% (21.16MB/s) Copying the image: rootfs: 78% (21.22MB/s) Copying the image: rootfs: 79% (21.29MB/s) Copying the image: rootfs: 80% (21.27MB/s) Copying the image: rootfs: 81% (21.40MB/s) Copying the image: rootfs: 82% (21.42MB/s) Copying the image: rootfs: 82% (21.50MB/s) Copying the image: rootfs: 83% (21.54MB/s) Copying the image: rootfs: 84% (21.61MB/s) Copying the image: rootfs: 85% (21.62MB/s) Copying the image: rootfs: 86% (21.59MB/s) Copying the image: rootfs: 87% (21.68MB/s) Copying the image: rootfs: 88% (21.72MB/s) Copying the image: rootfs: 89% (21.72MB/s) Copying the image: rootfs: 90% (21.77MB/s) Copying the image: rootfs: 91% (21.63MB/s) Copying the image: rootfs: 92% (21.50MB/s) Copying the image: rootfs: 92% (21.44MB/s) Copying the image: rootfs: 93% (21.38MB/s) Copying the image: rootfs: 94% (21.49MB/s) Copying the image: rootfs: 95% (21.48MB/s) Copying the image: rootfs: 96% (21.61MB/s) Copying the image: rootfs: 97% (21.57MB/s) Copying the image: rootfs: 98% (21.58MB/s) Copying the image: rootfs: 99% (21.64MB/s) Copying the image: rootfs: 100% (21.60MB/s) Image copied successfully! 
++ lxc launch ubuntu:xenial juju-xenial-base +Creating juju-xenial-base + Retrieving image: Unpack: 100% (4.33GB/s) Retrieving image: Unpack: 100% (4.33GB/s) Starting juju-xenial-base + Remapping container filesystem + sleep 15 ++ lxc exec juju-xenial-base -- apt-get update -y +Hit:1 http://archive.ubuntu.com/ubuntu xenial InRelease +Get:2 http://archive.ubuntu.com/ubuntu xenial-updates InRelease [109 kB] +Get:3 http://security.ubuntu.com/ubuntu xenial-security InRelease [109 kB] +Get:4 http://archive.ubuntu.com/ubuntu xenial-backports InRelease [107 kB] +Get:5 http://archive.ubuntu.com/ubuntu xenial/universe amd64 Packages [7532 kB] +Get:6 http://archive.ubuntu.com/ubuntu xenial/universe Translation-en [4354 kB] +Get:7 http://archive.ubuntu.com/ubuntu xenial/multiverse amd64 Packages [144 kB] +Get:8 http://archive.ubuntu.com/ubuntu xenial/multiverse Translation-en [106 kB] +Get:9 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 Packages [2049 kB] +Get:10 http://archive.ubuntu.com/ubuntu xenial-updates/main Translation-en [482 kB] +Get:11 http://archive.ubuntu.com/ubuntu xenial-updates/universe amd64 Packages [1219 kB] +Get:12 http://security.ubuntu.com/ubuntu xenial-security/main amd64 Packages [1648 kB] +Get:13 http://archive.ubuntu.com/ubuntu xenial-updates/universe Translation-en [358 kB] +Get:14 http://archive.ubuntu.com/ubuntu xenial-updates/multiverse amd64 Packages [22.6 kB] +Get:15 http://archive.ubuntu.com/ubuntu xenial-updates/multiverse Translation-en [8476 B] +Get:16 http://archive.ubuntu.com/ubuntu xenial-backports/main amd64 Packages [9812 B] +Get:17 http://archive.ubuntu.com/ubuntu xenial-backports/main Translation-en [4456 B] +Get:18 http://archive.ubuntu.com/ubuntu xenial-backports/universe amd64 Packages [11.3 kB] +Get:19 http://archive.ubuntu.com/ubuntu xenial-backports/universe Translation-en [4476 B] +Get:20 http://security.ubuntu.com/ubuntu xenial-security/main Translation-en [380 kB] +Get:21 http://security.ubuntu.com/ubuntu 
xenial-security/universe amd64 Packages [785 kB] +Get:22 http://security.ubuntu.com/ubuntu xenial-security/universe Translation-en [225 kB] +Get:23 http://security.ubuntu.com/ubuntu xenial-security/multiverse amd64 Packages [7864 B] +Get:24 http://security.ubuntu.com/ubuntu xenial-security/multiverse Translation-en [2672 B] +Fetched 19.7 MB in 5s (3862 kB/s) +Reading package lists... ++ lxc exec juju-xenial-base -- apt-get upgrade -y +Reading package lists... +Building dependency tree... +Reading state information... +Calculating upgrade... +The following package was automatically installed and is no longer required: + libfreetype6 +Use 'apt autoremove' to remove it. +The following packages have been kept back: + ubuntu-advantage-tools update-notifier-common +The following packages will be upgraded: + apt apt-transport-https apt-utils bind9-host distro-info-data dnsutils + libapt-inst2.0 libapt-pkg5.0 libbind9-140 libc-bin libc6 libdns-export162 + libdns162 libisc-export160 libisc160 libisccc140 libisccfg140 liblwres141 + libssl1.0.0 locales multiarch-support openssl python-apt-common python3-apt + sosreport +25 upgraded, 0 newly installed, 0 to remove and 2 not upgraded. +Need to get 12.6 MB of archives. +After this operation, 48.1 kB of additional disk space will be used. 
+Get:1 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libc6 amd64 2.23-0ubuntu11.3 [2590 kB] +Get:2 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 locales all 2.23-0ubuntu11.3 [3197 kB] +Get:3 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libc-bin amd64 2.23-0ubuntu11.3 [629 kB] +Get:4 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libapt-pkg5.0 amd64 1.2.35 [715 kB] +Get:5 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libapt-inst2.0 amd64 1.2.35 [54.8 kB] +Get:6 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 apt amd64 1.2.35 [1107 kB] +Get:7 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 apt-utils amd64 1.2.35 [196 kB] +Get:8 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 multiarch-support amd64 2.23-0ubuntu11.3 [6830 B] +Get:9 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 distro-info-data all 0.28ubuntu0.18 [4530 B] +Get:10 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libisc-export160 amd64 1:9.10.3.dfsg.P4-8ubuntu1.19 [153 kB] +Get:11 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libdns-export162 amd64 1:9.10.3.dfsg.P4-8ubuntu1.19 [665 kB] +Get:12 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libssl1.0.0 amd64 1.0.2g-1ubuntu4.20 [1083 kB] +Get:13 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 apt-transport-https amd64 1.2.35 [26.6 kB] +Get:14 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 bind9-host amd64 1:9.10.3.dfsg.P4-8ubuntu1.19 [38.3 kB] +Get:15 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 dnsutils amd64 1:9.10.3.dfsg.P4-8ubuntu1.19 [88.9 kB] +Get:16 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libisc160 amd64 1:9.10.3.dfsg.P4-8ubuntu1.19 [215 kB] +Get:17 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libdns162 amd64 1:9.10.3.dfsg.P4-8ubuntu1.19 [872 kB] +Get:18 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libisccc140 amd64 
1:9.10.3.dfsg.P4-8ubuntu1.19 [16.3 kB] +Get:19 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libisccfg140 amd64 1:9.10.3.dfsg.P4-8ubuntu1.19 [40.5 kB] +Get:20 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 liblwres141 amd64 1:9.10.3.dfsg.P4-8ubuntu1.19 [33.9 kB] +Get:21 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libbind9-140 amd64 1:9.10.3.dfsg.P4-8ubuntu1.19 [23.6 kB] +Get:22 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 openssl amd64 1.0.2g-1ubuntu4.20 [492 kB] +Get:23 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 python-apt-common all 1.1.0~beta1ubuntu0.16.04.12 [16.7 kB] +Get:24 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 python3-apt amd64 1.1.0~beta1ubuntu0.16.04.12 [145 kB] +Get:25 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 sosreport amd64 3.9.1-1ubuntu0.16.04.2 [170 kB] +dpkg-preconfigure: unable to re-open stdin: No such file or directory +Fetched 12.6 MB in 0s (18.9 MB/s) +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 25821 files and directories currently installed.) +Preparing to unpack .../libc6_2.23-0ubuntu11.3_amd64.deb ... +Unpacking libc6:amd64 (2.23-0ubuntu11.3) over (2.23-0ubuntu11.2) ... +Setting up libc6:amd64 (2.23-0ubuntu11.3) ... +Processing triggers for libc-bin (2.23-0ubuntu11.2) ... +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 
25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 25821 files and directories currently installed.) +Preparing to unpack .../locales_2.23-0ubuntu11.3_all.deb ... +Unpacking locales (2.23-0ubuntu11.3) over (2.23-0ubuntu11.2) ... +Preparing to unpack .../libc-bin_2.23-0ubuntu11.3_amd64.deb ... +Unpacking libc-bin (2.23-0ubuntu11.3) over (2.23-0ubuntu11.2) ... +Processing triggers for man-db (2.7.5-1) ... +Setting up libc-bin (2.23-0ubuntu11.3) ... +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 25821 files and directories currently installed.) +Preparing to unpack .../libapt-pkg5.0_1.2.35_amd64.deb ... +Unpacking libapt-pkg5.0:amd64 (1.2.35) over (1.2.32ubuntu0.2) ... +Processing triggers for libc-bin (2.23-0ubuntu11.3) ... +Setting up libapt-pkg5.0:amd64 (1.2.35) ... +Processing triggers for libc-bin (2.23-0ubuntu11.3) ... +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 
50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 25821 files and directories currently installed.) +Preparing to unpack .../libapt-inst2.0_1.2.35_amd64.deb ... +Unpacking libapt-inst2.0:amd64 (1.2.35) over (1.2.32ubuntu0.2) ... +Preparing to unpack .../archives/apt_1.2.35_amd64.deb ... +Unpacking apt (1.2.35) over (1.2.32ubuntu0.2) ... +Processing triggers for libc-bin (2.23-0ubuntu11.3) ... +Processing triggers for man-db (2.7.5-1) ... +Setting up apt (1.2.35) ... +Processing triggers for libc-bin (2.23-0ubuntu11.3) ... +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 25821 files and directories currently installed.) +Preparing to unpack .../apt-utils_1.2.35_amd64.deb ... +Unpacking apt-utils (1.2.35) over (1.2.32ubuntu0.2) ... +Preparing to unpack .../multiarch-support_2.23-0ubuntu11.3_amd64.deb ... +Unpacking multiarch-support (2.23-0ubuntu11.3) over (2.23-0ubuntu11.2) ... +Processing triggers for man-db (2.7.5-1) ... +Setting up multiarch-support (2.23-0ubuntu11.3) ... +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 
45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 25821 files and directories currently installed.) +Preparing to unpack .../distro-info-data_0.28ubuntu0.18_all.deb ... +Unpacking distro-info-data (0.28ubuntu0.18) over (0.28ubuntu0.17) ... +Preparing to unpack .../libisc-export160_1%3a9.10.3.dfsg.P4-8ubuntu1.19_amd64.deb ... +Unpacking libisc-export160 (1:9.10.3.dfsg.P4-8ubuntu1.19) over (1:9.10.3.dfsg.P4-8ubuntu1.18) ... +Preparing to unpack .../libdns-export162_1%3a9.10.3.dfsg.P4-8ubuntu1.19_amd64.deb ... +Unpacking libdns-export162 (1:9.10.3.dfsg.P4-8ubuntu1.19) over (1:9.10.3.dfsg.P4-8ubuntu1.18) ... +Preparing to unpack .../libssl1.0.0_1.0.2g-1ubuntu4.20_amd64.deb ... +Unpacking libssl1.0.0:amd64 (1.0.2g-1ubuntu4.20) over (1.0.2g-1ubuntu4.19) ... +Preparing to unpack .../apt-transport-https_1.2.35_amd64.deb ... +Unpacking apt-transport-https (1.2.35) over (1.2.32ubuntu0.2) ... +Preparing to unpack .../bind9-host_1%3a9.10.3.dfsg.P4-8ubuntu1.19_amd64.deb ... +Unpacking bind9-host (1:9.10.3.dfsg.P4-8ubuntu1.19) over (1:9.10.3.dfsg.P4-8ubuntu1.18) ... +Preparing to unpack .../dnsutils_1%3a9.10.3.dfsg.P4-8ubuntu1.19_amd64.deb ... +Unpacking dnsutils (1:9.10.3.dfsg.P4-8ubuntu1.19) over (1:9.10.3.dfsg.P4-8ubuntu1.18) ... +Preparing to unpack .../libisc160_1%3a9.10.3.dfsg.P4-8ubuntu1.19_amd64.deb ... +Unpacking libisc160:amd64 (1:9.10.3.dfsg.P4-8ubuntu1.19) over (1:9.10.3.dfsg.P4-8ubuntu1.18) ... +Preparing to unpack .../libdns162_1%3a9.10.3.dfsg.P4-8ubuntu1.19_amd64.deb ... +Unpacking libdns162:amd64 (1:9.10.3.dfsg.P4-8ubuntu1.19) over (1:9.10.3.dfsg.P4-8ubuntu1.18) ... +Preparing to unpack .../libisccc140_1%3a9.10.3.dfsg.P4-8ubuntu1.19_amd64.deb ... 
+Unpacking libisccc140:amd64 (1:9.10.3.dfsg.P4-8ubuntu1.19) over (1:9.10.3.dfsg.P4-8ubuntu1.18) ... +Preparing to unpack .../libisccfg140_1%3a9.10.3.dfsg.P4-8ubuntu1.19_amd64.deb ... +Unpacking libisccfg140:amd64 (1:9.10.3.dfsg.P4-8ubuntu1.19) over (1:9.10.3.dfsg.P4-8ubuntu1.18) ... +Preparing to unpack .../liblwres141_1%3a9.10.3.dfsg.P4-8ubuntu1.19_amd64.deb ... +Unpacking liblwres141:amd64 (1:9.10.3.dfsg.P4-8ubuntu1.19) over (1:9.10.3.dfsg.P4-8ubuntu1.18) ... +Preparing to unpack .../libbind9-140_1%3a9.10.3.dfsg.P4-8ubuntu1.19_amd64.deb ... +Unpacking libbind9-140:amd64 (1:9.10.3.dfsg.P4-8ubuntu1.19) over (1:9.10.3.dfsg.P4-8ubuntu1.18) ... +Preparing to unpack .../openssl_1.0.2g-1ubuntu4.20_amd64.deb ... +Unpacking openssl (1.0.2g-1ubuntu4.20) over (1.0.2g-1ubuntu4.19) ... +Preparing to unpack .../python-apt-common_1.1.0~beta1ubuntu0.16.04.12_all.deb ... +Unpacking python-apt-common (1.1.0~beta1ubuntu0.16.04.12) over (1.1.0~beta1ubuntu0.16.04.11) ... +Preparing to unpack .../python3-apt_1.1.0~beta1ubuntu0.16.04.12_amd64.deb ... +Unpacking python3-apt (1.1.0~beta1ubuntu0.16.04.12) over (1.1.0~beta1ubuntu0.16.04.11) ... +Preparing to unpack .../sosreport_3.9.1-1ubuntu0.16.04.2_amd64.deb ... +Unpacking sosreport (3.9.1-1ubuntu0.16.04.2) over (3.9.1-1ubuntu0.16.04.1) ... +Processing triggers for libc-bin (2.23-0ubuntu11.3) ... +Processing triggers for man-db (2.7.5-1) ... +Setting up locales (2.23-0ubuntu11.3) ... +Generating locales (this might take a while)... + en_US.UTF-8... done +Generation complete. +Setting up libapt-inst2.0:amd64 (1.2.35) ... +Setting up apt-utils (1.2.35) ... +Setting up distro-info-data (0.28ubuntu0.18) ... +Setting up libisc-export160 (1:9.10.3.dfsg.P4-8ubuntu1.19) ... +Setting up libdns-export162 (1:9.10.3.dfsg.P4-8ubuntu1.19) ... +Setting up libssl1.0.0:amd64 (1.0.2g-1ubuntu4.20) ... +Setting up apt-transport-https (1.2.35) ... +Setting up libisc160:amd64 (1:9.10.3.dfsg.P4-8ubuntu1.19) ... 
+Setting up libdns162:amd64 (1:9.10.3.dfsg.P4-8ubuntu1.19) ... +Setting up libisccc140:amd64 (1:9.10.3.dfsg.P4-8ubuntu1.19) ... +Setting up libisccfg140:amd64 (1:9.10.3.dfsg.P4-8ubuntu1.19) ... +Setting up libbind9-140:amd64 (1:9.10.3.dfsg.P4-8ubuntu1.19) ... +Setting up liblwres141:amd64 (1:9.10.3.dfsg.P4-8ubuntu1.19) ... +Setting up bind9-host (1:9.10.3.dfsg.P4-8ubuntu1.19) ... +Setting up dnsutils (1:9.10.3.dfsg.P4-8ubuntu1.19) ... +Setting up openssl (1.0.2g-1ubuntu4.20) ... +Setting up python-apt-common (1.1.0~beta1ubuntu0.16.04.12) ... +Setting up python3-apt (1.1.0~beta1ubuntu0.16.04.12) ... +Setting up sosreport (3.9.1-1ubuntu0.16.04.2) ... +Processing triggers for libc-bin (2.23-0ubuntu11.3) ... ++ lxc exec juju-xenial-base -- apt-get install -y curl cpu-checker bridge-utils cloud-utils tmux ubuntu-fan gcc build-essential python3-pip python3-setuptools python3-yaml virtualenv +Reading package lists... +Building dependency tree... +Reading state information... +python3-yaml is already the newest version (3.11-3build1). +tmux is already the newest version (2.1-3build1). +curl is already the newest version (7.47.0-1ubuntu2.19). +The following package was automatically installed and is no longer required: + libfreetype6 +Use 'apt autoremove' to remove it. 
+The following additional packages will be installed: + binutils cloud-image-utils cpp cpp-5 distro-info dpkg-dev fakeroot g++ g++-5 + gcc-5 genisoimage libaio1 libalgorithm-diff-perl libalgorithm-diff-xs-perl + libalgorithm-merge-perl libasan2 libatomic1 libboost-iostreams1.58.0 + libboost-random1.58.0 libboost-system1.58.0 libboost-thread1.58.0 + libc-dev-bin libc6-dev libcc1-0 libcilkrts5 libdpkg-perl libexpat1-dev + libfakeroot libfile-fcntllock-perl libgcc-5-dev libgomp1 libiscsi2 libisl15 + libitm1 liblsan0 libmpc3 libmpx0 libnspr4 libnss3 libnss3-nssdb + libpython3-dev libpython3.5-dev libquadmath0 librados2 librbd1 + libstdc++-5-dev libtsan0 libubsan0 linux-libc-dev make manpages-dev + msr-tools python-pip-whl python3-dev python3-virtualenv python3-wheel + python3.5-dev qemu-block-extra qemu-utils sharutils +Suggested packages: + binutils-doc cloud-utils-euca cpp-doc gcc-5-locales shunit2 debian-keyring + g++-multilib g++-5-multilib gcc-5-doc libstdc++6-5-dbg gcc-multilib autoconf + automake libtool flex bison gdb gcc-doc gcc-5-multilib libgcc1-dbg + libgomp1-dbg libitm1-dbg libatomic1-dbg libasan2-dbg liblsan0-dbg + libtsan0-dbg libubsan0-dbg libcilkrts5-dbg libmpx0-dbg libquadmath0-dbg + wodim cdrkit-doc glibc-doc libstdc++-5-doc make-doc python-setuptools-doc + debootstrap bsd-mailx | mailx +The following NEW packages will be installed: + binutils bridge-utils build-essential cloud-image-utils cloud-utils cpp + cpp-5 cpu-checker distro-info dpkg-dev fakeroot g++ g++-5 gcc gcc-5 + genisoimage libaio1 libalgorithm-diff-perl libalgorithm-diff-xs-perl + libalgorithm-merge-perl libasan2 libatomic1 libboost-iostreams1.58.0 + libboost-random1.58.0 libboost-system1.58.0 libboost-thread1.58.0 + libc-dev-bin libc6-dev libcc1-0 libcilkrts5 libdpkg-perl libexpat1-dev + libfakeroot libfile-fcntllock-perl libgcc-5-dev libgomp1 libiscsi2 libisl15 + libitm1 liblsan0 libmpc3 libmpx0 libnspr4 libnss3 libnss3-nssdb + libpython3-dev libpython3.5-dev libquadmath0 librados2 
librbd1 + libstdc++-5-dev libtsan0 libubsan0 linux-libc-dev make manpages-dev + msr-tools python-pip-whl python3-dev python3-pip python3-setuptools + python3-virtualenv python3-wheel python3.5-dev qemu-block-extra qemu-utils + sharutils ubuntu-fan virtualenv +0 upgraded, 69 newly installed, 0 to remove and 2 not upgraded. +Need to get 84.4 MB of archives. +After this operation, 226 MB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu xenial/main amd64 libiscsi2 amd64 1.12.0-2 [51.5 kB] +Get:2 http://archive.ubuntu.com/ubuntu xenial/main amd64 libmpc3 amd64 1.0.3-1 [39.7 kB] +Get:3 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 binutils amd64 2.26.1-1ubuntu1~16.04.8 [2312 kB] +Get:4 http://archive.ubuntu.com/ubuntu xenial/main amd64 bridge-utils amd64 1.5-9ubuntu1 [28.6 kB] +Get:5 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libc-dev-bin amd64 2.23-0ubuntu11.3 [68.6 kB] +Get:6 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 linux-libc-dev amd64 4.4.0-210.242 [832 kB] +Get:7 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libc6-dev amd64 2.23-0ubuntu11.3 [2083 kB] +Get:8 http://archive.ubuntu.com/ubuntu xenial/main amd64 libisl15 amd64 0.16.1-1 [524 kB] +Get:9 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 cpp-5 amd64 5.4.0-6ubuntu1~16.04.12 [7783 kB] +Get:10 http://archive.ubuntu.com/ubuntu xenial/main amd64 cpp amd64 4:5.3.1-1ubuntu1 [27.7 kB] +Get:11 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libcc1-0 amd64 5.4.0-6ubuntu1~16.04.12 [38.8 kB] +Get:12 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libgomp1 amd64 5.4.0-6ubuntu1~16.04.12 [55.2 kB] +Get:13 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libitm1 amd64 5.4.0-6ubuntu1~16.04.12 [27.4 kB] +Get:14 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libatomic1 amd64 5.4.0-6ubuntu1~16.04.12 [8892 B] +Get:15 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libasan2 
amd64 5.4.0-6ubuntu1~16.04.12 [265 kB] +Get:16 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 liblsan0 amd64 5.4.0-6ubuntu1~16.04.12 [105 kB] +Get:17 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libtsan0 amd64 5.4.0-6ubuntu1~16.04.12 [244 kB] +Get:18 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libubsan0 amd64 5.4.0-6ubuntu1~16.04.12 [95.3 kB] +Get:19 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libcilkrts5 amd64 5.4.0-6ubuntu1~16.04.12 [40.0 kB] +Get:20 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libmpx0 amd64 5.4.0-6ubuntu1~16.04.12 [9762 B] +Get:21 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libquadmath0 amd64 5.4.0-6ubuntu1~16.04.12 [131 kB] +Get:22 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libgcc-5-dev amd64 5.4.0-6ubuntu1~16.04.12 [2239 kB] +Get:23 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 gcc-5 amd64 5.4.0-6ubuntu1~16.04.12 [8612 kB] +Get:24 http://archive.ubuntu.com/ubuntu xenial/main amd64 gcc amd64 4:5.3.1-1ubuntu1 [5244 B] +Get:25 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libstdc++-5-dev amd64 5.4.0-6ubuntu1~16.04.12 [1428 kB] +Get:26 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 g++-5 amd64 5.4.0-6ubuntu1~16.04.12 [8430 kB] +Get:27 http://archive.ubuntu.com/ubuntu xenial/main amd64 g++ amd64 4:5.3.1-1ubuntu1 [1504 B] +Get:28 http://archive.ubuntu.com/ubuntu xenial/main amd64 make amd64 4.1-6 [151 kB] +Get:29 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libdpkg-perl all 1.18.4ubuntu1.7 [195 kB] +Get:30 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 dpkg-dev all 1.18.4ubuntu1.7 [584 kB] +Get:31 http://archive.ubuntu.com/ubuntu xenial/main amd64 build-essential amd64 12.1ubuntu2 [4758 B] +Get:32 http://archive.ubuntu.com/ubuntu xenial/main amd64 msr-tools amd64 1.3-2 [10.6 kB] +Get:33 http://archive.ubuntu.com/ubuntu xenial/main amd64 cpu-checker amd64 0.7-0ubuntu7 [6862 B] +Get:34 
http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 distro-info amd64 0.14ubuntu0.2 [20.1 kB] +Get:35 http://archive.ubuntu.com/ubuntu xenial/main amd64 libfakeroot amd64 1.20.2-1ubuntu1 [25.5 kB] +Get:36 http://archive.ubuntu.com/ubuntu xenial/main amd64 fakeroot amd64 1.20.2-1ubuntu1 [61.8 kB] +Get:37 http://archive.ubuntu.com/ubuntu xenial/main amd64 genisoimage amd64 9:1.1.11-3ubuntu1 [316 kB] +Get:38 http://archive.ubuntu.com/ubuntu xenial/main amd64 libaio1 amd64 0.3.110-2 [6356 B] +Get:39 http://archive.ubuntu.com/ubuntu xenial/main amd64 libalgorithm-diff-perl all 1.19.03-1 [47.6 kB] +Get:40 http://archive.ubuntu.com/ubuntu xenial/main amd64 libalgorithm-diff-xs-perl amd64 0.04-4build1 [11.0 kB] +Get:41 http://archive.ubuntu.com/ubuntu xenial/main amd64 libalgorithm-merge-perl all 0.08-3 [12.0 kB] +Get:42 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libboost-iostreams1.58.0 amd64 1.58.0+dfsg-5ubuntu3.1 [29.0 kB] +Get:43 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libboost-system1.58.0 amd64 1.58.0+dfsg-5ubuntu3.1 [9146 B] +Get:44 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libboost-random1.58.0 amd64 1.58.0+dfsg-5ubuntu3.1 [11.7 kB] +Get:45 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libboost-thread1.58.0 amd64 1.58.0+dfsg-5ubuntu3.1 [47.0 kB] +Get:46 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libexpat1-dev amd64 2.1.0-7ubuntu0.16.04.5 [115 kB] +Get:47 http://archive.ubuntu.com/ubuntu xenial/main amd64 libfile-fcntllock-perl amd64 0.22-3 [32.0 kB] +Get:48 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libnspr4 amd64 2:4.13.1-0ubuntu0.16.04.1 [112 kB] +Get:49 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libnss3-nssdb all 2:3.28.4-0ubuntu0.16.04.14 [10.6 kB] +Get:50 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libnss3 amd64 2:3.28.4-0ubuntu0.16.04.14 [1232 kB] +Get:51 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 
libpython3.5-dev amd64 3.5.2-2ubuntu0~16.04.13 [37.3 MB] +Get:52 http://archive.ubuntu.com/ubuntu xenial/main amd64 libpython3-dev amd64 3.5.1-3 [6926 B] +Get:53 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 librados2 amd64 10.2.11-0ubuntu0.16.04.3 [1651 kB] +Get:54 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 librbd1 amd64 10.2.11-0ubuntu0.16.04.3 [2198 kB] +Get:55 http://archive.ubuntu.com/ubuntu xenial/main amd64 manpages-dev all 4.04-2 [2048 kB] +Get:56 http://archive.ubuntu.com/ubuntu xenial-updates/universe amd64 python-pip-whl all 8.1.1-2ubuntu0.6 [1112 kB] +Get:57 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 python3.5-dev amd64 3.5.2-2ubuntu0~16.04.13 [413 kB] +Get:58 http://archive.ubuntu.com/ubuntu xenial/main amd64 python3-dev amd64 3.5.1-3 [1186 B] +Get:59 http://archive.ubuntu.com/ubuntu xenial-updates/universe amd64 python3-pip all 8.1.1-2ubuntu0.6 [109 kB] +Get:60 http://archive.ubuntu.com/ubuntu xenial/main amd64 python3-setuptools all 20.7.0-1 [88.0 kB] +Get:61 http://archive.ubuntu.com/ubuntu xenial-updates/universe amd64 python3-virtualenv all 15.0.1+ds-3ubuntu1.1 [43.3 kB] +Get:62 http://archive.ubuntu.com/ubuntu xenial/universe amd64 python3-wheel all 0.29.0-1 [48.1 kB] +Get:63 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 qemu-block-extra amd64 1:2.5+dfsg-5ubuntu10.51 [32.4 kB] +Get:64 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 qemu-utils amd64 1:2.5+dfsg-5ubuntu10.51 [582 kB] +Get:65 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 sharutils amd64 1:4.15.2-1ubuntu0.1 [148 kB] +Get:66 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 ubuntu-fan all 0.12.8~16.04.3 [35.1 kB] +Get:67 http://archive.ubuntu.com/ubuntu xenial-updates/universe amd64 virtualenv all 15.0.1+ds-3ubuntu1.1 [4346 B] +Get:68 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 cloud-image-utils all 0.27-0ubuntu25.2 [16.2 kB] +Get:69 http://archive.ubuntu.com/ubuntu 
xenial-updates/main amd64 cloud-utils all 0.27-0ubuntu25.2 [1514 B] +dpkg-preconfigure: unable to re-open stdin: No such file or directory +Fetched 84.4 MB in 3s (21.2 MB/s) +Selecting previously unselected package libiscsi2:amd64. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 25821 files and directories currently installed.) +Preparing to unpack .../libiscsi2_1.12.0-2_amd64.deb ... +Unpacking libiscsi2:amd64 (1.12.0-2) ... +Selecting previously unselected package libmpc3:amd64. +Preparing to unpack .../libmpc3_1.0.3-1_amd64.deb ... +Unpacking libmpc3:amd64 (1.0.3-1) ... +Selecting previously unselected package binutils. +Preparing to unpack .../binutils_2.26.1-1ubuntu1~16.04.8_amd64.deb ... +Unpacking binutils (2.26.1-1ubuntu1~16.04.8) ... +Selecting previously unselected package bridge-utils. +Preparing to unpack .../bridge-utils_1.5-9ubuntu1_amd64.deb ... +Unpacking bridge-utils (1.5-9ubuntu1) ... +Selecting previously unselected package libc-dev-bin. +Preparing to unpack .../libc-dev-bin_2.23-0ubuntu11.3_amd64.deb ... +Unpacking libc-dev-bin (2.23-0ubuntu11.3) ... +Selecting previously unselected package linux-libc-dev:amd64. +Preparing to unpack .../linux-libc-dev_4.4.0-210.242_amd64.deb ... +Unpacking linux-libc-dev:amd64 (4.4.0-210.242) ... +Selecting previously unselected package libc6-dev:amd64. +Preparing to unpack .../libc6-dev_2.23-0ubuntu11.3_amd64.deb ... +Unpacking libc6-dev:amd64 (2.23-0ubuntu11.3) ... 
+Selecting previously unselected package libisl15:amd64. +Preparing to unpack .../libisl15_0.16.1-1_amd64.deb ... +Unpacking libisl15:amd64 (0.16.1-1) ... +Selecting previously unselected package cpp-5. +Preparing to unpack .../cpp-5_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking cpp-5 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package cpp. +Preparing to unpack .../cpp_4%3a5.3.1-1ubuntu1_amd64.deb ... +Unpacking cpp (4:5.3.1-1ubuntu1) ... +Selecting previously unselected package libcc1-0:amd64. +Preparing to unpack .../libcc1-0_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libcc1-0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libgomp1:amd64. +Preparing to unpack .../libgomp1_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libgomp1:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libitm1:amd64. +Preparing to unpack .../libitm1_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libitm1:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libatomic1:amd64. +Preparing to unpack .../libatomic1_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libatomic1:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libasan2:amd64. +Preparing to unpack .../libasan2_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libasan2:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package liblsan0:amd64. +Preparing to unpack .../liblsan0_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking liblsan0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libtsan0:amd64. +Preparing to unpack .../libtsan0_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libtsan0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libubsan0:amd64. +Preparing to unpack .../libubsan0_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libubsan0:amd64 (5.4.0-6ubuntu1~16.04.12) ... 
+Selecting previously unselected package libcilkrts5:amd64. +Preparing to unpack .../libcilkrts5_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libcilkrts5:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libmpx0:amd64. +Preparing to unpack .../libmpx0_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libmpx0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libquadmath0:amd64. +Preparing to unpack .../libquadmath0_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libquadmath0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package libgcc-5-dev:amd64. +Preparing to unpack .../libgcc-5-dev_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libgcc-5-dev:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package gcc-5. +Preparing to unpack .../gcc-5_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking gcc-5 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package gcc. +Preparing to unpack .../gcc_4%3a5.3.1-1ubuntu1_amd64.deb ... +Unpacking gcc (4:5.3.1-1ubuntu1) ... +Selecting previously unselected package libstdc++-5-dev:amd64. +Preparing to unpack .../libstdc++-5-dev_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking libstdc++-5-dev:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package g++-5. +Preparing to unpack .../g++-5_5.4.0-6ubuntu1~16.04.12_amd64.deb ... +Unpacking g++-5 (5.4.0-6ubuntu1~16.04.12) ... +Selecting previously unselected package g++. +Preparing to unpack .../g++_4%3a5.3.1-1ubuntu1_amd64.deb ... +Unpacking g++ (4:5.3.1-1ubuntu1) ... +Selecting previously unselected package make. +Preparing to unpack .../archives/make_4.1-6_amd64.deb ... +Unpacking make (4.1-6) ... +Selecting previously unselected package libdpkg-perl. +Preparing to unpack .../libdpkg-perl_1.18.4ubuntu1.7_all.deb ... +Unpacking libdpkg-perl (1.18.4ubuntu1.7) ... +Selecting previously unselected package dpkg-dev. 
+Preparing to unpack .../dpkg-dev_1.18.4ubuntu1.7_all.deb ... +Unpacking dpkg-dev (1.18.4ubuntu1.7) ... +Selecting previously unselected package build-essential. +Preparing to unpack .../build-essential_12.1ubuntu2_amd64.deb ... +Unpacking build-essential (12.1ubuntu2) ... +Selecting previously unselected package msr-tools. +Preparing to unpack .../msr-tools_1.3-2_amd64.deb ... +Unpacking msr-tools (1.3-2) ... +Selecting previously unselected package cpu-checker. +Preparing to unpack .../cpu-checker_0.7-0ubuntu7_amd64.deb ... +Unpacking cpu-checker (0.7-0ubuntu7) ... +Selecting previously unselected package distro-info. +Preparing to unpack .../distro-info_0.14ubuntu0.2_amd64.deb ... +Unpacking distro-info (0.14ubuntu0.2) ... +Selecting previously unselected package libfakeroot:amd64. +Preparing to unpack .../libfakeroot_1.20.2-1ubuntu1_amd64.deb ... +Unpacking libfakeroot:amd64 (1.20.2-1ubuntu1) ... +Selecting previously unselected package fakeroot. +Preparing to unpack .../fakeroot_1.20.2-1ubuntu1_amd64.deb ... +Unpacking fakeroot (1.20.2-1ubuntu1) ... +Selecting previously unselected package genisoimage. +Preparing to unpack .../genisoimage_9%3a1.1.11-3ubuntu1_amd64.deb ... +Unpacking genisoimage (9:1.1.11-3ubuntu1) ... +Selecting previously unselected package libaio1:amd64. +Preparing to unpack .../libaio1_0.3.110-2_amd64.deb ... +Unpacking libaio1:amd64 (0.3.110-2) ... +Selecting previously unselected package libalgorithm-diff-perl. +Preparing to unpack .../libalgorithm-diff-perl_1.19.03-1_all.deb ... +Unpacking libalgorithm-diff-perl (1.19.03-1) ... +Selecting previously unselected package libalgorithm-diff-xs-perl. +Preparing to unpack .../libalgorithm-diff-xs-perl_0.04-4build1_amd64.deb ... +Unpacking libalgorithm-diff-xs-perl (0.04-4build1) ... +Selecting previously unselected package libalgorithm-merge-perl. +Preparing to unpack .../libalgorithm-merge-perl_0.08-3_all.deb ... +Unpacking libalgorithm-merge-perl (0.08-3) ... 
+Selecting previously unselected package libboost-iostreams1.58.0:amd64. +Preparing to unpack .../libboost-iostreams1.58.0_1.58.0+dfsg-5ubuntu3.1_amd64.deb ... +Unpacking libboost-iostreams1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Selecting previously unselected package libboost-system1.58.0:amd64. +Preparing to unpack .../libboost-system1.58.0_1.58.0+dfsg-5ubuntu3.1_amd64.deb ... +Unpacking libboost-system1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Selecting previously unselected package libboost-random1.58.0:amd64. +Preparing to unpack .../libboost-random1.58.0_1.58.0+dfsg-5ubuntu3.1_amd64.deb ... +Unpacking libboost-random1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Selecting previously unselected package libboost-thread1.58.0:amd64. +Preparing to unpack .../libboost-thread1.58.0_1.58.0+dfsg-5ubuntu3.1_amd64.deb ... +Unpacking libboost-thread1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Selecting previously unselected package libexpat1-dev:amd64. +Preparing to unpack .../libexpat1-dev_2.1.0-7ubuntu0.16.04.5_amd64.deb ... +Unpacking libexpat1-dev:amd64 (2.1.0-7ubuntu0.16.04.5) ... +Selecting previously unselected package libfile-fcntllock-perl. +Preparing to unpack .../libfile-fcntllock-perl_0.22-3_amd64.deb ... +Unpacking libfile-fcntllock-perl (0.22-3) ... +Selecting previously unselected package libnspr4:amd64. +Preparing to unpack .../libnspr4_2%3a4.13.1-0ubuntu0.16.04.1_amd64.deb ... +Unpacking libnspr4:amd64 (2:4.13.1-0ubuntu0.16.04.1) ... +Selecting previously unselected package libnss3-nssdb. +Preparing to unpack .../libnss3-nssdb_2%3a3.28.4-0ubuntu0.16.04.14_all.deb ... +Unpacking libnss3-nssdb (2:3.28.4-0ubuntu0.16.04.14) ... +Selecting previously unselected package libnss3:amd64. +Preparing to unpack .../libnss3_2%3a3.28.4-0ubuntu0.16.04.14_amd64.deb ... +Unpacking libnss3:amd64 (2:3.28.4-0ubuntu0.16.04.14) ... +Selecting previously unselected package libpython3.5-dev:amd64. +Preparing to unpack .../libpython3.5-dev_3.5.2-2ubuntu0~16.04.13_amd64.deb ... 
+Unpacking libpython3.5-dev:amd64 (3.5.2-2ubuntu0~16.04.13) ... +Selecting previously unselected package libpython3-dev:amd64. +Preparing to unpack .../libpython3-dev_3.5.1-3_amd64.deb ... +Unpacking libpython3-dev:amd64 (3.5.1-3) ... +Selecting previously unselected package librados2. +Preparing to unpack .../librados2_10.2.11-0ubuntu0.16.04.3_amd64.deb ... +Unpacking librados2 (10.2.11-0ubuntu0.16.04.3) ... +Selecting previously unselected package librbd1. +Preparing to unpack .../librbd1_10.2.11-0ubuntu0.16.04.3_amd64.deb ... +Unpacking librbd1 (10.2.11-0ubuntu0.16.04.3) ... +Selecting previously unselected package manpages-dev. +Preparing to unpack .../manpages-dev_4.04-2_all.deb ... +Unpacking manpages-dev (4.04-2) ... +Selecting previously unselected package python-pip-whl. +Preparing to unpack .../python-pip-whl_8.1.1-2ubuntu0.6_all.deb ... +Unpacking python-pip-whl (8.1.1-2ubuntu0.6) ... +Selecting previously unselected package python3.5-dev. +Preparing to unpack .../python3.5-dev_3.5.2-2ubuntu0~16.04.13_amd64.deb ... +Unpacking python3.5-dev (3.5.2-2ubuntu0~16.04.13) ... +Selecting previously unselected package python3-dev. +Preparing to unpack .../python3-dev_3.5.1-3_amd64.deb ... +Unpacking python3-dev (3.5.1-3) ... +Selecting previously unselected package python3-pip. +Preparing to unpack .../python3-pip_8.1.1-2ubuntu0.6_all.deb ... +Unpacking python3-pip (8.1.1-2ubuntu0.6) ... +Selecting previously unselected package python3-setuptools. +Preparing to unpack .../python3-setuptools_20.7.0-1_all.deb ... +Unpacking python3-setuptools (20.7.0-1) ... +Selecting previously unselected package python3-virtualenv. +Preparing to unpack .../python3-virtualenv_15.0.1+ds-3ubuntu1.1_all.deb ... +Unpacking python3-virtualenv (15.0.1+ds-3ubuntu1.1) ... +Selecting previously unselected package python3-wheel. +Preparing to unpack .../python3-wheel_0.29.0-1_all.deb ... +Unpacking python3-wheel (0.29.0-1) ... +Selecting previously unselected package qemu-block-extra:amd64. 
+Preparing to unpack .../qemu-block-extra_1%3a2.5+dfsg-5ubuntu10.51_amd64.deb ... +Unpacking qemu-block-extra:amd64 (1:2.5+dfsg-5ubuntu10.51) ... +Selecting previously unselected package qemu-utils. +Preparing to unpack .../qemu-utils_1%3a2.5+dfsg-5ubuntu10.51_amd64.deb ... +Unpacking qemu-utils (1:2.5+dfsg-5ubuntu10.51) ... +Selecting previously unselected package sharutils. +Preparing to unpack .../sharutils_1%3a4.15.2-1ubuntu0.1_amd64.deb ... +Unpacking sharutils (1:4.15.2-1ubuntu0.1) ... +Selecting previously unselected package ubuntu-fan. +Preparing to unpack .../ubuntu-fan_0.12.8~16.04.3_all.deb ... +Unpacking ubuntu-fan (0.12.8~16.04.3) ... +Selecting previously unselected package virtualenv. +Preparing to unpack .../virtualenv_15.0.1+ds-3ubuntu1.1_all.deb ... +Unpacking virtualenv (15.0.1+ds-3ubuntu1.1) ... +Selecting previously unselected package cloud-image-utils. +Preparing to unpack .../cloud-image-utils_0.27-0ubuntu25.2_all.deb ... +Unpacking cloud-image-utils (0.27-0ubuntu25.2) ... +Selecting previously unselected package cloud-utils. +Preparing to unpack .../cloud-utils_0.27-0ubuntu25.2_all.deb ... +Unpacking cloud-utils (0.27-0ubuntu25.2) ... +Processing triggers for libc-bin (2.23-0ubuntu11.3) ... +Processing triggers for man-db (2.7.5-1) ... +Processing triggers for install-info (6.1.0.dfsg.1-5) ... +Processing triggers for ureadahead (0.100.0-19.1) ... +Processing triggers for systemd (229-4ubuntu21.31) ... +Setting up libiscsi2:amd64 (1.12.0-2) ... +Setting up libmpc3:amd64 (1.0.3-1) ... +Setting up binutils (2.26.1-1ubuntu1~16.04.8) ... +Setting up bridge-utils (1.5-9ubuntu1) ... +Setting up libc-dev-bin (2.23-0ubuntu11.3) ... +Setting up linux-libc-dev:amd64 (4.4.0-210.242) ... +Setting up libc6-dev:amd64 (2.23-0ubuntu11.3) ... +Setting up libisl15:amd64 (0.16.1-1) ... +Setting up cpp-5 (5.4.0-6ubuntu1~16.04.12) ... +Setting up cpp (4:5.3.1-1ubuntu1) ... +Setting up libcc1-0:amd64 (5.4.0-6ubuntu1~16.04.12) ... 
+Setting up libgomp1:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libitm1:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libatomic1:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libasan2:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up liblsan0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libtsan0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libubsan0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libcilkrts5:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libmpx0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libquadmath0:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up libgcc-5-dev:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up gcc-5 (5.4.0-6ubuntu1~16.04.12) ... +Setting up gcc (4:5.3.1-1ubuntu1) ... +Setting up libstdc++-5-dev:amd64 (5.4.0-6ubuntu1~16.04.12) ... +Setting up g++-5 (5.4.0-6ubuntu1~16.04.12) ... +Setting up g++ (4:5.3.1-1ubuntu1) ... +update-alternatives: using /usr/bin/g++ to provide /usr/bin/c++ (c++) in auto mode +Setting up make (4.1-6) ... +Setting up libdpkg-perl (1.18.4ubuntu1.7) ... +Setting up dpkg-dev (1.18.4ubuntu1.7) ... +Setting up build-essential (12.1ubuntu2) ... +Setting up msr-tools (1.3-2) ... +Setting up cpu-checker (0.7-0ubuntu7) ... +Setting up distro-info (0.14ubuntu0.2) ... +Setting up libfakeroot:amd64 (1.20.2-1ubuntu1) ... +Setting up fakeroot (1.20.2-1ubuntu1) ... +update-alternatives: using /usr/bin/fakeroot-sysv to provide /usr/bin/fakeroot (fakeroot) in auto mode +Setting up genisoimage (9:1.1.11-3ubuntu1) ... +Setting up libaio1:amd64 (0.3.110-2) ... +Setting up libalgorithm-diff-perl (1.19.03-1) ... +Setting up libalgorithm-diff-xs-perl (0.04-4build1) ... +Setting up libalgorithm-merge-perl (0.08-3) ... +Setting up libboost-iostreams1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Setting up libboost-system1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Setting up libboost-random1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... +Setting up libboost-thread1.58.0:amd64 (1.58.0+dfsg-5ubuntu3.1) ... 
+Setting up libexpat1-dev:amd64 (2.1.0-7ubuntu0.16.04.5) ... +Setting up libfile-fcntllock-perl (0.22-3) ... +Setting up libnspr4:amd64 (2:4.13.1-0ubuntu0.16.04.1) ... +Setting up libpython3.5-dev:amd64 (3.5.2-2ubuntu0~16.04.13) ... +Setting up libpython3-dev:amd64 (3.5.1-3) ... +Setting up manpages-dev (4.04-2) ... +Setting up python-pip-whl (8.1.1-2ubuntu0.6) ... +Setting up python3.5-dev (3.5.2-2ubuntu0~16.04.13) ... +Setting up python3-dev (3.5.1-3) ... +Setting up python3-pip (8.1.1-2ubuntu0.6) ... +Setting up python3-setuptools (20.7.0-1) ... +Setting up python3-virtualenv (15.0.1+ds-3ubuntu1.1) ... +Setting up python3-wheel (0.29.0-1) ... +Setting up sharutils (1:4.15.2-1ubuntu0.1) ... +Setting up ubuntu-fan (0.12.8~16.04.3) ... +Setting up virtualenv (15.0.1+ds-3ubuntu1.1) ... +Setting up libnss3-nssdb (2:3.28.4-0ubuntu0.16.04.14) ... +Setting up libnss3:amd64 (2:3.28.4-0ubuntu0.16.04.14) ... +Setting up librados2 (10.2.11-0ubuntu0.16.04.3) ... +Setting up librbd1 (10.2.11-0ubuntu0.16.04.3) ... +Setting up qemu-block-extra:amd64 (1:2.5+dfsg-5ubuntu10.51) ... +Setting up qemu-utils (1:2.5+dfsg-5ubuntu10.51) ... +Setting up cloud-image-utils (0.27-0ubuntu25.2) ... +Setting up cloud-utils (0.27-0ubuntu25.2) ... +Processing triggers for libc-bin (2.23-0ubuntu11.3) ... +Processing triggers for ureadahead (0.100.0-19.1) ... +Processing triggers for systemd (229-4ubuntu21.31) ... 
++ echo 'Installing Juju agent 2.9.12-ubuntu-amd64' +Installing Juju agent 2.9.12-ubuntu-amd64 ++ lxc exec juju-xenial-base -- mkdir -p /var/lib/juju/tools/2.9.12-ubuntu-amd64/ ++ lxc exec juju-xenial-base -- curl -sS --connect-timeout 20 --noproxy '*' --insecure -o /var/lib/juju/tools/2.9.12-ubuntu-amd64/tools.tar.gz https://streams.canonical.com/juju/tools/agent/2.9.12/juju-2.9.12-ubuntu-amd64.tgz ++ lxc exec juju-xenial-base -- tar zxf /var/lib/juju/tools/2.9.12-ubuntu-amd64/tools.tar.gz -C /var/lib/juju/tools/2.9.12-ubuntu-amd64 ++ lxc stop juju-xenial-base ++ lxc image delete juju/xenial/amd64 +Error: not found ++ true ++ lxc image delete clean-xenial +++ date +%Y%m%d ++ lxc publish juju-xenial-base --alias juju/xenial/amd64 'description=xenial juju dev image (20210906)' + Publishing instance: Image pack: 1% (6.79MB/s) Publishing instance: Image pack: 2% (5.64MB/s) Publishing instance: Image pack: 3% (6.19MB/s) Publishing instance: Image pack: 4% (7.10MB/s) Publishing instance: Image pack: 5% (7.01MB/s) Publishing instance: Image pack: 6% (6.88MB/s) Publishing instance: Image pack: 7% (7.05MB/s) Publishing instance: Image pack: 8% (7.30MB/s) Publishing instance: Image pack: 9% (7.27MB/s) Publishing instance: Image pack: 10% (7.49MB/s) Publishing instance: Image pack: 10% (7.79MB/s) Publishing instance: Image pack: 11% (8.02MB/s) Publishing instance: Image pack: 12% (8.28MB/s) Publishing instance: Image pack: 13% (8.50MB/s) Publishing instance: Image pack: 14% (8.50MB/s) Publishing instance: Image pack: 15% (8.45MB/s) Publishing instance: Image pack: 16% (8.26MB/s) Publishing instance: Image pack: 17% (8.28MB/s) Publishing instance: Image pack: 18% (8.41MB/s) Publishing instance: Image pack: 19% (8.48MB/s) Publishing instance: Image pack: 19% (8.65MB/s) Publishing instance: Image pack: 20% (8.81MB/s) Publishing instance: Image pack: 21% (8.72MB/s) Publishing instance: Image pack: 22% (8.72MB/s) Publishing instance: Image pack: 23% (8.79MB/s) Publishing 
instance: Image pack: 24% (8.76MB/s) Publishing instance: Image pack: 25% (8.81MB/s) Publishing instance: Image pack: 26% (8.76MB/s) Publishing instance: Image pack: 27% (8.80MB/s) Publishing instance: Image pack: 28% (8.89MB/s) Publishing instance: Image pack: 28% (9.01MB/s) Publishing instance: Image pack: 29% (9.12MB/s) Publishing instance: Image pack: 30% (9.26MB/s) Publishing instance: Image pack: 31% (9.36MB/s) Publishing instance: Image pack: 32% (9.30MB/s) Publishing instance: Image pack: 33% (9.28MB/s) Publishing instance: Image pack: 34% (9.20MB/s) Publishing instance: Image pack: 35% (9.20MB/s) Publishing instance: Image pack: 36% (9.18MB/s) Publishing instance: Image pack: 37% (9.08MB/s) Publishing instance: Image pack: 37% (8.94MB/s) Publishing instance: Image pack: 38% (8.97MB/s) Publishing instance: Image pack: 39% (8.88MB/s) Publishing instance: Image pack: 40% (8.85MB/s) Publishing instance: Image pack: 41% (8.75MB/s) Publishing instance: Image pack: 42% (8.74MB/s) Publishing instance: Image pack: 43% (8.71MB/s) Publishing instance: Image pack: 44% (8.70MB/s) Publishing instance: Image pack: 45% (8.71MB/s) Publishing instance: Image pack: 46% (8.62MB/s) Publishing instance: Image pack: 46% (8.62MB/s) Publishing instance: Image pack: 47% (8.65MB/s) Publishing instance: Image pack: 48% (8.69MB/s) Publishing instance: Image pack: 49% (8.69MB/s) Publishing instance: Image pack: 50% (8.73MB/s) Publishing instance: Image pack: 51% (8.71MB/s) Publishing instance: Image pack: 52% (8.70MB/s) Publishing instance: Image pack: 53% (8.69MB/s) Publishing instance: Image pack: 54% (8.73MB/s) Publishing instance: Image pack: 55% (8.74MB/s) Publishing instance: Image pack: 55% (8.74MB/s) Publishing instance: Image pack: 56% (8.61MB/s) Publishing instance: Image pack: 57% (8.53MB/s) Publishing instance: Image pack: 58% (8.56MB/s) Publishing instance: Image pack: 59% (8.41MB/s) Publishing instance: Image pack: 60% (8.38MB/s) Publishing instance: Image pack: 61% 
(8.32MB/s) Publishing instance: Image pack: 62% (8.27MB/s) Publishing instance: Image pack: 63% (8.33MB/s) Publishing instance: Image pack: 64% (8.40MB/s) Publishing instance: Image pack: 64% (8.28MB/s) Publishing instance: Image pack: 65% (8.34MB/s) Publishing instance: Image pack: 66% (8.34MB/s) Publishing instance: Image pack: 67% (8.39MB/s) Publishing instance: Image pack: 68% (8.45MB/s) Publishing instance: Image pack: 69% (8.50MB/s) Publishing instance: Image pack: 70% (8.53MB/s) Publishing instance: Image pack: 71% (8.53MB/s) Publishing instance: Image pack: 72% (8.50MB/s) Publishing instance: Image pack: 73% (8.48MB/s) Publishing instance: Image pack: 73% (8.45MB/s) Publishing instance: Image pack: 74% (8.47MB/s) Publishing instance: Image pack: 75% (8.51MB/s) Publishing instance: Image pack: 76% (8.55MB/s) Publishing instance: Image pack: 77% (8.58MB/s) Publishing instance: Image pack: 78% (8.60MB/s) Publishing instance: Image pack: 79% (8.65MB/s) Publishing instance: Image pack: 80% (8.68MB/s) Publishing instance: Image pack: 81% (8.72MB/s) Publishing instance: Image pack: 82% (8.76MB/s) Publishing instance: Image pack: 83% (8.80MB/s) Publishing instance: Image pack: 83% (8.83MB/s) Publishing instance: Image pack: 84% (8.90MB/s) Publishing instance: Image pack: 85% (8.90MB/s) Publishing instance: Image pack: 86% (8.81MB/s) Publishing instance: Image pack: 87% (8.81MB/s) Publishing instance: Image pack: 88% (8.79MB/s) Publishing instance: Image pack: 89% (8.72MB/s) Publishing instance: Image pack: 90% (8.72MB/s) Publishing instance: Image pack: 91% (8.73MB/s) Publishing instance: Image pack: 92% (8.76MB/s) Publishing instance: Image pack: 92% (8.79MB/s) Publishing instance: Image pack: 93% (8.82MB/s) Publishing instance: Image pack: 94% (8.82MB/s) Publishing instance: Image pack: 95% (8.85MB/s) Publishing instance: Image pack: 96% (8.89MB/s) Publishing instance: Image pack: 97% (8.94MB/s) Publishing instance: Image pack: 98% (8.99MB/s) Publishing instance: 
Image pack: 99% (9.03MB/s) Publishing instance: Image pack: 100% (9.08MB/s) Publishing instance: Image pack: 100% (9.09MB/s) Publishing instance: Image pack: 100% (9.12MB/s) Instance published with fingerprint: 1334ad5edfa49e859139c81a52759c76b428215f46f864992a6cdaa4261d2930 ++ lxc delete juju-xenial-base -f ++ '[' 1 == 1 ']' ++ cache bionic virtualenv ++ series=bionic ++ container=juju-bionic-base ++ alias=juju/bionic/amd64 ++ lxc delete juju-bionic-base -f +Error: not found ++ true ++ lxc image copy ubuntu:bionic local: --alias clean-bionic + Copying the image: metadata: 100% (2.25GB/s) Copying the image: rootfs: 1% (2.54MB/s) Copying the image: rootfs: 2% (4.46MB/s) Copying the image: rootfs: 3% (6.25MB/s) Copying the image: rootfs: 4% (7.74MB/s) Copying the image: rootfs: 5% (9.17MB/s) Copying the image: rootfs: 6% (10.28MB/s) Copying the image: rootfs: 7% (11.11MB/s) Copying the image: rootfs: 8% (12.33MB/s) Copying the image: rootfs: 9% (12.97MB/s) Copying the image: rootfs: 10% (13.97MB/s) Copying the image: rootfs: 10% (14.51MB/s) Copying the image: rootfs: 11% (15.13MB/s) Copying the image: rootfs: 12% (15.22MB/s) Copying the image: rootfs: 13% (16.07MB/s) Copying the image: rootfs: 14% (16.16MB/s) Copying the image: rootfs: 15% (17.05MB/s) Copying the image: rootfs: 16% (17.16MB/s) Copying the image: rootfs: 17% (17.35MB/s) Copying the image: rootfs: 18% (17.86MB/s) Copying the image: rootfs: 19% (17.91MB/s) Copying the image: rootfs: 20% (18.14MB/s) Copying the image: rootfs: 20% (18.09MB/s) Copying the image: rootfs: 21% (18.63MB/s) Copying the image: rootfs: 22% (18.64MB/s) Copying the image: rootfs: 23% (18.62MB/s) Copying the image: rootfs: 24% (18.76MB/s) Copying the image: rootfs: 25% (18.83MB/s) Copying the image: rootfs: 26% (19.33MB/s) Copying the image: rootfs: 27% (19.34MB/s) Copying the image: rootfs: 28% (19.75MB/s) Copying the image: rootfs: 29% (19.74MB/s) Copying the image: rootfs: 30% (20.03MB/s) Copying the image: rootfs: 30% 
(20.11MB/s) Copying the image: rootfs: 31% (19.03MB/s) Copying the image: rootfs: 32% (19.25MB/s) Copying the image: rootfs: 33% (19.45MB/s) Copying the image: rootfs: 34% (19.43MB/s) Copying the image: rootfs: 35% (19.69MB/s) Copying the image: rootfs: 36% (19.76MB/s) Copying the image: rootfs: 37% (19.87MB/s) Copying the image: rootfs: 38% (20.00MB/s) Copying the image: rootfs: 39% (20.13MB/s) Copying the image: rootfs: 40% (20.14MB/s) Copying the image: rootfs: 40% (20.22MB/s) Copying the image: rootfs: 41% (20.45MB/s) Copying the image: rootfs: 42% (20.47MB/s) Copying the image: rootfs: 43% (20.66MB/s) Copying the image: rootfs: 44% (20.72MB/s) Copying the image: rootfs: 45% (20.79MB/s) Copying the image: rootfs: 46% (20.96MB/s) Copying the image: rootfs: 47% (21.03MB/s) Copying the image: rootfs: 48% (21.05MB/s) Copying the image: rootfs: 49% (21.26MB/s) Copying the image: rootfs: 50% (21.31MB/s) Copying the image: rootfs: 50% (21.38MB/s) Copying the image: rootfs: 51% (21.52MB/s) Copying the image: rootfs: 52% (21.58MB/s) Copying the image: rootfs: 53% (21.63MB/s) Copying the image: rootfs: 54% (21.76MB/s) Copying the image: rootfs: 55% (21.82MB/s) Copying the image: rootfs: 56% (21.96MB/s) Copying the image: rootfs: 57% (22.00MB/s) Copying the image: rootfs: 58% (22.04MB/s) Copying the image: rootfs: 59% (22.17MB/s) Copying the image: rootfs: 59% (22.21MB/s) Copying the image: rootfs: 60% (22.22MB/s) Copying the image: rootfs: 61% (22.34MB/s) Copying the image: rootfs: 62% (22.35MB/s) Copying the image: rootfs: 63% (22.51MB/s) Copying the image: rootfs: 64% (22.50MB/s) Copying the image: rootfs: 65% (22.53MB/s) Copying the image: rootfs: 66% (22.64MB/s) Copying the image: rootfs: 67% (22.65MB/s) Copying the image: rootfs: 68% (22.68MB/s) Copying the image: rootfs: 69% (22.81MB/s) Copying the image: rootfs: 69% (22.84MB/s) Copying the image: rootfs: 70% (22.94MB/s) Copying the image: rootfs: 71% (22.94MB/s) Copying the image: rootfs: 72% (22.96MB/s) Copying 
the image: rootfs: 73% (23.09MB/s) Copying the image: rootfs: 74% (23.09MB/s) Copying the image: rootfs: 75% (23.11MB/s) Copying the image: rootfs: 76% (23.21MB/s) Copying the image: rootfs: 77% (23.22MB/s) Copying the image: rootfs: 78% (23.30MB/s) Copying the image: rootfs: 79% (23.35MB/s) Copying the image: rootfs: 80% (23.46MB/s) Copying the image: rootfs: 81% (23.46MB/s) Copying the image: rootfs: 82% (23.43MB/s) Copying the image: rootfs: 83% (23.54MB/s) Copying the image: rootfs: 84% (23.55MB/s) Copying the image: rootfs: 85% (23.55MB/s) Copying the image: rootfs: 86% (23.65MB/s) Copying the image: rootfs: 87% (23.65MB/s) Copying the image: rootfs: 88% (23.70MB/s) Copying the image: rootfs: 88% (23.74MB/s) Copying the image: rootfs: 89% (23.76MB/s) Copying the image: rootfs: 90% (23.82MB/s) Copying the image: rootfs: 91% (23.83MB/s) Copying the image: rootfs: 92% (23.91MB/s) Copying the image: rootfs: 93% (23.90MB/s) Copying the image: rootfs: 94% (23.96MB/s) Copying the image: rootfs: 95% (23.98MB/s) Copying the image: rootfs: 96% (24.01MB/s) Copying the image: rootfs: 97% (24.05MB/s) Copying the image: rootfs: 98% (23.83MB/s) Copying the image: rootfs: 98% (23.98MB/s) Copying the image: rootfs: 99% (23.94MB/s) Copying the image: rootfs: 100% (24.03MB/s) Image copied successfully! 
++ lxc launch ubuntu:bionic juju-bionic-base +Creating juju-bionic-base + Retrieving image: Unpack: 100% (3.70GB/s) Retrieving image: Unpack: 100% (3.70GB/s) Starting juju-bionic-base + Remapping container filesystem + sleep 15 ++ lxc exec juju-bionic-base -- apt-get update -y +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Get:2 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB] +Get:3 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB] +Get:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB] +Get:5 http://archive.ubuntu.com/ubuntu bionic/universe amd64 Packages [8570 kB] +Get:6 http://archive.ubuntu.com/ubuntu bionic/universe Translation-en [4941 kB] +Get:7 http://archive.ubuntu.com/ubuntu bionic/multiverse amd64 Packages [151 kB] +Get:8 http://archive.ubuntu.com/ubuntu bionic/multiverse Translation-en [108 kB] +Get:9 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [2192 kB] +Get:10 http://archive.ubuntu.com/ubuntu bionic-updates/main Translation-en [430 kB] +Get:11 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [1748 kB] +Get:12 http://archive.ubuntu.com/ubuntu bionic-updates/universe Translation-en [375 kB] +Get:13 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse amd64 Packages [27.3 kB] +Get:14 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse Translation-en [6808 B] +Get:15 http://archive.ubuntu.com/ubuntu bionic-backports/main amd64 Packages [10.0 kB] +Get:16 http://archive.ubuntu.com/ubuntu bionic-backports/main Translation-en [4764 B] +Get:17 http://archive.ubuntu.com/ubuntu bionic-backports/universe amd64 Packages [10.3 kB] +Get:18 http://archive.ubuntu.com/ubuntu bionic-backports/universe Translation-en [4588 B] +Get:19 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [1846 kB] +Get:20 http://security.ubuntu.com/ubuntu bionic-security/main Translation-en [338 kB] +Get:21 http://security.ubuntu.com/ubuntu 
bionic-security/universe amd64 Packages [1137 kB] +Get:22 http://security.ubuntu.com/ubuntu bionic-security/universe Translation-en [259 kB] +Get:23 http://security.ubuntu.com/ubuntu bionic-security/multiverse amd64 Packages [20.9 kB] +Get:24 http://security.ubuntu.com/ubuntu bionic-security/multiverse Translation-en [4732 B] +Fetched 22.4 MB in 18s (1227 kB/s) +Reading package lists... ++ lxc exec juju-bionic-base -- apt-get upgrade -y +Reading package lists... +Building dependency tree... +Reading state information... +Calculating upgrade... +The following package was automatically installed and is no longer required: + libfreetype6 +Use 'apt autoremove' to remove it. +The following packages will be upgraded: + libntfs-3g88 libssl1.0.0 ntfs-3g squashfs-tools +4 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. +4 standard security updates +Need to get 1732 kB of archives. +After this operation, 6144 B of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 ntfs-3g amd64 1:2017.3.23-2ubuntu0.18.04.3 [385 kB] +Get:2 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libntfs-3g88 amd64 1:2017.3.23-2ubuntu0.18.04.3 [148 kB] +Get:3 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libssl1.0.0 amd64 1.0.2n-1ubuntu5.7 [1089 kB] +Get:4 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 squashfs-tools amd64 1:4.3-6ubuntu0.18.04.3 [111 kB] +dpkg-preconfigure: unable to re-open stdin: No such file or directory +Fetched 1732 kB in 0s (4927 kB/s) +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 
80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 28944 files and directories currently installed.) +Preparing to unpack .../ntfs-3g_1%3a2017.3.23-2ubuntu0.18.04.3_amd64.deb ... +Unpacking ntfs-3g (1:2017.3.23-2ubuntu0.18.04.3) over (1:2017.3.23-2ubuntu0.18.04.2) ... +Preparing to unpack .../libntfs-3g88_1%3a2017.3.23-2ubuntu0.18.04.3_amd64.deb ... +Unpacking libntfs-3g88 (1:2017.3.23-2ubuntu0.18.04.3) over (1:2017.3.23-2ubuntu0.18.04.2) ... +Preparing to unpack .../libssl1.0.0_1.0.2n-1ubuntu5.7_amd64.deb ... +Unpacking libssl1.0.0:amd64 (1.0.2n-1ubuntu5.7) over (1.0.2n-1ubuntu5.6) ... +Preparing to unpack .../squashfs-tools_1%3a4.3-6ubuntu0.18.04.3_amd64.deb ... +Unpacking squashfs-tools (1:4.3-6ubuntu0.18.04.3) over (1:4.3-6ubuntu0.18.04.2) ... +Setting up libssl1.0.0:amd64 (1.0.2n-1ubuntu5.7) ... +Setting up squashfs-tools (1:4.3-6ubuntu0.18.04.3) ... +Setting up libntfs-3g88 (1:2017.3.23-2ubuntu0.18.04.3) ... +Setting up ntfs-3g (1:2017.3.23-2ubuntu0.18.04.3) ... +Processing triggers for initramfs-tools (0.130ubuntu3.13) ... +Processing triggers for libc-bin (2.27-3ubuntu1.4) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... ++ lxc exec juju-bionic-base -- apt-get install -y curl cpu-checker bridge-utils cloud-utils tmux ubuntu-fan gcc build-essential python3-pip python3-setuptools python3-yaml virtualenv +Reading package lists... +Building dependency tree... +Reading state information... +python3-yaml is already the newest version (3.12-1build2). +python3-yaml set to manually installed. +curl is already the newest version (7.58.0-2ubuntu3.14). +curl set to manually installed. +tmux is already the newest version (2.6-3ubuntu0.2). +tmux set to manually installed. +The following package was automatically installed and is no longer required: + libfreetype6 +Use 'apt autoremove' to remove it. 
+The following additional packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu cloud-image-utils cpp + cpp-7 dh-python dpkg-dev fakeroot g++ g++-7 gcc-7 gcc-7-base genisoimage + ibverbs-providers libaio1 libalgorithm-diff-perl libalgorithm-diff-xs-perl + libalgorithm-merge-perl libasan4 libatomic1 libbinutils libc-dev-bin + libc6-dev libcc1-0 libcilkrts5 libdpkg-perl libexpat1-dev libfakeroot + libfile-fcntllock-perl libgcc-7-dev libgomp1 libibverbs1 libiscsi7 libisl19 + libitm1 liblsan0 libmpc3 libmpx2 libnl-3-200 libnl-route-3-200 libnspr4 + libnss3 libpython3-dev libpython3.6-dev libquadmath0 librados2 librbd1 + libstdc++-7-dev libtsan0 libubsan0 linux-libc-dev make manpages-dev + msr-tools python-pip-whl python3-crypto python3-dev python3-distutils + python3-keyring python3-keyrings.alt python3-lib2to3 python3-secretstorage + python3-virtualenv python3-wheel python3-xdg python3.6-dev qemu-block-extra + qemu-utils sharutils +Suggested packages: + binutils-doc ifupdown cloud-utils-euca mtools cpp-doc gcc-7-locales + debian-keyring g++-multilib g++-7-multilib gcc-7-doc libstdc++6-7-dbg + gcc-multilib autoconf automake libtool flex bison gdb gcc-doc gcc-7-multilib + libgcc1-dbg libgomp1-dbg libitm1-dbg libatomic1-dbg libasan4-dbg + liblsan0-dbg libtsan0-dbg libubsan0-dbg libcilkrts5-dbg libmpx2-dbg + libquadmath0-dbg wodim cdrkit-doc glibc-doc bzr libstdc++-7-doc make-doc + python-crypto-doc gnome-keyring libkf5wallet-bin gir1.2-gnomekeyring-1.0 + python-secretstorage-doc python-setuptools-doc debootstrap sharutils-doc + bsd-mailx | mailx +The following NEW packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu bridge-utils + build-essential cloud-image-utils cloud-utils cpp cpp-7 cpu-checker + dh-python dpkg-dev fakeroot g++ g++-7 gcc gcc-7 gcc-7-base genisoimage + ibverbs-providers libaio1 libalgorithm-diff-perl libalgorithm-diff-xs-perl + libalgorithm-merge-perl libasan4 libatomic1 libbinutils libc-dev-bin + 
libc6-dev libcc1-0 libcilkrts5 libdpkg-perl libexpat1-dev libfakeroot + libfile-fcntllock-perl libgcc-7-dev libgomp1 libibverbs1 libiscsi7 libisl19 + libitm1 liblsan0 libmpc3 libmpx2 libnl-3-200 libnl-route-3-200 libnspr4 + libnss3 libpython3-dev libpython3.6-dev libquadmath0 librados2 librbd1 + libstdc++-7-dev libtsan0 libubsan0 linux-libc-dev make manpages-dev + msr-tools python-pip-whl python3-crypto python3-dev python3-distutils + python3-keyring python3-keyrings.alt python3-lib2to3 python3-pip + python3-secretstorage python3-setuptools python3-virtualenv python3-wheel + python3-xdg python3.6-dev qemu-block-extra qemu-utils sharutils ubuntu-fan + virtualenv +0 upgraded, 79 newly installed, 0 to remove and 0 not upgraded. +Need to get 98.2 MB of archives. +After this operation, 283 MB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 binutils-common amd64 2.30-21ubuntu1~18.04.5 [197 kB] +Get:2 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libbinutils amd64 2.30-21ubuntu1~18.04.5 [489 kB] +Get:3 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 binutils-x86-64-linux-gnu amd64 2.30-21ubuntu1~18.04.5 [1839 kB] +Get:4 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 binutils amd64 2.30-21ubuntu1~18.04.5 [3388 B] +Get:5 http://archive.ubuntu.com/ubuntu bionic/main amd64 bridge-utils amd64 1.5-15ubuntu1 [30.1 kB] +Get:6 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libc-dev-bin amd64 2.27-3ubuntu1.4 [71.8 kB] +Get:7 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 linux-libc-dev amd64 4.15.0-154.161 [988 kB] +Get:8 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libc6-dev amd64 2.27-3ubuntu1.4 [2585 kB] +Get:9 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc-7-base amd64 7.5.0-3ubuntu1~18.04 [18.3 kB] +Get:10 http://archive.ubuntu.com/ubuntu bionic/main amd64 libisl19 amd64 0.19-1 [551 kB] +Get:11 http://archive.ubuntu.com/ubuntu 
bionic/main amd64 libmpc3 amd64 1.1.0-1 [40.8 kB] +Get:12 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 cpp-7 amd64 7.5.0-3ubuntu1~18.04 [8591 kB] +Get:13 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 cpp amd64 4:7.4.0-1ubuntu2.3 [27.7 kB] +Get:14 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libcc1-0 amd64 8.4.0-1ubuntu1~18.04 [39.4 kB] +Get:15 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libgomp1 amd64 8.4.0-1ubuntu1~18.04 [76.5 kB] +Get:16 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libitm1 amd64 8.4.0-1ubuntu1~18.04 [27.9 kB] +Get:17 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libatomic1 amd64 8.4.0-1ubuntu1~18.04 [9192 B] +Get:18 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libasan4 amd64 7.5.0-3ubuntu1~18.04 [358 kB] +Get:19 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 liblsan0 amd64 8.4.0-1ubuntu1~18.04 [133 kB] +Get:20 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libtsan0 amd64 8.4.0-1ubuntu1~18.04 [288 kB] +Get:21 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libubsan0 amd64 7.5.0-3ubuntu1~18.04 [126 kB] +Get:22 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libcilkrts5 amd64 7.5.0-3ubuntu1~18.04 [42.5 kB] +Get:23 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libmpx2 amd64 8.4.0-1ubuntu1~18.04 [11.6 kB] +Get:24 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libquadmath0 amd64 8.4.0-1ubuntu1~18.04 [134 kB] +Get:25 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libgcc-7-dev amd64 7.5.0-3ubuntu1~18.04 [2378 kB] +Get:26 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc-7 amd64 7.5.0-3ubuntu1~18.04 [9381 kB] +Get:27 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc amd64 4:7.4.0-1ubuntu2.3 [5184 B] +Get:28 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libstdc++-7-dev amd64 7.5.0-3ubuntu1~18.04 [1471 kB] +Get:29 
http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 g++-7 amd64 7.5.0-3ubuntu1~18.04 [9697 kB] +Get:30 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 g++ amd64 4:7.4.0-1ubuntu2.3 [1568 B] +Get:31 http://archive.ubuntu.com/ubuntu bionic/main amd64 make amd64 4.1-9.1ubuntu1 [154 kB] +Get:32 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libdpkg-perl all 1.19.0.5ubuntu2.3 [211 kB] +Get:33 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 dpkg-dev all 1.19.0.5ubuntu2.3 [607 kB] +Get:34 http://archive.ubuntu.com/ubuntu bionic/main amd64 build-essential amd64 12.4ubuntu1 [4758 B] +Get:35 http://archive.ubuntu.com/ubuntu bionic/main amd64 msr-tools amd64 1.3-2build1 [9760 B] +Get:36 http://archive.ubuntu.com/ubuntu bionic/main amd64 cpu-checker amd64 0.7-0ubuntu7 [6862 B] +Get:37 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-lib2to3 all 3.6.9-1~18.04 [77.4 kB] +Get:38 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-distutils all 3.6.9-1~18.04 [144 kB] +Get:39 http://archive.ubuntu.com/ubuntu bionic/main amd64 dh-python all 3.20180325ubuntu2 [89.2 kB] +Get:40 http://archive.ubuntu.com/ubuntu bionic/main amd64 libfakeroot amd64 1.22-2ubuntu1 [25.9 kB] +Get:41 http://archive.ubuntu.com/ubuntu bionic/main amd64 fakeroot amd64 1.22-2ubuntu1 [62.3 kB] +Get:42 http://archive.ubuntu.com/ubuntu bionic/main amd64 genisoimage amd64 9:1.1.11-3ubuntu2 [328 kB] +Get:43 http://archive.ubuntu.com/ubuntu bionic/main amd64 libnl-3-200 amd64 3.2.29-0ubuntu3 [52.8 kB] +Get:44 http://archive.ubuntu.com/ubuntu bionic/main amd64 libnl-route-3-200 amd64 3.2.29-0ubuntu3 [146 kB] +Get:45 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libibverbs1 amd64 17.1-1ubuntu0.2 [44.4 kB] +Get:46 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 ibverbs-providers amd64 17.1-1ubuntu0.2 [160 kB] +Get:47 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libaio1 amd64 0.3.110-5ubuntu0.1 [6476 B] +Get:48 
http://archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-diff-perl all 1.19.03-1 [47.6 kB] +Get:49 http://archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-diff-xs-perl amd64 0.04-5 [11.1 kB] +Get:50 http://archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-merge-perl all 0.08-3 [12.0 kB] +Get:51 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libexpat1-dev amd64 2.2.5-3ubuntu0.2 [122 kB] +Get:52 http://archive.ubuntu.com/ubuntu bionic/main amd64 libfile-fcntllock-perl amd64 0.22-3build2 [33.2 kB] +Get:53 http://archive.ubuntu.com/ubuntu bionic/main amd64 libiscsi7 amd64 1.17.0-1.1 [55.4 kB] +Get:54 http://archive.ubuntu.com/ubuntu bionic/main amd64 libnspr4 amd64 2:4.18-1ubuntu1 [112 kB] +Get:55 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libnss3 amd64 2:3.35-2ubuntu2.12 [1220 kB] +Get:56 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libpython3.6-dev amd64 3.6.9-1~18.04ubuntu1.4 [44.9 MB] +Get:57 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libpython3-dev amd64 3.6.7-1~18.04 [7328 B] +Get:58 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 librados2 amd64 12.2.13-0ubuntu0.18.04.8 [2724 kB] +Get:59 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 librbd1 amd64 12.2.13-0ubuntu0.18.04.8 [923 kB] +Get:60 http://archive.ubuntu.com/ubuntu bionic/main amd64 manpages-dev all 4.15-1 [2217 kB] +Get:61 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 python-pip-whl all 9.0.1-2.3~ubuntu1.18.04.5 [1653 kB] +Get:62 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-crypto amd64 2.6.1-8ubuntu2 [244 kB] +Get:63 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3.6-dev amd64 3.6.9-1~18.04ubuntu1.4 [508 kB] +Get:64 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-dev amd64 3.6.7-1~18.04 [1288 B] +Get:65 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-secretstorage all 2.3.1-2 [12.1 kB] +Get:66 http://archive.ubuntu.com/ubuntu 
bionic/main amd64 python3-keyring all 10.6.0-1 [26.7 kB] +Get:67 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-keyrings.alt all 3.0-1 [16.6 kB] +Get:68 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 python3-pip all 9.0.1-2.3~ubuntu1.18.04.5 [114 kB] +Get:69 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-setuptools all 39.0.1-2 [248 kB] +Get:70 http://archive.ubuntu.com/ubuntu bionic/universe amd64 python3-virtualenv all 15.1.0+ds-1.1 [43.4 kB] +Get:71 http://archive.ubuntu.com/ubuntu bionic/universe amd64 python3-wheel all 0.30.0-0.2 [36.5 kB] +Get:72 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-xdg all 0.25-4ubuntu1.1 [31.3 kB] +Get:73 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 qemu-block-extra amd64 1:2.11+dfsg-1ubuntu7.37 [41.4 kB] +Get:74 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 qemu-utils amd64 1:2.11+dfsg-1ubuntu7.37 [869 kB] +Get:75 http://archive.ubuntu.com/ubuntu bionic/main amd64 sharutils amd64 1:4.15.2-3 [155 kB] +Get:76 http://archive.ubuntu.com/ubuntu bionic/main amd64 ubuntu-fan all 0.12.10 [34.7 kB] +Get:77 http://archive.ubuntu.com/ubuntu bionic/universe amd64 virtualenv all 15.1.0+ds-1.1 [4476 B] +Get:78 http://archive.ubuntu.com/ubuntu bionic/main amd64 cloud-image-utils all 0.30-0ubuntu5 [16.9 kB] +Get:79 http://archive.ubuntu.com/ubuntu bionic/main amd64 cloud-utils all 0.30-0ubuntu5 [1596 B] +dpkg-preconfigure: unable to re-open stdin: No such file or directory +Fetched 98.2 MB in 4s (23.8 MB/s) +Selecting previously unselected package binutils-common:amd64. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 
70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 28944 files and directories currently installed.) +Preparing to unpack .../00-binutils-common_2.30-21ubuntu1~18.04.5_amd64.deb ... +Unpacking binutils-common:amd64 (2.30-21ubuntu1~18.04.5) ... +Selecting previously unselected package libbinutils:amd64. +Preparing to unpack .../01-libbinutils_2.30-21ubuntu1~18.04.5_amd64.deb ... +Unpacking libbinutils:amd64 (2.30-21ubuntu1~18.04.5) ... +Selecting previously unselected package binutils-x86-64-linux-gnu. +Preparing to unpack .../02-binutils-x86-64-linux-gnu_2.30-21ubuntu1~18.04.5_amd64.deb ... +Unpacking binutils-x86-64-linux-gnu (2.30-21ubuntu1~18.04.5) ... +Selecting previously unselected package binutils. +Preparing to unpack .../03-binutils_2.30-21ubuntu1~18.04.5_amd64.deb ... +Unpacking binutils (2.30-21ubuntu1~18.04.5) ... +Selecting previously unselected package bridge-utils. +Preparing to unpack .../04-bridge-utils_1.5-15ubuntu1_amd64.deb ... +Unpacking bridge-utils (1.5-15ubuntu1) ... +Selecting previously unselected package libc-dev-bin. +Preparing to unpack .../05-libc-dev-bin_2.27-3ubuntu1.4_amd64.deb ... +Unpacking libc-dev-bin (2.27-3ubuntu1.4) ... +Selecting previously unselected package linux-libc-dev:amd64. +Preparing to unpack .../06-linux-libc-dev_4.15.0-154.161_amd64.deb ... +Unpacking linux-libc-dev:amd64 (4.15.0-154.161) ... +Selecting previously unselected package libc6-dev:amd64. +Preparing to unpack .../07-libc6-dev_2.27-3ubuntu1.4_amd64.deb ... +Unpacking libc6-dev:amd64 (2.27-3ubuntu1.4) ... +Selecting previously unselected package gcc-7-base:amd64. +Preparing to unpack .../08-gcc-7-base_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking gcc-7-base:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package libisl19:amd64. +Preparing to unpack .../09-libisl19_0.19-1_amd64.deb ... 
+Unpacking libisl19:amd64 (0.19-1) ... +Selecting previously unselected package libmpc3:amd64. +Preparing to unpack .../10-libmpc3_1.1.0-1_amd64.deb ... +Unpacking libmpc3:amd64 (1.1.0-1) ... +Selecting previously unselected package cpp-7. +Preparing to unpack .../11-cpp-7_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking cpp-7 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package cpp. +Preparing to unpack .../12-cpp_4%3a7.4.0-1ubuntu2.3_amd64.deb ... +Unpacking cpp (4:7.4.0-1ubuntu2.3) ... +Selecting previously unselected package libcc1-0:amd64. +Preparing to unpack .../13-libcc1-0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libcc1-0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libgomp1:amd64. +Preparing to unpack .../14-libgomp1_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libgomp1:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libitm1:amd64. +Preparing to unpack .../15-libitm1_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libitm1:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libatomic1:amd64. +Preparing to unpack .../16-libatomic1_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libatomic1:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libasan4:amd64. +Preparing to unpack .../17-libasan4_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libasan4:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package liblsan0:amd64. +Preparing to unpack .../18-liblsan0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking liblsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libtsan0:amd64. +Preparing to unpack .../19-libtsan0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libtsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libubsan0:amd64. +Preparing to unpack .../20-libubsan0_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libubsan0:amd64 (7.5.0-3ubuntu1~18.04) ... 
+Selecting previously unselected package libcilkrts5:amd64. +Preparing to unpack .../21-libcilkrts5_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libcilkrts5:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package libmpx2:amd64. +Preparing to unpack .../22-libmpx2_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libmpx2:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libquadmath0:amd64. +Preparing to unpack .../23-libquadmath0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libquadmath0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libgcc-7-dev:amd64. +Preparing to unpack .../24-libgcc-7-dev_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libgcc-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package gcc-7. +Preparing to unpack .../25-gcc-7_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking gcc-7 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package gcc. +Preparing to unpack .../26-gcc_4%3a7.4.0-1ubuntu2.3_amd64.deb ... +Unpacking gcc (4:7.4.0-1ubuntu2.3) ... +Selecting previously unselected package libstdc++-7-dev:amd64. +Preparing to unpack .../27-libstdc++-7-dev_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libstdc++-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package g++-7. +Preparing to unpack .../28-g++-7_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking g++-7 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package g++. +Preparing to unpack .../29-g++_4%3a7.4.0-1ubuntu2.3_amd64.deb ... +Unpacking g++ (4:7.4.0-1ubuntu2.3) ... +Selecting previously unselected package make. +Preparing to unpack .../30-make_4.1-9.1ubuntu1_amd64.deb ... +Unpacking make (4.1-9.1ubuntu1) ... +Selecting previously unselected package libdpkg-perl. +Preparing to unpack .../31-libdpkg-perl_1.19.0.5ubuntu2.3_all.deb ... +Unpacking libdpkg-perl (1.19.0.5ubuntu2.3) ... +Selecting previously unselected package dpkg-dev. 
+Preparing to unpack .../32-dpkg-dev_1.19.0.5ubuntu2.3_all.deb ... +Unpacking dpkg-dev (1.19.0.5ubuntu2.3) ... +Selecting previously unselected package build-essential. +Preparing to unpack .../33-build-essential_12.4ubuntu1_amd64.deb ... +Unpacking build-essential (12.4ubuntu1) ... +Selecting previously unselected package msr-tools. +Preparing to unpack .../34-msr-tools_1.3-2build1_amd64.deb ... +Unpacking msr-tools (1.3-2build1) ... +Selecting previously unselected package cpu-checker. +Preparing to unpack .../35-cpu-checker_0.7-0ubuntu7_amd64.deb ... +Unpacking cpu-checker (0.7-0ubuntu7) ... +Selecting previously unselected package python3-lib2to3. +Preparing to unpack .../36-python3-lib2to3_3.6.9-1~18.04_all.deb ... +Unpacking python3-lib2to3 (3.6.9-1~18.04) ... +Selecting previously unselected package python3-distutils. +Preparing to unpack .../37-python3-distutils_3.6.9-1~18.04_all.deb ... +Unpacking python3-distutils (3.6.9-1~18.04) ... +Selecting previously unselected package dh-python. +Preparing to unpack .../38-dh-python_3.20180325ubuntu2_all.deb ... +Unpacking dh-python (3.20180325ubuntu2) ... +Selecting previously unselected package libfakeroot:amd64. +Preparing to unpack .../39-libfakeroot_1.22-2ubuntu1_amd64.deb ... +Unpacking libfakeroot:amd64 (1.22-2ubuntu1) ... +Selecting previously unselected package fakeroot. +Preparing to unpack .../40-fakeroot_1.22-2ubuntu1_amd64.deb ... +Unpacking fakeroot (1.22-2ubuntu1) ... +Selecting previously unselected package genisoimage. +Preparing to unpack .../41-genisoimage_9%3a1.1.11-3ubuntu2_amd64.deb ... +Unpacking genisoimage (9:1.1.11-3ubuntu2) ... +Selecting previously unselected package libnl-3-200:amd64. +Preparing to unpack .../42-libnl-3-200_3.2.29-0ubuntu3_amd64.deb ... +Unpacking libnl-3-200:amd64 (3.2.29-0ubuntu3) ... +Selecting previously unselected package libnl-route-3-200:amd64. +Preparing to unpack .../43-libnl-route-3-200_3.2.29-0ubuntu3_amd64.deb ... 
+Unpacking libnl-route-3-200:amd64 (3.2.29-0ubuntu3) ... +Selecting previously unselected package libibverbs1:amd64. +Preparing to unpack .../44-libibverbs1_17.1-1ubuntu0.2_amd64.deb ... +Unpacking libibverbs1:amd64 (17.1-1ubuntu0.2) ... +Selecting previously unselected package ibverbs-providers:amd64. +Preparing to unpack .../45-ibverbs-providers_17.1-1ubuntu0.2_amd64.deb ... +Unpacking ibverbs-providers:amd64 (17.1-1ubuntu0.2) ... +Selecting previously unselected package libaio1:amd64. +Preparing to unpack .../46-libaio1_0.3.110-5ubuntu0.1_amd64.deb ... +Unpacking libaio1:amd64 (0.3.110-5ubuntu0.1) ... +Selecting previously unselected package libalgorithm-diff-perl. +Preparing to unpack .../47-libalgorithm-diff-perl_1.19.03-1_all.deb ... +Unpacking libalgorithm-diff-perl (1.19.03-1) ... +Selecting previously unselected package libalgorithm-diff-xs-perl. +Preparing to unpack .../48-libalgorithm-diff-xs-perl_0.04-5_amd64.deb ... +Unpacking libalgorithm-diff-xs-perl (0.04-5) ... +Selecting previously unselected package libalgorithm-merge-perl. +Preparing to unpack .../49-libalgorithm-merge-perl_0.08-3_all.deb ... +Unpacking libalgorithm-merge-perl (0.08-3) ... +Selecting previously unselected package libexpat1-dev:amd64. +Preparing to unpack .../50-libexpat1-dev_2.2.5-3ubuntu0.2_amd64.deb ... +Unpacking libexpat1-dev:amd64 (2.2.5-3ubuntu0.2) ... +Selecting previously unselected package libfile-fcntllock-perl. +Preparing to unpack .../51-libfile-fcntllock-perl_0.22-3build2_amd64.deb ... +Unpacking libfile-fcntllock-perl (0.22-3build2) ... +Selecting previously unselected package libiscsi7:amd64. +Preparing to unpack .../52-libiscsi7_1.17.0-1.1_amd64.deb ... +Unpacking libiscsi7:amd64 (1.17.0-1.1) ... +Selecting previously unselected package libnspr4:amd64. +Preparing to unpack .../53-libnspr4_2%3a4.18-1ubuntu1_amd64.deb ... +Unpacking libnspr4:amd64 (2:4.18-1ubuntu1) ... +Selecting previously unselected package libnss3:amd64. 
+Preparing to unpack .../54-libnss3_2%3a3.35-2ubuntu2.12_amd64.deb ... +Unpacking libnss3:amd64 (2:3.35-2ubuntu2.12) ... +Selecting previously unselected package libpython3.6-dev:amd64. +Preparing to unpack .../55-libpython3.6-dev_3.6.9-1~18.04ubuntu1.4_amd64.deb ... +Unpacking libpython3.6-dev:amd64 (3.6.9-1~18.04ubuntu1.4) ... +Selecting previously unselected package libpython3-dev:amd64. +Preparing to unpack .../56-libpython3-dev_3.6.7-1~18.04_amd64.deb ... +Unpacking libpython3-dev:amd64 (3.6.7-1~18.04) ... +Selecting previously unselected package librados2. +Preparing to unpack .../57-librados2_12.2.13-0ubuntu0.18.04.8_amd64.deb ... +Unpacking librados2 (12.2.13-0ubuntu0.18.04.8) ... +Selecting previously unselected package librbd1. +Preparing to unpack .../58-librbd1_12.2.13-0ubuntu0.18.04.8_amd64.deb ... +Unpacking librbd1 (12.2.13-0ubuntu0.18.04.8) ... +Selecting previously unselected package manpages-dev. +Preparing to unpack .../59-manpages-dev_4.15-1_all.deb ... +Unpacking manpages-dev (4.15-1) ... +Selecting previously unselected package python-pip-whl. +Preparing to unpack .../60-python-pip-whl_9.0.1-2.3~ubuntu1.18.04.5_all.deb ... +Unpacking python-pip-whl (9.0.1-2.3~ubuntu1.18.04.5) ... +Selecting previously unselected package python3-crypto. +Preparing to unpack .../61-python3-crypto_2.6.1-8ubuntu2_amd64.deb ... +Unpacking python3-crypto (2.6.1-8ubuntu2) ... +Selecting previously unselected package python3.6-dev. +Preparing to unpack .../62-python3.6-dev_3.6.9-1~18.04ubuntu1.4_amd64.deb ... +Unpacking python3.6-dev (3.6.9-1~18.04ubuntu1.4) ... +Selecting previously unselected package python3-dev. +Preparing to unpack .../63-python3-dev_3.6.7-1~18.04_amd64.deb ... +Unpacking python3-dev (3.6.7-1~18.04) ... +Selecting previously unselected package python3-secretstorage. +Preparing to unpack .../64-python3-secretstorage_2.3.1-2_all.deb ... +Unpacking python3-secretstorage (2.3.1-2) ... +Selecting previously unselected package python3-keyring. 
+Preparing to unpack .../65-python3-keyring_10.6.0-1_all.deb ... +Unpacking python3-keyring (10.6.0-1) ... +Selecting previously unselected package python3-keyrings.alt. +Preparing to unpack .../66-python3-keyrings.alt_3.0-1_all.deb ... +Unpacking python3-keyrings.alt (3.0-1) ... +Selecting previously unselected package python3-pip. +Preparing to unpack .../67-python3-pip_9.0.1-2.3~ubuntu1.18.04.5_all.deb ... +Unpacking python3-pip (9.0.1-2.3~ubuntu1.18.04.5) ... +Selecting previously unselected package python3-setuptools. +Preparing to unpack .../68-python3-setuptools_39.0.1-2_all.deb ... +Unpacking python3-setuptools (39.0.1-2) ... +Selecting previously unselected package python3-virtualenv. +Preparing to unpack .../69-python3-virtualenv_15.1.0+ds-1.1_all.deb ... +Unpacking python3-virtualenv (15.1.0+ds-1.1) ... +Selecting previously unselected package python3-wheel. +Preparing to unpack .../70-python3-wheel_0.30.0-0.2_all.deb ... +Unpacking python3-wheel (0.30.0-0.2) ... +Selecting previously unselected package python3-xdg. +Preparing to unpack .../71-python3-xdg_0.25-4ubuntu1.1_all.deb ... +Unpacking python3-xdg (0.25-4ubuntu1.1) ... +Selecting previously unselected package qemu-block-extra:amd64. +Preparing to unpack .../72-qemu-block-extra_1%3a2.11+dfsg-1ubuntu7.37_amd64.deb ... +Unpacking qemu-block-extra:amd64 (1:2.11+dfsg-1ubuntu7.37) ... +Selecting previously unselected package qemu-utils. +Preparing to unpack .../73-qemu-utils_1%3a2.11+dfsg-1ubuntu7.37_amd64.deb ... +Unpacking qemu-utils (1:2.11+dfsg-1ubuntu7.37) ... +Selecting previously unselected package sharutils. +Preparing to unpack .../74-sharutils_1%3a4.15.2-3_amd64.deb ... +Unpacking sharutils (1:4.15.2-3) ... +Selecting previously unselected package ubuntu-fan. +Preparing to unpack .../75-ubuntu-fan_0.12.10_all.deb ... +Unpacking ubuntu-fan (0.12.10) ... +Selecting previously unselected package virtualenv. +Preparing to unpack .../76-virtualenv_15.1.0+ds-1.1_all.deb ... 
+Unpacking virtualenv (15.1.0+ds-1.1) ... +Selecting previously unselected package cloud-image-utils. +Preparing to unpack .../77-cloud-image-utils_0.30-0ubuntu5_all.deb ... +Unpacking cloud-image-utils (0.30-0ubuntu5) ... +Selecting previously unselected package cloud-utils. +Preparing to unpack .../78-cloud-utils_0.30-0ubuntu5_all.deb ... +Unpacking cloud-utils (0.30-0ubuntu5) ... +Setting up libquadmath0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libgomp1:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libatomic1:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up python-pip-whl (9.0.1-2.3~ubuntu1.18.04.5) ... +Setting up libcc1-0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up make (4.1-9.1ubuntu1) ... +Setting up python3-crypto (2.6.1-8ubuntu2) ... +Setting up libtsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up sharutils (1:4.15.2-3) ... +Setting up libiscsi7:amd64 (1.17.0-1.1) ... +Setting up python3-xdg (0.25-4ubuntu1.1) ... +Setting up python3-keyrings.alt (3.0-1) ... +Setting up linux-libc-dev:amd64 (4.15.0-154.161) ... +Setting up genisoimage (9:1.1.11-3ubuntu2) ... +Setting up libdpkg-perl (1.19.0.5ubuntu2.3) ... +Setting up python3-wheel (0.30.0-0.2) ... +Setting up msr-tools (1.3-2build1) ... +Setting up libnspr4:amd64 (2:4.18-1ubuntu1) ... +Setting up bridge-utils (1.5-15ubuntu1) ... +Setting up liblsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up gcc-7-base:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up binutils-common:amd64 (2.30-21ubuntu1~18.04.5) ... +Setting up libfile-fcntllock-perl (0.22-3build2) ... +Setting up libmpx2:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libaio1:amd64 (0.3.110-5ubuntu0.1) ... +Setting up ubuntu-fan (0.12.10) ... +Created symlink /etc/systemd/system/multi-user.target.wants/ubuntu-fan.service → /lib/systemd/system/ubuntu-fan.service. +Setting up cpu-checker (0.7-0ubuntu7) ... +Setting up libfakeroot:amd64 (1.22-2ubuntu1) ... +Setting up libalgorithm-diff-perl (1.19.03-1) ... +Setting up libmpc3:amd64 (1.1.0-1) ... 
+Setting up libc-dev-bin (2.27-3ubuntu1.4) ... +Setting up libnl-3-200:amd64 (3.2.29-0ubuntu3) ... +Setting up python3-lib2to3 (3.6.9-1~18.04) ... +Setting up python3-secretstorage (2.3.1-2) ... +Setting up manpages-dev (4.15-1) ... +Setting up libc6-dev:amd64 (2.27-3ubuntu1.4) ... +Setting up python3-distutils (3.6.9-1~18.04) ... +Setting up libitm1:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libisl19:amd64 (0.19-1) ... +Setting up libasan4:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up python3-keyring (10.6.0-1) ... +Setting up libbinutils:amd64 (2.30-21ubuntu1~18.04.5) ... +Setting up libcilkrts5:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up libubsan0:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up libnss3:amd64 (2:3.35-2ubuntu2.12) ... +Setting up libnl-route-3-200:amd64 (3.2.29-0ubuntu3) ... +Setting up fakeroot (1.22-2ubuntu1) ... +update-alternatives: using /usr/bin/fakeroot-sysv to provide /usr/bin/fakeroot (fakeroot) in auto mode +Setting up libgcc-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up cpp-7 (7.5.0-3ubuntu1~18.04) ... +Setting up libstdc++-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up libalgorithm-merge-perl (0.08-3) ... +Setting up libalgorithm-diff-xs-perl (0.04-5) ... +Setting up python3-virtualenv (15.1.0+ds-1.1) ... +Setting up python3-pip (9.0.1-2.3~ubuntu1.18.04.5) ... +Setting up libexpat1-dev:amd64 (2.2.5-3ubuntu0.2) ... +Setting up python3-setuptools (39.0.1-2) ... +Setting up dh-python (3.20180325ubuntu2) ... +Setting up virtualenv (15.1.0+ds-1.1) ... +Setting up binutils-x86-64-linux-gnu (2.30-21ubuntu1~18.04.5) ... +Setting up libibverbs1:amd64 (17.1-1ubuntu0.2) ... +Setting up cpp (4:7.4.0-1ubuntu2.3) ... +Setting up libpython3.6-dev:amd64 (3.6.9-1~18.04ubuntu1.4) ... +Setting up librados2 (12.2.13-0ubuntu0.18.04.8) ... +Setting up ibverbs-providers:amd64 (17.1-1ubuntu0.2) ... +Setting up binutils (2.30-21ubuntu1~18.04.5) ... +Setting up python3.6-dev (3.6.9-1~18.04ubuntu1.4) ... 
+Setting up libpython3-dev:amd64 (3.6.7-1~18.04) ... +Setting up gcc-7 (7.5.0-3ubuntu1~18.04) ... +Setting up g++-7 (7.5.0-3ubuntu1~18.04) ... +Setting up python3-dev (3.6.7-1~18.04) ... +Setting up librbd1 (12.2.13-0ubuntu0.18.04.8) ... +Setting up gcc (4:7.4.0-1ubuntu2.3) ... +Setting up qemu-block-extra:amd64 (1:2.11+dfsg-1ubuntu7.37) ... +Setting up qemu-utils (1:2.11+dfsg-1ubuntu7.37) ... +Setting up dpkg-dev (1.19.0.5ubuntu2.3) ... +Setting up g++ (4:7.4.0-1ubuntu2.3) ... +update-alternatives: using /usr/bin/g++ to provide /usr/bin/c++ (c++) in auto mode +Setting up cloud-image-utils (0.30-0ubuntu5) ... +Setting up build-essential (12.4ubuntu1) ... +Setting up cloud-utils (0.30-0ubuntu5) ... +Processing triggers for install-info (6.5.0.dfsg.1-2) ... +Processing triggers for libc-bin (2.27-3ubuntu1.4) ... +Processing triggers for systemd (237-3ubuntu10.51) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +Processing triggers for ureadahead (0.100.0-21) ... ++ echo 'Installing Juju agent 2.9.12-ubuntu-amd64' +Installing Juju agent 2.9.12-ubuntu-amd64 ++ lxc exec juju-bionic-base -- mkdir -p /var/lib/juju/tools/2.9.12-ubuntu-amd64/ ++ lxc exec juju-bionic-base -- curl -sS --connect-timeout 20 --noproxy '*' --insecure -o /var/lib/juju/tools/2.9.12-ubuntu-amd64/tools.tar.gz https://streams.canonical.com/juju/tools/agent/2.9.12/juju-2.9.12-ubuntu-amd64.tgz ++ lxc exec juju-bionic-base -- tar zxf /var/lib/juju/tools/2.9.12-ubuntu-amd64/tools.tar.gz -C /var/lib/juju/tools/2.9.12-ubuntu-amd64 ++ lxc stop juju-bionic-base ++ lxc image delete juju/bionic/amd64 +Error: not found ++ true ++ lxc image delete clean-bionic +++ date +%Y%m%d ++ lxc publish juju-bionic-base --alias juju/bionic/amd64 'description=bionic juju dev image (20210906)' + Publishing instance: Image pack: 1% (3.26MB/s) Publishing instance: Image pack: 2% (3.43MB/s) Publishing instance: Image pack: 3% (4.39MB/s) Publishing instance: Image pack: 4% (4.97MB/s) Publishing instance: Image pack: 5% 
(5.33MB/s) Publishing instance: Image pack: 6% (5.54MB/s) Publishing instance: Image pack: 7% (5.65MB/s) Publishing instance: Image pack: 8% (5.75MB/s) Publishing instance: Image pack: 9% (5.91MB/s) Publishing instance: Image pack: 10% (5.97MB/s) Publishing instance: Image pack: 10% (6.11MB/s) Publishing instance: Image pack: 11% (6.28MB/s) Publishing instance: Image pack: 12% (6.37MB/s) Publishing instance: Image pack: 13% (6.29MB/s) Publishing instance: Image pack: 14% (6.36MB/s) Publishing instance: Image pack: 15% (6.39MB/s) Publishing instance: Image pack: 16% (6.43MB/s) Publishing instance: Image pack: 17% (6.52MB/s) Publishing instance: Image pack: 18% (6.61MB/s) Publishing instance: Image pack: 19% (6.76MB/s) Publishing instance: Image pack: 19% (6.88MB/s) Publishing instance: Image pack: 20% (6.90MB/s) Publishing instance: Image pack: 21% (6.93MB/s) Publishing instance: Image pack: 22% (6.90MB/s) Publishing instance: Image pack: 23% (6.88MB/s) Publishing instance: Image pack: 24% (6.88MB/s) Publishing instance: Image pack: 25% (6.86MB/s) Publishing instance: Image pack: 26% (6.80MB/s) Publishing instance: Image pack: 27% (6.85MB/s) Publishing instance: Image pack: 28% (6.87MB/s) Publishing instance: Image pack: 28% (6.96MB/s) Publishing instance: Image pack: 29% (6.99MB/s) Publishing instance: Image pack: 30% (7.12MB/s) Publishing instance: Image pack: 31% (7.24MB/s) Publishing instance: Image pack: 32% (7.36MB/s) Publishing instance: Image pack: 33% (7.47MB/s) Publishing instance: Image pack: 34% (7.43MB/s) Publishing instance: Image pack: 35% (7.38MB/s) Publishing instance: Image pack: 36% (7.42MB/s) Publishing instance: Image pack: 37% (7.47MB/s) Publishing instance: Image pack: 37% (7.55MB/s) Publishing instance: Image pack: 38% (7.55MB/s) Publishing instance: Image pack: 39% (7.56MB/s) Publishing instance: Image pack: 40% (7.58MB/s) Publishing instance: Image pack: 41% (7.57MB/s) Publishing instance: Image pack: 42% (7.55MB/s) Publishing instance: 
Image pack: 43% (7.56MB/s) Publishing instance: Image pack: 44% (7.55MB/s) Publishing instance: Image pack: 45% (7.54MB/s) Publishing instance: Image pack: 46% (7.57MB/s) Publishing instance: Image pack: 46% (7.60MB/s) Publishing instance: Image pack: 47% (7.58MB/s) Publishing instance: Image pack: 48% (7.58MB/s) Publishing instance: Image pack: 49% (7.60MB/s) Publishing instance: Image pack: 50% (7.61MB/s) Publishing instance: Image pack: 51% (7.57MB/s) Publishing instance: Image pack: 52% (7.51MB/s) Publishing instance: Image pack: 53% (7.51MB/s) Publishing instance: Image pack: 54% (7.42MB/s) Publishing instance: Image pack: 55% (7.41MB/s) Publishing instance: Image pack: 55% (7.46MB/s) Publishing instance: Image pack: 56% (7.45MB/s) Publishing instance: Image pack: 57% (7.47MB/s) Publishing instance: Image pack: 58% (7.40MB/s) Publishing instance: Image pack: 59% (7.34MB/s) Publishing instance: Image pack: 60% (7.34MB/s) Publishing instance: Image pack: 61% (7.21MB/s) Publishing instance: Image pack: 62% (7.14MB/s) Publishing instance: Image pack: 63% (7.09MB/s) Publishing instance: Image pack: 64% (7.07MB/s) Publishing instance: Image pack: 64% (7.10MB/s) Publishing instance: Image pack: 65% (7.13MB/s) Publishing instance: Image pack: 66% (7.15MB/s) Publishing instance: Image pack: 67% (7.19MB/s) Publishing instance: Image pack: 68% (7.21MB/s) Publishing instance: Image pack: 69% (7.25MB/s) Publishing instance: Image pack: 70% (7.23MB/s) Publishing instance: Image pack: 71% (7.25MB/s) Publishing instance: Image pack: 72% (7.26MB/s) Publishing instance: Image pack: 73% (7.28MB/s) Publishing instance: Image pack: 73% (7.28MB/s) Publishing instance: Image pack: 74% (7.26MB/s) Publishing instance: Image pack: 75% (7.29MB/s) Publishing instance: Image pack: 76% (7.34MB/s) Publishing instance: Image pack: 77% (7.38MB/s) Publishing instance: Image pack: 78% (7.43MB/s) Publishing instance: Image pack: 79% (7.47MB/s) Publishing instance: Image pack: 80% (7.49MB/s) 
Publishing instance: Image pack: 81% (7.53MB/s) Publishing instance: Image pack: 82% (7.55MB/s) Publishing instance: Image pack: 83% (7.58MB/s) Publishing instance: Image pack: 83% (7.59MB/s) Publishing instance: Image pack: 84% (7.61MB/s) Publishing instance: Image pack: 85% (7.67MB/s) Publishing instance: Image pack: 86% (7.71MB/s) Publishing instance: Image pack: 87% (7.69MB/s) Publishing instance: Image pack: 88% (7.71MB/s) Publishing instance: Image pack: 89% (7.73MB/s) Publishing instance: Image pack: 90% (7.62MB/s) Publishing instance: Image pack: 91% (7.65MB/s) Publishing instance: Image pack: 92% (7.68MB/s) Publishing instance: Image pack: 92% (7.70MB/s) Publishing instance: Image pack: 93% (7.72MB/s) Publishing instance: Image pack: 94% (7.74MB/s) Publishing instance: Image pack: 95% (7.76MB/s) Publishing instance: Image pack: 96% (7.75MB/s) Publishing instance: Image pack: 97% (7.77MB/s) Publishing instance: Image pack: 98% (7.76MB/s) Publishing instance: Image pack: 99% (7.75MB/s) Publishing instance: Image pack: 100% (7.77MB/s) Publishing instance: Image pack: 100% (7.75MB/s) Publishing instance: Image pack: 100% (7.76MB/s) Instance published with fingerprint: 7f6877709cdc8ff35294f0919b996896d3330fc070b0bced20dd711710ab3f2e ++ lxc delete juju-bionic-base -f +Finished installation of juju + +k8s substrate added as cloud "k8scloud" with storage provisioned +by the existing "openebs-hostpath" storage class. +You can now bootstrap to this cloud by running 'juju bootstrap k8scloud'. +Creating Juju controller "osm" on k8scloud +Bootstrap to generic Kubernetes cluster +Fetching Juju Dashboard 0.8.1 +Creating k8s resources for controller "controller-osm" +Downloading images +Starting controller pod +Bootstrap agent now started +Contacting Juju controller at 192.168.64.19 to verify accessibility... 
+ +Bootstrap complete, controller "osm" is now available in namespace "controller-osm" + +Now you can run + juju add-model +to create a new model to deploy k8s workloads. +Can't load /home/ubuntu/.rnd into RNG +140324386386368:error:2406F079:random number generator:RAND_load_file:Cannot open file:../crypto/rand/randfile.c:88:Filename=/home/ubuntu/.rnd +Generating a RSA private key +............+++++ +................................................................................+++++ +writing new private key to '/tmp/.osm/client.key' +----- +Cloud "lxd-cloud" added to controller "osm". +WARNING loading credentials: credentials for cloud lxd-cloud not found +To upload a credential to the controller for cloud "lxd-cloud", use +* 'add-model' with --credential option or +* 'add-credential -c lxd-cloud'. +Using cloud "lxd-cloud" from the controller to verify credentials. +Controller credential "lxd-cloud" for user "admin" for cloud "lxd-cloud" on controller "osm" added. +For more information, see ‘juju show-credential lxd-cloud lxd-cloud’. 
+Pulling and generating docker images +Pulling docker images +Using default tag: latest +latest: Pulling from wurstmeister/zookeeper +a3ed95caeb02: Pulling fs layer +ef38b711a50f: Pulling fs layer +e057c74597c7: Pulling fs layer +666c214f6385: Pulling fs layer +c3d6a96f1ffc: Pulling fs layer +3fe26a83e0ca: Pulling fs layer +3d3a7dd3a3b1: Pulling fs layer +f8cc938abe5f: Pulling fs layer +9978b75f7a58: Pulling fs layer +4d4dbcc8f8cc: Pulling fs layer +8b130a9baa49: Pulling fs layer +6b9611650a73: Pulling fs layer +5df5aac51927: Pulling fs layer +76eea4448d9b: Pulling fs layer +8b66990876c6: Pulling fs layer +f0dd38204b6f: Pulling fs layer +666c214f6385: Waiting +c3d6a96f1ffc: Waiting +3fe26a83e0ca: Waiting +3d3a7dd3a3b1: Waiting +f8cc938abe5f: Waiting +9978b75f7a58: Waiting +4d4dbcc8f8cc: Waiting +8b130a9baa49: Waiting +6b9611650a73: Waiting +5df5aac51927: Waiting +76eea4448d9b: Waiting +8b66990876c6: Waiting +f0dd38204b6f: Waiting +e057c74597c7: Verifying Checksum +e057c74597c7: Download complete +a3ed95caeb02: Verifying Checksum +a3ed95caeb02: Download complete +666c214f6385: Verifying Checksum +666c214f6385: Download complete +c3d6a96f1ffc: Verifying Checksum +c3d6a96f1ffc: Download complete +3fe26a83e0ca: Verifying Checksum +3fe26a83e0ca: Download complete +a3ed95caeb02: Pull complete +f8cc938abe5f: Verifying Checksum +f8cc938abe5f: Download complete +9978b75f7a58: Verifying Checksum +9978b75f7a58: Download complete +4d4dbcc8f8cc: Verifying Checksum +4d4dbcc8f8cc: Download complete +ef38b711a50f: Verifying Checksum +ef38b711a50f: Download complete +6b9611650a73: Verifying Checksum +6b9611650a73: Download complete +8b130a9baa49: Verifying Checksum +8b130a9baa49: Download complete +76eea4448d9b: Verifying Checksum +76eea4448d9b: Download complete +5df5aac51927: Verifying Checksum +5df5aac51927: Download complete +8b66990876c6: Verifying Checksum +8b66990876c6: Download complete +f0dd38204b6f: Verifying Checksum +f0dd38204b6f: Download complete +3d3a7dd3a3b1: 
Verifying Checksum +3d3a7dd3a3b1: Download complete +ef38b711a50f: Pull complete +e057c74597c7: Pull complete +666c214f6385: Pull complete +c3d6a96f1ffc: Pull complete +3fe26a83e0ca: Pull complete +3d3a7dd3a3b1: Pull complete +f8cc938abe5f: Pull complete +9978b75f7a58: Pull complete +4d4dbcc8f8cc: Pull complete +8b130a9baa49: Pull complete +6b9611650a73: Pull complete +5df5aac51927: Pull complete +76eea4448d9b: Pull complete +8b66990876c6: Pull complete +f0dd38204b6f: Pull complete +Digest: sha256:7a7fd44a72104bfbd24a77844bad5fabc86485b036f988ea927d1780782a6680 +Status: Downloaded newer image for wurstmeister/zookeeper:latest +docker.io/wurstmeister/zookeeper:latest +2.11-1.0.2: Pulling from wurstmeister/kafka +e7c96db7181b: Pulling fs layer +f910a506b6cb: Pulling fs layer +b6abafe80f63: Pulling fs layer +68cb15346952: Pulling fs layer +e174218f225d: Pulling fs layer +f109a8cf0e78: Pulling fs layer +68cb15346952: Waiting +e174218f225d: Waiting +f109a8cf0e78: Waiting +f910a506b6cb: Verifying Checksum +f910a506b6cb: Download complete +e7c96db7181b: Verifying Checksum +e7c96db7181b: Download complete +e7c96db7181b: Pull complete +68cb15346952: Verifying Checksum +68cb15346952: Download complete +f910a506b6cb: Pull complete +f109a8cf0e78: Verifying Checksum +f109a8cf0e78: Download complete +b6abafe80f63: Verifying Checksum +b6abafe80f63: Download complete +e174218f225d: Verifying Checksum +e174218f225d: Download complete +b6abafe80f63: Pull complete +68cb15346952: Pull complete +e174218f225d: Pull complete +f109a8cf0e78: Pull complete +Digest: sha256:04fbc9021e73b6bc1fe3ecdfabaf5a66dbd6ef0b0e7c98c83ce5610d702304e9 +Status: Downloaded newer image for wurstmeister/kafka:2.11-1.0.2 +docker.io/wurstmeister/kafka:2.11-1.0.2 +Using default tag: latest +latest: Pulling from library/mongo +35807b77a593: Pulling fs layer +664b0ebdcc07: Pulling fs layer +d598f4d3c081: Pulling fs layer +291455135b00: Pulling fs layer +b46409342f13: Pulling fs layer +ff2b9c6e6f3a: Pulling fs layer 
+149f6335fc27: Pulling fs layer +baeb6f3bec76: Pulling fs layer +8617caab2de5: Pulling fs layer +067d70de7828: Pulling fs layer +291455135b00: Waiting +b46409342f13: Waiting +ff2b9c6e6f3a: Waiting +149f6335fc27: Waiting +baeb6f3bec76: Waiting +8617caab2de5: Waiting +067d70de7828: Waiting +664b0ebdcc07: Verifying Checksum +664b0ebdcc07: Download complete +d598f4d3c081: Verifying Checksum +d598f4d3c081: Download complete +b46409342f13: Verifying Checksum +b46409342f13: Download complete +35807b77a593: Verifying Checksum +35807b77a593: Download complete +291455135b00: Verifying Checksum +291455135b00: Download complete +ff2b9c6e6f3a: Verifying Checksum +ff2b9c6e6f3a: Download complete +149f6335fc27: Verifying Checksum +149f6335fc27: Download complete +8617caab2de5: Verifying Checksum +8617caab2de5: Download complete +067d70de7828: Verifying Checksum +067d70de7828: Download complete +35807b77a593: Pull complete +664b0ebdcc07: Pull complete +baeb6f3bec76: Verifying Checksum +baeb6f3bec76: Download complete +d598f4d3c081: Pull complete +291455135b00: Pull complete +b46409342f13: Pull complete +ff2b9c6e6f3a: Pull complete +149f6335fc27: Pull complete +baeb6f3bec76: Pull complete +8617caab2de5: Pull complete +067d70de7828: Pull complete +Digest: sha256:58ea1bc09f269a9b85b7e1fae83b7505952aaa521afaaca4131f558955743842 +Status: Downloaded newer image for mongo:latest +docker.io/library/mongo:latest +v2.4.3: Pulling from prom/prometheus +8c5a7da1afbc: Pulling fs layer +cab0dd93492f: Pulling fs layer +21399993eeff: Pulling fs layer +50fe4c0f18ae: Pulling fs layer +bab8b4ed1fef: Pulling fs layer +5bd3aaf93e52: Pulling fs layer +ccb08d41de26: Pulling fs layer +51047cce385d: Pulling fs layer +51c32e598e5c: Pulling fs layer +50fe4c0f18ae: Waiting +bab8b4ed1fef: Waiting +5bd3aaf93e52: Waiting +51c32e598e5c: Waiting +ccb08d41de26: Waiting +51047cce385d: Waiting +8c5a7da1afbc: Verifying Checksum +8c5a7da1afbc: Download complete +cab0dd93492f: Verifying Checksum +cab0dd93492f: Download 
complete +bab8b4ed1fef: Verifying Checksum +bab8b4ed1fef: Download complete +21399993eeff: Verifying Checksum +21399993eeff: Download complete +5bd3aaf93e52: Verifying Checksum +5bd3aaf93e52: Download complete +50fe4c0f18ae: Verifying Checksum +50fe4c0f18ae: Download complete +8c5a7da1afbc: Pull complete +ccb08d41de26: Verifying Checksum +ccb08d41de26: Download complete +51047cce385d: Verifying Checksum +51047cce385d: Download complete +51c32e598e5c: Verifying Checksum +51c32e598e5c: Download complete +cab0dd93492f: Pull complete +21399993eeff: Pull complete +50fe4c0f18ae: Pull complete +bab8b4ed1fef: Pull complete +5bd3aaf93e52: Pull complete +ccb08d41de26: Pull complete +51047cce385d: Pull complete +51c32e598e5c: Pull complete +Digest: sha256:2d79525389d68a309db843c1888f364823afbbef32ffea4741024d2ab9994dd6 +Status: Downloaded newer image for prom/prometheus:v2.4.3 +docker.io/prom/prometheus:v2.4.3 +latest: Pulling from google/cadvisor +ff3a5c916c92: Already exists +44a45bb65cdf: Pulling fs layer +0bbe1a2fe2a6: Pulling fs layer +0bbe1a2fe2a6: Verifying Checksum +0bbe1a2fe2a6: Download complete +44a45bb65cdf: Verifying Checksum +44a45bb65cdf: Download complete +44a45bb65cdf: Pull complete +0bbe1a2fe2a6: Pull complete +Digest: sha256:815386ebbe9a3490f38785ab11bda34ec8dacf4634af77b8912832d4f85dca04 +Status: Downloaded newer image for google/cadvisor:latest +docker.io/google/cadvisor:latest +latest: Pulling from grafana/grafana +540db60ca938: Pulling fs layer +475d6aa6cde2: Pulling fs layer +86c565d1875f: Pulling fs layer +bacbab00d598: Pulling fs layer +eba2484373d9: Pulling fs layer +4f4fb700ef54: Pulling fs layer +de780c7f2383: Pulling fs layer +40175e15d294: Pulling fs layer +bacbab00d598: Waiting +eba2484373d9: Waiting +4f4fb700ef54: Waiting +40175e15d294: Waiting +de780c7f2383: Waiting +475d6aa6cde2: Verifying Checksum +475d6aa6cde2: Download complete +86c565d1875f: Verifying Checksum +86c565d1875f: Download complete +540db60ca938: Verifying Checksum 
+540db60ca938: Pull complete +4f4fb700ef54: Verifying Checksum +4f4fb700ef54: Download complete +bacbab00d598: Verifying Checksum +bacbab00d598: Download complete +475d6aa6cde2: Pull complete +de780c7f2383: Verifying Checksum +de780c7f2383: Download complete +40175e15d294: Verifying Checksum +40175e15d294: Download complete +eba2484373d9: Verifying Checksum +eba2484373d9: Download complete +86c565d1875f: Pull complete +bacbab00d598: Pull complete +eba2484373d9: Pull complete +4f4fb700ef54: Pull complete +de780c7f2383: Pull complete +40175e15d294: Pull complete +Digest: sha256:811ee7d685fe45e5625928716d189c518f2b96edaa86122a04cc6faf1e988180 +Status: Downloaded newer image for grafana/grafana:latest +docker.io/grafana/grafana:latest +10: Pulling from library/mariadb +35807b77a593: Already exists +7275e59ecb3d: Pulling fs layer +e8aad5ad91b4: Pulling fs layer +c9acfbaed0bf: Pulling fs layer +c0eb3de6044a: Pulling fs layer +bc1fe3865c9c: Pulling fs layer +63117ccbd0ec: Pulling fs layer +91c9aaf2ea87: Pulling fs layer +2118d7479e34: Pulling fs layer +6bd89e50398a: Pulling fs layer +63117ccbd0ec: Waiting +91c9aaf2ea87: Waiting +2118d7479e34: Waiting +6bd89e50398a: Waiting +c0eb3de6044a: Waiting +bc1fe3865c9c: Waiting +7275e59ecb3d: Download complete +c9acfbaed0bf: Verifying Checksum +c9acfbaed0bf: Download complete +e8aad5ad91b4: Download complete +7275e59ecb3d: Pull complete +c0eb3de6044a: Verifying Checksum +c0eb3de6044a: Download complete +63117ccbd0ec: Verifying Checksum +63117ccbd0ec: Download complete +bc1fe3865c9c: Verifying Checksum +bc1fe3865c9c: Download complete +91c9aaf2ea87: Verifying Checksum +91c9aaf2ea87: Download complete +e8aad5ad91b4: Pull complete +6bd89e50398a: Verifying Checksum +6bd89e50398a: Download complete +c9acfbaed0bf: Pull complete +c0eb3de6044a: Pull complete +2118d7479e34: Verifying Checksum +2118d7479e34: Download complete +bc1fe3865c9c: Pull complete +63117ccbd0ec: Pull complete +91c9aaf2ea87: Pull complete +2118d7479e34: Pull complete 
+6bd89e50398a: Pull complete +Digest: sha256:4bbee12b1adf299211f844ebbe89563675c46965470dcfa40f5278d63c56d030 +Status: Downloaded newer image for mariadb:10 +docker.io/library/mariadb:10 +5: Pulling from library/mysql +a330b6cecb98: Pulling fs layer +9c8f656c32b8: Pulling fs layer +88e473c3f553: Pulling fs layer +062463ea5d2f: Pulling fs layer +daf7e3bdf4b6: Pulling fs layer +1839c0b7aac9: Pulling fs layer +cf0a0cfee6d0: Pulling fs layer +fae7a809788c: Pulling fs layer +dae5a82a61f0: Pulling fs layer +7063da9569eb: Pulling fs layer +51a9a9b4ef36: Pulling fs layer +1839c0b7aac9: Waiting +cf0a0cfee6d0: Waiting +fae7a809788c: Waiting +dae5a82a61f0: Waiting +7063da9569eb: Waiting +51a9a9b4ef36: Waiting +062463ea5d2f: Waiting +daf7e3bdf4b6: Waiting +9c8f656c32b8: Verifying Checksum +9c8f656c32b8: Download complete +88e473c3f553: Verifying Checksum +88e473c3f553: Download complete +062463ea5d2f: Verifying Checksum +062463ea5d2f: Download complete +a330b6cecb98: Verifying Checksum +a330b6cecb98: Download complete +daf7e3bdf4b6: Verifying Checksum +daf7e3bdf4b6: Download complete +cf0a0cfee6d0: Verifying Checksum +cf0a0cfee6d0: Download complete +fae7a809788c: Verifying Checksum +fae7a809788c: Download complete +1839c0b7aac9: Verifying Checksum +1839c0b7aac9: Download complete +7063da9569eb: Verifying Checksum +7063da9569eb: Download complete +51a9a9b4ef36: Verifying Checksum +51a9a9b4ef36: Download complete +dae5a82a61f0: Verifying Checksum +dae5a82a61f0: Download complete +a330b6cecb98: Pull complete +9c8f656c32b8: Pull complete +88e473c3f553: Pull complete +062463ea5d2f: Pull complete +daf7e3bdf4b6: Pull complete +1839c0b7aac9: Pull complete +cf0a0cfee6d0: Pull complete +fae7a809788c: Pull complete +dae5a82a61f0: Pull complete +7063da9569eb: Pull complete +51a9a9b4ef36: Pull complete +Digest: sha256:d9b934cdf6826629f8d02ea01f28b2c4ddb1ae27c32664b14867324b3e5e1291 +Status: Downloaded newer image for mysql:5 +docker.io/library/mysql:5 +Pulling OSM docker images +Pulling 
opensourcemano/mon:10 docker image +10: Pulling from opensourcemano/mon +c549ccf8d472: Pulling fs layer +24b1ce98fe2c: Pulling fs layer +17a1f8ed7bac: Pulling fs layer +ef86da65f6f8: Pulling fs layer +04a6b6e92a44: Pulling fs layer +25e42a5d110d: Pulling fs layer +06b2c9a2dfdd: Pulling fs layer +b3b5842693b5: Pulling fs layer +355a15f64cd9: Pulling fs layer +e12aed2e1874: Pulling fs layer +4b4ce96f6770: Pulling fs layer +f9660a89fb09: Pulling fs layer +ef86da65f6f8: Waiting +04a6b6e92a44: Waiting +25e42a5d110d: Waiting +06b2c9a2dfdd: Waiting +b3b5842693b5: Waiting +355a15f64cd9: Waiting +e12aed2e1874: Waiting +4b4ce96f6770: Waiting +f9660a89fb09: Waiting +17a1f8ed7bac: Verifying Checksum +17a1f8ed7bac: Download complete +ef86da65f6f8: Download complete +c549ccf8d472: Verifying Checksum +c549ccf8d472: Download complete +24b1ce98fe2c: Verifying Checksum +24b1ce98fe2c: Download complete +25e42a5d110d: Verifying Checksum +25e42a5d110d: Download complete +06b2c9a2dfdd: Verifying Checksum +06b2c9a2dfdd: Download complete +b3b5842693b5: Verifying Checksum +b3b5842693b5: Download complete +355a15f64cd9: Verifying Checksum +355a15f64cd9: Download complete +04a6b6e92a44: Verifying Checksum +04a6b6e92a44: Download complete +f9660a89fb09: Verifying Checksum +f9660a89fb09: Download complete +e12aed2e1874: Verifying Checksum +e12aed2e1874: Download complete +4b4ce96f6770: Verifying Checksum +4b4ce96f6770: Download complete +c549ccf8d472: Pull complete +24b1ce98fe2c: Pull complete +17a1f8ed7bac: Pull complete +ef86da65f6f8: Pull complete +04a6b6e92a44: Pull complete +25e42a5d110d: Pull complete +06b2c9a2dfdd: Pull complete +b3b5842693b5: Pull complete +355a15f64cd9: Pull complete +e12aed2e1874: Pull complete +4b4ce96f6770: Pull complete +f9660a89fb09: Pull complete +Digest: sha256:c06b2f8a2b81ea375cd736ecf610b4850671229e448577e1f587692dff85eca3 +Status: Downloaded newer image for opensourcemano/mon:10 +docker.io/opensourcemano/mon:10 +Pulling opensourcemano/pol:10 docker image 
+10: Pulling from opensourcemano/pol +c549ccf8d472: Already exists +24b1ce98fe2c: Already exists +d4f1f30a4543: Pulling fs layer +98fdfbece9ca: Pulling fs layer +a63bb2b0c9e7: Pulling fs layer +35c56380d886: Pulling fs layer +d52da327ba85: Pulling fs layer +3847c3f71d7d: Pulling fs layer +faa0c4c7e65f: Pulling fs layer +35130fe55c9d: Pulling fs layer +745b29b6dd7e: Pulling fs layer +3847c3f71d7d: Waiting +faa0c4c7e65f: Waiting +35c56380d886: Waiting +d52da327ba85: Waiting +35130fe55c9d: Waiting +745b29b6dd7e: Waiting +a63bb2b0c9e7: Verifying Checksum +a63bb2b0c9e7: Download complete +d4f1f30a4543: Verifying Checksum +d4f1f30a4543: Download complete +98fdfbece9ca: Verifying Checksum +98fdfbece9ca: Download complete +d4f1f30a4543: Pull complete +35c56380d886: Verifying Checksum +35c56380d886: Download complete +d52da327ba85: Verifying Checksum +d52da327ba85: Download complete +3847c3f71d7d: Verifying Checksum +3847c3f71d7d: Download complete +35130fe55c9d: Verifying Checksum +35130fe55c9d: Download complete +faa0c4c7e65f: Verifying Checksum +faa0c4c7e65f: Download complete +745b29b6dd7e: Verifying Checksum +745b29b6dd7e: Download complete +98fdfbece9ca: Pull complete +a63bb2b0c9e7: Pull complete +35c56380d886: Pull complete +d52da327ba85: Pull complete +3847c3f71d7d: Pull complete +faa0c4c7e65f: Pull complete +35130fe55c9d: Pull complete +745b29b6dd7e: Pull complete +Digest: sha256:258e14b73c0236121b8a8775b122158b60c3934f31c84fd1d2cf4bc4cdc4733d +Status: Downloaded newer image for opensourcemano/pol:10 +docker.io/opensourcemano/pol:10 +Pulling opensourcemano/nbi:10 docker image +10: Pulling from opensourcemano/nbi +25fa05cd42bd: Pulling fs layer +79b82fad1be8: Pulling fs layer +96431a0568cc: Pulling fs layer +4469f60df593: Pulling fs layer +13c463cb29f2: Pulling fs layer +2b191ad5b2e2: Pulling fs layer +da5ef09f49dd: Pulling fs layer +8c0691757a83: Pulling fs layer +823d3d9d067d: Pulling fs layer +2b191ad5b2e2: Waiting +da5ef09f49dd: Waiting +8c0691757a83: Waiting 
+823d3d9d067d: Waiting +4469f60df593: Waiting +13c463cb29f2: Waiting +96431a0568cc: Verifying Checksum +96431a0568cc: Download complete +25fa05cd42bd: Verifying Checksum +25fa05cd42bd: Download complete +13c463cb29f2: Verifying Checksum +13c463cb29f2: Download complete +79b82fad1be8: Verifying Checksum +79b82fad1be8: Download complete +4469f60df593: Verifying Checksum +4469f60df593: Download complete +2b191ad5b2e2: Verifying Checksum +2b191ad5b2e2: Download complete +da5ef09f49dd: Verifying Checksum +da5ef09f49dd: Download complete +8c0691757a83: Verifying Checksum +8c0691757a83: Download complete +823d3d9d067d: Verifying Checksum +823d3d9d067d: Download complete +25fa05cd42bd: Pull complete +79b82fad1be8: Pull complete +96431a0568cc: Pull complete +4469f60df593: Pull complete +13c463cb29f2: Pull complete +2b191ad5b2e2: Pull complete +da5ef09f49dd: Pull complete +8c0691757a83: Pull complete +823d3d9d067d: Pull complete +Digest: sha256:81acf9b529479b8f3791321df5e4e888489254f94971dba8c5c7982f14448c53 +Status: Downloaded newer image for opensourcemano/nbi:10 +docker.io/opensourcemano/nbi:10 +Pulling opensourcemano/keystone:10 docker image +10: Pulling from opensourcemano/keystone +61e03ba1d414: Pulling fs layer +4afb39f216bd: Pulling fs layer +e489abdc9f90: Pulling fs layer +999fff7bcc24: Pulling fs layer +b15ac7627c4a: Pulling fs layer +56182eb8fb75: Pulling fs layer +999fff7bcc24: Waiting +b15ac7627c4a: Waiting +6a8ed178545d: Pulling fs layer +56182eb8fb75: Waiting +6a8ed178545d: Waiting +e489abdc9f90: Verifying Checksum +e489abdc9f90: Download complete +4afb39f216bd: Verifying Checksum +4afb39f216bd: Download complete +999fff7bcc24: Verifying Checksum +999fff7bcc24: Download complete +b15ac7627c4a: Verifying Checksum +b15ac7627c4a: Download complete +56182eb8fb75: Verifying Checksum +56182eb8fb75: Download complete +61e03ba1d414: Verifying Checksum +61e03ba1d414: Download complete +6a8ed178545d: Verifying Checksum +6a8ed178545d: Download complete +61e03ba1d414: 
Pull complete +4afb39f216bd: Pull complete +e489abdc9f90: Pull complete +999fff7bcc24: Pull complete +b15ac7627c4a: Pull complete +56182eb8fb75: Pull complete +6a8ed178545d: Pull complete +Digest: sha256:e204b544882350e30d3f09dc0bf7de817294896415315c850d6fdb5e9b32576e +Status: Downloaded newer image for opensourcemano/keystone:10 +docker.io/opensourcemano/keystone:10 +Pulling opensourcemano/ro:10 docker image +10: Pulling from opensourcemano/ro +25fa05cd42bd: Already exists +79b82fad1be8: Already exists +d2159c280060: Pulling fs layer +f9cb520b2003: Pulling fs layer +8c6e0c57135a: Pulling fs layer +6e674e0ff67f: Pulling fs layer +6e674e0ff67f: Waiting +8c6e0c57135a: Verifying Checksum +8c6e0c57135a: Download complete +d2159c280060: Verifying Checksum +d2159c280060: Download complete +6e674e0ff67f: Verifying Checksum +6e674e0ff67f: Download complete +d2159c280060: Pull complete +f9cb520b2003: Verifying Checksum +f9cb520b2003: Download complete +f9cb520b2003: Pull complete +8c6e0c57135a: Pull complete +6e674e0ff67f: Pull complete +Digest: sha256:340d9491692fe11d65e32ef10c224636acdb4dfc20b7222c682fd7084dc53375 +Status: Downloaded newer image for opensourcemano/ro:10 +docker.io/opensourcemano/ro:10 +Pulling opensourcemano/lcm:10 docker image +10: Pulling from opensourcemano/lcm +25fa05cd42bd: Already exists +79b82fad1be8: Already exists +fbf7a10e9df7: Pulling fs layer +7a3694938e7e: Pulling fs layer +eca3a26fd648: Pulling fs layer +db0ea8046578: Pulling fs layer +ede9fdd64d0c: Pulling fs layer +a0d53757f978: Pulling fs layer +63d2e86f6d2a: Pulling fs layer +5e1fea429abe: Pulling fs layer +1a9eb93b9672: Pulling fs layer +95e2fe1e01d4: Pulling fs layer +1361214bf256: Pulling fs layer +db0ea8046578: Waiting +ede9fdd64d0c: Waiting +a0d53757f978: Waiting +63d2e86f6d2a: Waiting +5e1fea429abe: Waiting +1a9eb93b9672: Waiting +95e2fe1e01d4: Waiting +1361214bf256: Waiting +fbf7a10e9df7: Verifying Checksum +fbf7a10e9df7: Download complete +7a3694938e7e: Verifying Checksum 
+7a3694938e7e: Download complete +fbf7a10e9df7: Pull complete +eca3a26fd648: Verifying Checksum +eca3a26fd648: Download complete +ede9fdd64d0c: Verifying Checksum +ede9fdd64d0c: Download complete +db0ea8046578: Verifying Checksum +db0ea8046578: Download complete +7a3694938e7e: Pull complete +63d2e86f6d2a: Verifying Checksum +63d2e86f6d2a: Download complete +5e1fea429abe: Verifying Checksum +5e1fea429abe: Download complete +a0d53757f978: Verifying Checksum +a0d53757f978: Download complete +1a9eb93b9672: Verifying Checksum +1a9eb93b9672: Download complete +1361214bf256: Verifying Checksum +1361214bf256: Download complete +95e2fe1e01d4: Verifying Checksum +95e2fe1e01d4: Download complete +eca3a26fd648: Pull complete +db0ea8046578: Pull complete +ede9fdd64d0c: Pull complete +a0d53757f978: Pull complete +63d2e86f6d2a: Pull complete +5e1fea429abe: Pull complete +1a9eb93b9672: Pull complete +95e2fe1e01d4: Pull complete +1361214bf256: Pull complete +Digest: sha256:a5af752ffe1ae236f5ed7375197018b2fa36b6b2db78aee84e808289c72cbf4d +Status: Downloaded newer image for opensourcemano/lcm:10 +docker.io/opensourcemano/lcm:10 +Pulling opensourcemano/ng-ui:10 docker image +10: Pulling from opensourcemano/ng-ui +25fa05cd42bd: Already exists +6f9d5f011911: Pulling fs layer +ada8c666ede4: Pulling fs layer +0ee405664154: Pulling fs layer +d8388c5773a1: Pulling fs layer +9926096aff79: Pulling fs layer +750b17a20144: Pulling fs layer +2a615abe7a84: Pulling fs layer +9995f9a156ff: Pulling fs layer +ac2b2be25d64: Pulling fs layer +d8388c5773a1: Waiting +9926096aff79: Waiting +750b17a20144: Waiting +2a615abe7a84: Waiting +9995f9a156ff: Waiting +ac2b2be25d64: Waiting +0ee405664154: Verifying Checksum +0ee405664154: Download complete +d8388c5773a1: Verifying Checksum +d8388c5773a1: Download complete +ada8c666ede4: Verifying Checksum +ada8c666ede4: Download complete +750b17a20144: Verifying Checksum +750b17a20144: Download complete +2a615abe7a84: Verifying Checksum +2a615abe7a84: Download 
complete +6f9d5f011911: Verifying Checksum +6f9d5f011911: Download complete +9995f9a156ff: Verifying Checksum +9995f9a156ff: Download complete +ac2b2be25d64: Verifying Checksum +ac2b2be25d64: Download complete +9926096aff79: Verifying Checksum +9926096aff79: Download complete +6f9d5f011911: Pull complete +ada8c666ede4: Pull complete +0ee405664154: Pull complete +d8388c5773a1: Pull complete +9926096aff79: Pull complete +750b17a20144: Pull complete +2a615abe7a84: Pull complete +9995f9a156ff: Pull complete +ac2b2be25d64: Pull complete +Digest: sha256:bc2f5888f0c9c07f640c538cbbe271ad51fe76430891be6229a41b350ea46393 +Status: Downloaded newer image for opensourcemano/ng-ui:10 +docker.io/opensourcemano/ng-ui:10 +Pulling opensourcemano/osmclient:10 docker image +10: Pulling from opensourcemano/osmclient +25fa05cd42bd: Already exists +79b82fad1be8: Already exists +4671b9843696: Pulling fs layer +347013c6ea3b: Pulling fs layer +4671b9843696: Verifying Checksum +4671b9843696: Download complete +347013c6ea3b: Verifying Checksum +347013c6ea3b: Download complete +4671b9843696: Pull complete +347013c6ea3b: Pull complete +Digest: sha256:0208ef396ded50b6f3216af2f0e88aad0b1df5bf8079f7f43ef712fb6bf74134 +Status: Downloaded newer image for opensourcemano/osmclient:10 +docker.io/opensourcemano/osmclient:10 +Finished pulling and generating docker images +Doing a backup of existing env files +cp: cannot stat '/etc/osm/docker/keystone-db.env': No such file or directory +cp: cannot stat '/etc/osm/docker/keystone.env': No such file or directory +cp: cannot stat '/etc/osm/docker/lcm.env': No such file or directory +cp: cannot stat '/etc/osm/docker/mon.env': No such file or directory +cp: cannot stat '/etc/osm/docker/nbi.env': No such file or directory +cp: cannot stat '/etc/osm/docker/pol.env': No such file or directory +cp: cannot stat '/etc/osm/docker/ro-db.env': No such file or directory +cp: cannot stat '/etc/osm/docker/ro.env': No such file or directory +Generating docker env files 
+OSMLCM_DATABASE_COMMONKEY=uy2O3WAplXL00VBmKKS0ZsU2U1aFUsEh +OSMLCM_VCA_HOST=192.168.64.19 +OSMLCM_VCA_SECRET=c19fc42556d3564c762ad1c4bfd6af89 +OSMLCM_VCA_PUBKEY=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCoXJJP6MedQTolqTBaG8kQowozswo07M0tQrgS8GWp1uzPQ7eGwKGaElnserlws9d/lWmOk4M65RM8U5qiK2Q5oLgUoc131B0pl5DBFOPRbdD4/EHSjJ1KhzBSLE/rm9+InMYQ6I7XeXjMWpT5yclEKN3TYBMY0mwg/Z5LmluduD/zmVIvVo512/TXWR2rXCVTvmxz/fHBd2AbK2zBFjOdAbMZ1P9gLgfaTw5tid5tc2KzoTEjGDX94/rOXe3d8soQbl8oyjdueynyPlwuQ1z9wqB5ez2rQCboC/8XmYpdRHvS1bqiyeh3HmEBU88xFs7TiuJRlPYENmjELXO/Caox juju-client-key +OSMLCM_VCA_CACERT=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ5RENDQWx5Z0F3SUJBZ0lWQUpxMjMwWm1WeVQwNHFaTy95SzVwZGtHTS9XWE1BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05NakV3T1RBMgpNVGd5TWpNd1doY05NekV3T1RBMk1UZ3lOek13V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMHF6clJKaUIKcUx6eXZWYTZvUzhnRnhEcCtCTWVUbGFnRnBnQ2lHZk1kajdER0JlTTBxWHVhNWdxdlFORGtTQW5XNnRYOTRqNAo5RlIxR3hyS2RQZlkvZDJuN25YMmpneEd2Y2pwR1oxbFNwcWVZZ2dsMW9PWVBsZEdFdnd4QUE3OVYzaTB4QkxSCmdUYzh5dGRLRzlueFMrUk9UanQ4WVgrbzR0S2c5Mm9mOUg1eU1pV2loZjN5bmU4dGhlMVprNGl6OGFkVTd3Q1AKQUJNa1VEVzhTckhBT3JwdVdmQnNTcFhncW5pblJ1Si9rYkEwVDZBMm9sbkFMNWN3Z1V3OWZUeCtwWWJsbUhNdQpyanZsbmhraXNNSy9iZ1dNSURBem9odHVJZEx4N3RrOCswOHVjWGYxdmhsQkJmMmQzY3djTnVnaDZua3pkeFBHCm9qcUpQMVVxUWs0ZEFNSUNsKzRhODlRb2VuN0ZUbm5ab1JRc05aWS9ZYzJKaXBDU2RPQ3V1ZHJqMlNIdnBRazMKMGNISURMeXVOeWk4TzRpSnBHS2RqeWVUKzNydC9RTDFFMUo1Q1FiTEU3TUJZM2E5Uk9xb25aTTFVMEpaZW1oZgpHdHdqYmFzUzZUNU84d0dFZ09taitWVGpYbW0ydGxvNUw3YmFsR2RaNXRYcHVRd3lHR1VxYzBmTkFnTUJBQUdqCkl6QWhNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJnUUFYcHVQTVJ4ZVZuTXZKbGVmbHNnVWxqV2NqRGxObkNHWXV6Q0crRUlxVjBzeU53cXZzbDBwRwpUVXh4bUhUeUlhaHRzR0FRcEFrSXd3YjlvaHJSWk92WDhBWk9lOXEvZlJiMWtaS0VTc3IyNzIvOFVTenlOTGIzCnBtV0RvYmF3dlB5VVJXZFdlWjNUMXEwTklpL2R1VmxiLyszazgyVS9ZWVk4ZFYwQ3FNZkhDa0xiYU9QSENCRk
cKSTJKTmNLTkgrbzlJZXVLOW5ubVRrbURTNUZSUlNUc2ZyWTA1ZmlVUGhjS3dNU0xkcERiL1ZtM2Rydlk1QU4yRApvNkRSR1JzK3Y4dkJYbU1taURtZFlHVzVrNUVyd1RpSzVUemtnQ1dMNUhWSE11azBJei9VNEkrL1owNGxPTjhMCmg3azVmZ2diVHQ3ZGJoKzczaStTbkRyRW5FRkMrR25sQ05pVmRVeDlPVkFCNDJsOWllckpwcHU2c2l0V0tZQkEKSUZpKzdVSEFOaHR4WEtnOE13b2lnY1Z0dklmK0ZranpERjFkbjl2aTRuWGQyS0R3N3A1TTMwRlN3d0sya291eQorMXY1VXVLd2dPa1ZMYUYxakhXcUZNb0ozdko2VmxxWmpnWFVtcHgrVHl2YmpTR3RUZUl0YTNubVlWdHBEV3pBCi9kTlpHcUpDK01FPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCgo= +# OSMLCM_VCA_ENABLEOSUPGRADE=false +# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/ +OSMLCM_VCA_CLOUD=lxd-cloud +OSMLCM_VCA_K8S_CLOUD=k8scloud +MYSQL_ROOT_PASSWORD=doKzXq1kJ3f60DNTtgR77tuy6iRqHDDi +RO_DB_ROOT_PASSWORD=doKzXq1kJ3f60DNTtgR77tuy6iRqHDDi +OSMRO_DATABASE_COMMONKEY=uy2O3WAplXL00VBmKKS0ZsU2U1aFUsEh +MYSQL_ROOT_PASSWORD=doKzXq1kJ3f60DNTtgR77tuy6iRqHDDi +ROOT_DB_PASSWORD=doKzXq1kJ3f60DNTtgR77tuy6iRqHDDi +KEYSTONE_DB_PASSWORD=FYnqJhGn6yLi0SDbA4aaJ2gW5TQfhj0r +SERVICE_PASSWORD=weyQ7wnHtlOo4K0UuUriYPPTrGUexZ82 +OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=weyQ7wnHtlOo4K0UuUriYPPTrGUexZ82 +OSMNBI_DATABASE_COMMONKEY=uy2O3WAplXL00VBmKKS0ZsU2U1aFUsEh +OSMMON_KEYSTONE_SERVICE_PASSWORD=weyQ7wnHtlOo4K0UuUriYPPTrGUexZ82 +OSMMON_DATABASE_COMMONKEY=uy2O3WAplXL00VBmKKS0ZsU2U1aFUsEh +OSMMON_SQL_DATABASE_URI=mysql://root:doKzXq1kJ3f60DNTtgR77tuy6iRqHDDi@mysql:3306/mon +OS_NOTIFIER_URI=http://192.168.64.19:8662 +OSMMON_VCA_HOST=192.168.64.19 +OSMMON_VCA_SECRET=c19fc42556d3564c762ad1c4bfd6af89 
+OSMMON_VCA_CACERT=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ5RENDQWx5Z0F3SUJBZ0lWQUpxMjMwWm1WeVQwNHFaTy95SzVwZGtHTS9XWE1BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05NakV3T1RBMgpNVGd5TWpNd1doY05NekV3T1RBMk1UZ3lOek13V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMHF6clJKaUIKcUx6eXZWYTZvUzhnRnhEcCtCTWVUbGFnRnBnQ2lHZk1kajdER0JlTTBxWHVhNWdxdlFORGtTQW5XNnRYOTRqNAo5RlIxR3hyS2RQZlkvZDJuN25YMmpneEd2Y2pwR1oxbFNwcWVZZ2dsMW9PWVBsZEdFdnd4QUE3OVYzaTB4QkxSCmdUYzh5dGRLRzlueFMrUk9UanQ4WVgrbzR0S2c5Mm9mOUg1eU1pV2loZjN5bmU4dGhlMVprNGl6OGFkVTd3Q1AKQUJNa1VEVzhTckhBT3JwdVdmQnNTcFhncW5pblJ1Si9rYkEwVDZBMm9sbkFMNWN3Z1V3OWZUeCtwWWJsbUhNdQpyanZsbmhraXNNSy9iZ1dNSURBem9odHVJZEx4N3RrOCswOHVjWGYxdmhsQkJmMmQzY3djTnVnaDZua3pkeFBHCm9qcUpQMVVxUWs0ZEFNSUNsKzRhODlRb2VuN0ZUbm5ab1JRc05aWS9ZYzJKaXBDU2RPQ3V1ZHJqMlNIdnBRazMKMGNISURMeXVOeWk4TzRpSnBHS2RqeWVUKzNydC9RTDFFMUo1Q1FiTEU3TUJZM2E5Uk9xb25aTTFVMEpaZW1oZgpHdHdqYmFzUzZUNU84d0dFZ09taitWVGpYbW0ydGxvNUw3YmFsR2RaNXRYcHVRd3lHR1VxYzBmTkFnTUJBQUdqCkl6QWhNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJnUUFYcHVQTVJ4ZVZuTXZKbGVmbHNnVWxqV2NqRGxObkNHWXV6Q0crRUlxVjBzeU53cXZzbDBwRwpUVXh4bUhUeUlhaHRzR0FRcEFrSXd3YjlvaHJSWk92WDhBWk9lOXEvZlJiMWtaS0VTc3IyNzIvOFVTenlOTGIzCnBtV0RvYmF3dlB5VVJXZFdlWjNUMXEwTklpL2R1VmxiLyszazgyVS9ZWVk4ZFYwQ3FNZkhDa0xiYU9QSENCRkcKSTJKTmNLTkgrbzlJZXVLOW5ubVRrbURTNUZSUlNUc2ZyWTA1ZmlVUGhjS3dNU0xkcERiL1ZtM2Rydlk1QU4yRApvNkRSR1JzK3Y4dkJYbU1taURtZFlHVzVrNUVyd1RpSzVUemtnQ1dMNUhWSE11azBJei9VNEkrL1owNGxPTjhMCmg3azVmZ2diVHQ3ZGJoKzczaStTbkRyRW5FRkMrR25sQ05pVmRVeDlPVkFCNDJsOWllckpwcHU2c2l0V0tZQkEKSUZpKzdVSEFOaHR4WEtnOE13b2lnY1Z0dklmK0ZranpERjFkbjl2aTRuWGQyS0R3N3A1TTMwRlN3d0sya291eQorMXY1VXVLd2dPa1ZMYUYxakhXcUZNb0ozdko2VmxxWmpnWFVtcHgrVHl2YmpTR3RUZUl0YTNubVlWdHBEV3pBCi9kTlpHcUpDK01FPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCgo= 
+OSMPOL_SQL_DATABASE_URI=mysql://root:doKzXq1kJ3f60DNTtgR77tuy6iRqHDDi@mysql:3306/pol +Finished generation of docker env files +Added 'osm' model on k8scloud with credential 'k8scloud' for user 'admin' +Located charm "mongodb-k8s" in charm-hub, revision 1 +Deploying "mongodb-k8s" from charm-hub charm "mongodb-k8s", revision 1 in channel stable +Error from server (AlreadyExists): namespaces "osm" already exists +secret/lcm-secret created +secret/mon-secret created +secret/nbi-secret created +secret/ro-db-secret created +secret/ro-secret created +secret/keystone-secret created +secret/pol-secret created +clusterrole.rbac.authorization.k8s.io/grafana-clusterrole created +clusterrolebinding.rbac.authorization.k8s.io/grafana-clusterrolebinding created +secret/grafana created +serviceaccount/grafana created +configmap/grafana-dashboard-provider created +configmap/grafana-datasource created +configmap/grafana created +deployment.apps/grafana created +service/grafana created +service/kafka created +statefulset.apps/kafka created +service/keystone created +deployment.apps/keystone created +deployment.apps/lcm created +service/mon created +deployment.apps/mon created +service/mysql created +statefulset.apps/mysql created +service/nbi created +deployment.apps/nbi created +service/ng-ui created +deployment.apps/ng-ui created +deployment.apps/pol created +service/prometheus created +configmap/prom created +statefulset.apps/prometheus created +service/ro created +deployment.apps/ro created +service/zookeeper created +statefulset.apps/zookeeper created +sed: can't read /etc/osm/docker/osm_pla/pla.yaml: No such file or directory +error: the path "/etc/osm/docker/osm_pla" does not exist + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 3120 100 3120 0 0 6638 0 --:--:-- --:--:-- --:--:-- 6948 +Warning: apt-key output should not be parsed (stdout is not a terminal) +OK +Hit:1 
http://archive.ubuntu.com/ubuntu bionic InRelease +Get:2 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB] +Get:3 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB] +Get:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB] +Hit:5 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:6 https://download.docker.com/linux/ubuntu bionic InRelease +Hit:7 https://packages.cloud.google.com/apt kubernetes-xenial InRelease +Get:8 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/IM amd64 Packages [901 B] +Get:9 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/osmclient amd64 Packages [473 B] +Fetched 253 kB in 6s (41.9 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease +Get:2 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB] +Hit:3 https://download.docker.com/linux/ubuntu bionic InRelease +Get:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB] +Get:5 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB] +Hit:6 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease +Hit:7 https://packages.cloud.google.com/apt kubernetes-xenial InRelease +Fetched 252 kB in 3s (83.2 kB/s) +Reading package lists... +W: Conflicting distribution: https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable InRelease (expected stable but got ) +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. 
+The following additional packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu build-essential cpp cpp-7 + dh-python dpkg-dev fakeroot g++ g++-7 gcc gcc-7 gcc-7-base + libalgorithm-diff-perl libalgorithm-diff-xs-perl libalgorithm-merge-perl + libasan4 libatomic1 libbinutils libc-dev-bin libc6-dev libcc1-0 libcilkrts5 + libdpkg-perl libexpat1-dev libfakeroot libfile-fcntllock-perl libgcc-7-dev + libgomp1 libisl19 libitm1 liblsan0 libmpc3 libmpx2 libpython3-dev + libpython3.6-dev libquadmath0 libstdc++-7-dev libtsan0 libubsan0 + linux-libc-dev make manpages-dev python-pip-whl python3-crypto python3-dev + python3-distutils python3-keyring python3-keyrings.alt python3-lib2to3 + python3-secretstorage python3-setuptools python3-wheel python3-xdg + python3.6-dev +Suggested packages: + binutils-doc cpp-doc gcc-7-locales debian-keyring g++-multilib + g++-7-multilib gcc-7-doc libstdc++6-7-dbg gcc-multilib autoconf automake + libtool flex bison gdb gcc-doc gcc-7-multilib libgcc1-dbg libgomp1-dbg + libitm1-dbg libatomic1-dbg libasan4-dbg liblsan0-dbg libtsan0-dbg + libubsan0-dbg libcilkrts5-dbg libmpx2-dbg libquadmath0-dbg glibc-doc bzr + libstdc++-7-doc make-doc python-crypto-doc gnome-keyring libkf5wallet-bin + gir1.2-gnomekeyring-1.0 python-secretstorage-doc python-setuptools-doc +The following NEW packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu build-essential cpp cpp-7 + dh-python dpkg-dev fakeroot g++ g++-7 gcc gcc-7 gcc-7-base + libalgorithm-diff-perl libalgorithm-diff-xs-perl libalgorithm-merge-perl + libasan4 libatomic1 libbinutils libc-dev-bin libc6-dev libcc1-0 libcilkrts5 + libdpkg-perl libexpat1-dev libfakeroot libfile-fcntllock-perl libgcc-7-dev + libgomp1 libisl19 libitm1 liblsan0 libmpc3 libmpx2 libpython3-dev + libpython3.6-dev libquadmath0 libstdc++-7-dev libtsan0 libubsan0 + linux-libc-dev make manpages-dev python-pip-whl python3-crypto python3-dev + python3-distutils python3-keyring 
python3-keyrings.alt python3-lib2to3 + python3-pip python3-secretstorage python3-setuptools python3-wheel + python3-xdg python3.6-dev +0 upgraded, 57 newly installed, 0 to remove and 7 not upgraded. +Need to get 91.2 MB of archives. +After this operation, 253 MB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 binutils-common amd64 2.30-21ubuntu1~18.04.5 [197 kB] +Get:2 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libbinutils amd64 2.30-21ubuntu1~18.04.5 [489 kB] +Get:3 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 binutils-x86-64-linux-gnu amd64 2.30-21ubuntu1~18.04.5 [1839 kB] +Get:4 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 binutils amd64 2.30-21ubuntu1~18.04.5 [3388 B] +Get:5 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libc-dev-bin amd64 2.27-3ubuntu1.4 [71.8 kB] +Get:6 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 linux-libc-dev amd64 4.15.0-154.161 [988 kB] +Get:7 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libc6-dev amd64 2.27-3ubuntu1.4 [2585 kB] +Get:8 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc-7-base amd64 7.5.0-3ubuntu1~18.04 [18.3 kB] +Get:9 http://archive.ubuntu.com/ubuntu bionic/main amd64 libisl19 amd64 0.19-1 [551 kB] +Get:10 http://archive.ubuntu.com/ubuntu bionic/main amd64 libmpc3 amd64 1.1.0-1 [40.8 kB] +Get:11 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 cpp-7 amd64 7.5.0-3ubuntu1~18.04 [8591 kB] +Get:12 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 cpp amd64 4:7.4.0-1ubuntu2.3 [27.7 kB] +Get:13 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libcc1-0 amd64 8.4.0-1ubuntu1~18.04 [39.4 kB] +Get:14 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libgomp1 amd64 8.4.0-1ubuntu1~18.04 [76.5 kB] +Get:15 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libitm1 amd64 8.4.0-1ubuntu1~18.04 [27.9 kB] +Get:16 
http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libatomic1 amd64 8.4.0-1ubuntu1~18.04 [9192 B] +Get:17 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libasan4 amd64 7.5.0-3ubuntu1~18.04 [358 kB] +Get:18 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 liblsan0 amd64 8.4.0-1ubuntu1~18.04 [133 kB] +Get:19 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libtsan0 amd64 8.4.0-1ubuntu1~18.04 [288 kB] +Get:20 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libubsan0 amd64 7.5.0-3ubuntu1~18.04 [126 kB] +Get:21 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libcilkrts5 amd64 7.5.0-3ubuntu1~18.04 [42.5 kB] +Get:22 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libmpx2 amd64 8.4.0-1ubuntu1~18.04 [11.6 kB] +Get:23 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libquadmath0 amd64 8.4.0-1ubuntu1~18.04 [134 kB] +Get:24 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libgcc-7-dev amd64 7.5.0-3ubuntu1~18.04 [2378 kB] +Get:25 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc-7 amd64 7.5.0-3ubuntu1~18.04 [9381 kB] +Get:26 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 gcc amd64 4:7.4.0-1ubuntu2.3 [5184 B] +Get:27 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libstdc++-7-dev amd64 7.5.0-3ubuntu1~18.04 [1471 kB] +Get:28 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 g++-7 amd64 7.5.0-3ubuntu1~18.04 [9697 kB] +Get:29 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 g++ amd64 4:7.4.0-1ubuntu2.3 [1568 B] +Get:30 http://archive.ubuntu.com/ubuntu bionic/main amd64 make amd64 4.1-9.1ubuntu1 [154 kB] +Get:31 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libdpkg-perl all 1.19.0.5ubuntu2.3 [211 kB] +Get:32 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 dpkg-dev all 1.19.0.5ubuntu2.3 [607 kB] +Get:33 http://archive.ubuntu.com/ubuntu bionic/main amd64 build-essential amd64 12.4ubuntu1 [4758 B] +Get:34 
http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-lib2to3 all 3.6.9-1~18.04 [77.4 kB] +Get:35 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-distutils all 3.6.9-1~18.04 [144 kB] +Get:36 http://archive.ubuntu.com/ubuntu bionic/main amd64 dh-python all 3.20180325ubuntu2 [89.2 kB] +Get:37 http://archive.ubuntu.com/ubuntu bionic/main amd64 libfakeroot amd64 1.22-2ubuntu1 [25.9 kB] +Get:38 http://archive.ubuntu.com/ubuntu bionic/main amd64 fakeroot amd64 1.22-2ubuntu1 [62.3 kB] +Get:39 http://archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-diff-perl all 1.19.03-1 [47.6 kB] +Get:40 http://archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-diff-xs-perl amd64 0.04-5 [11.1 kB] +Get:41 http://archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-merge-perl all 0.08-3 [12.0 kB] +Get:42 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libexpat1-dev amd64 2.2.5-3ubuntu0.2 [122 kB] +Get:43 http://archive.ubuntu.com/ubuntu bionic/main amd64 libfile-fcntllock-perl amd64 0.22-3build2 [33.2 kB] +Get:44 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libpython3.6-dev amd64 3.6.9-1~18.04ubuntu1.4 [44.9 MB] +Get:45 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libpython3-dev amd64 3.6.7-1~18.04 [7328 B] +Get:46 http://archive.ubuntu.com/ubuntu bionic/main amd64 manpages-dev all 4.15-1 [2217 kB] +Get:47 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 python-pip-whl all 9.0.1-2.3~ubuntu1.18.04.5 [1653 kB] +Get:48 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-crypto amd64 2.6.1-8ubuntu2 [244 kB] +Get:49 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3.6-dev amd64 3.6.9-1~18.04ubuntu1.4 [508 kB] +Get:50 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-dev amd64 3.6.7-1~18.04 [1288 B] +Get:51 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-secretstorage all 2.3.1-2 [12.1 kB] +Get:52 http://archive.ubuntu.com/ubuntu bionic/main 
amd64 python3-keyring all 10.6.0-1 [26.7 kB] +Get:53 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-keyrings.alt all 3.0-1 [16.6 kB] +Get:54 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 python3-pip all 9.0.1-2.3~ubuntu1.18.04.5 [114 kB] +Get:55 http://archive.ubuntu.com/ubuntu bionic/main amd64 python3-setuptools all 39.0.1-2 [248 kB] +Get:56 http://archive.ubuntu.com/ubuntu bionic/universe amd64 python3-wheel all 0.30.0-0.2 [36.5 kB] +Get:57 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 python3-xdg all 0.25-4ubuntu1.1 [31.3 kB] +Fetched 91.2 MB in 9s (10.3 MB/s) +Selecting previously unselected package binutils-common:amd64. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 61768 files and directories currently installed.) +Preparing to unpack .../00-binutils-common_2.30-21ubuntu1~18.04.5_amd64.deb ... +Unpacking binutils-common:amd64 (2.30-21ubuntu1~18.04.5) ... +Selecting previously unselected package libbinutils:amd64. +Preparing to unpack .../01-libbinutils_2.30-21ubuntu1~18.04.5_amd64.deb ... +Unpacking libbinutils:amd64 (2.30-21ubuntu1~18.04.5) ... +Selecting previously unselected package binutils-x86-64-linux-gnu. +Preparing to unpack .../02-binutils-x86-64-linux-gnu_2.30-21ubuntu1~18.04.5_amd64.deb ... +Unpacking binutils-x86-64-linux-gnu (2.30-21ubuntu1~18.04.5) ... +Selecting previously unselected package binutils. +Preparing to unpack .../03-binutils_2.30-21ubuntu1~18.04.5_amd64.deb ... 
+Unpacking binutils (2.30-21ubuntu1~18.04.5) ... +Selecting previously unselected package libc-dev-bin. +Preparing to unpack .../04-libc-dev-bin_2.27-3ubuntu1.4_amd64.deb ... +Unpacking libc-dev-bin (2.27-3ubuntu1.4) ... +Selecting previously unselected package linux-libc-dev:amd64. +Preparing to unpack .../05-linux-libc-dev_4.15.0-154.161_amd64.deb ... +Unpacking linux-libc-dev:amd64 (4.15.0-154.161) ... +Selecting previously unselected package libc6-dev:amd64. +Preparing to unpack .../06-libc6-dev_2.27-3ubuntu1.4_amd64.deb ... +Unpacking libc6-dev:amd64 (2.27-3ubuntu1.4) ... +Selecting previously unselected package gcc-7-base:amd64. +Preparing to unpack .../07-gcc-7-base_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking gcc-7-base:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package libisl19:amd64. +Preparing to unpack .../08-libisl19_0.19-1_amd64.deb ... +Unpacking libisl19:amd64 (0.19-1) ... +Selecting previously unselected package libmpc3:amd64. +Preparing to unpack .../09-libmpc3_1.1.0-1_amd64.deb ... +Unpacking libmpc3:amd64 (1.1.0-1) ... +Selecting previously unselected package cpp-7. +Preparing to unpack .../10-cpp-7_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking cpp-7 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package cpp. +Preparing to unpack .../11-cpp_4%3a7.4.0-1ubuntu2.3_amd64.deb ... +Unpacking cpp (4:7.4.0-1ubuntu2.3) ... +Selecting previously unselected package libcc1-0:amd64. +Preparing to unpack .../12-libcc1-0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libcc1-0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libgomp1:amd64. +Preparing to unpack .../13-libgomp1_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libgomp1:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libitm1:amd64. +Preparing to unpack .../14-libitm1_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libitm1:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libatomic1:amd64. 
+Preparing to unpack .../15-libatomic1_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libatomic1:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libasan4:amd64. +Preparing to unpack .../16-libasan4_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libasan4:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package liblsan0:amd64. +Preparing to unpack .../17-liblsan0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking liblsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libtsan0:amd64. +Preparing to unpack .../18-libtsan0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libtsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libubsan0:amd64. +Preparing to unpack .../19-libubsan0_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libubsan0:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package libcilkrts5:amd64. +Preparing to unpack .../20-libcilkrts5_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libcilkrts5:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package libmpx2:amd64. +Preparing to unpack .../21-libmpx2_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libmpx2:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libquadmath0:amd64. +Preparing to unpack .../22-libquadmath0_8.4.0-1ubuntu1~18.04_amd64.deb ... +Unpacking libquadmath0:amd64 (8.4.0-1ubuntu1~18.04) ... +Selecting previously unselected package libgcc-7-dev:amd64. +Preparing to unpack .../23-libgcc-7-dev_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libgcc-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package gcc-7. +Preparing to unpack .../24-gcc-7_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking gcc-7 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package gcc. +Preparing to unpack .../25-gcc_4%3a7.4.0-1ubuntu2.3_amd64.deb ... +Unpacking gcc (4:7.4.0-1ubuntu2.3) ... +Selecting previously unselected package libstdc++-7-dev:amd64. 
+Preparing to unpack .../26-libstdc++-7-dev_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking libstdc++-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package g++-7. +Preparing to unpack .../27-g++-7_7.5.0-3ubuntu1~18.04_amd64.deb ... +Unpacking g++-7 (7.5.0-3ubuntu1~18.04) ... +Selecting previously unselected package g++. +Preparing to unpack .../28-g++_4%3a7.4.0-1ubuntu2.3_amd64.deb ... +Unpacking g++ (4:7.4.0-1ubuntu2.3) ... +Selecting previously unselected package make. +Preparing to unpack .../29-make_4.1-9.1ubuntu1_amd64.deb ... +Unpacking make (4.1-9.1ubuntu1) ... +Selecting previously unselected package libdpkg-perl. +Preparing to unpack .../30-libdpkg-perl_1.19.0.5ubuntu2.3_all.deb ... +Unpacking libdpkg-perl (1.19.0.5ubuntu2.3) ... +Selecting previously unselected package dpkg-dev. +Preparing to unpack .../31-dpkg-dev_1.19.0.5ubuntu2.3_all.deb ... +Unpacking dpkg-dev (1.19.0.5ubuntu2.3) ... +Selecting previously unselected package build-essential. +Preparing to unpack .../32-build-essential_12.4ubuntu1_amd64.deb ... +Unpacking build-essential (12.4ubuntu1) ... +Selecting previously unselected package python3-lib2to3. +Preparing to unpack .../33-python3-lib2to3_3.6.9-1~18.04_all.deb ... +Unpacking python3-lib2to3 (3.6.9-1~18.04) ... +Selecting previously unselected package python3-distutils. +Preparing to unpack .../34-python3-distutils_3.6.9-1~18.04_all.deb ... +Unpacking python3-distutils (3.6.9-1~18.04) ... +Selecting previously unselected package dh-python. +Preparing to unpack .../35-dh-python_3.20180325ubuntu2_all.deb ... +Unpacking dh-python (3.20180325ubuntu2) ... +Selecting previously unselected package libfakeroot:amd64. +Preparing to unpack .../36-libfakeroot_1.22-2ubuntu1_amd64.deb ... +Unpacking libfakeroot:amd64 (1.22-2ubuntu1) ... +Selecting previously unselected package fakeroot. +Preparing to unpack .../37-fakeroot_1.22-2ubuntu1_amd64.deb ... +Unpacking fakeroot (1.22-2ubuntu1) ... 
+Selecting previously unselected package libalgorithm-diff-perl. +Preparing to unpack .../38-libalgorithm-diff-perl_1.19.03-1_all.deb ... +Unpacking libalgorithm-diff-perl (1.19.03-1) ... +Selecting previously unselected package libalgorithm-diff-xs-perl. +Preparing to unpack .../39-libalgorithm-diff-xs-perl_0.04-5_amd64.deb ... +Unpacking libalgorithm-diff-xs-perl (0.04-5) ... +Selecting previously unselected package libalgorithm-merge-perl. +Preparing to unpack .../40-libalgorithm-merge-perl_0.08-3_all.deb ... +Unpacking libalgorithm-merge-perl (0.08-3) ... +Selecting previously unselected package libexpat1-dev:amd64. +Preparing to unpack .../41-libexpat1-dev_2.2.5-3ubuntu0.2_amd64.deb ... +Unpacking libexpat1-dev:amd64 (2.2.5-3ubuntu0.2) ... +Selecting previously unselected package libfile-fcntllock-perl. +Preparing to unpack .../42-libfile-fcntllock-perl_0.22-3build2_amd64.deb ... +Unpacking libfile-fcntllock-perl (0.22-3build2) ... +Selecting previously unselected package libpython3.6-dev:amd64. +Preparing to unpack .../43-libpython3.6-dev_3.6.9-1~18.04ubuntu1.4_amd64.deb ... +Unpacking libpython3.6-dev:amd64 (3.6.9-1~18.04ubuntu1.4) ... +Selecting previously unselected package libpython3-dev:amd64. +Preparing to unpack .../44-libpython3-dev_3.6.7-1~18.04_amd64.deb ... +Unpacking libpython3-dev:amd64 (3.6.7-1~18.04) ... +Selecting previously unselected package manpages-dev. +Preparing to unpack .../45-manpages-dev_4.15-1_all.deb ... +Unpacking manpages-dev (4.15-1) ... +Selecting previously unselected package python-pip-whl. +Preparing to unpack .../46-python-pip-whl_9.0.1-2.3~ubuntu1.18.04.5_all.deb ... +Unpacking python-pip-whl (9.0.1-2.3~ubuntu1.18.04.5) ... +Selecting previously unselected package python3-crypto. +Preparing to unpack .../47-python3-crypto_2.6.1-8ubuntu2_amd64.deb ... +Unpacking python3-crypto (2.6.1-8ubuntu2) ... +Selecting previously unselected package python3.6-dev. 
+Preparing to unpack .../48-python3.6-dev_3.6.9-1~18.04ubuntu1.4_amd64.deb ... +Unpacking python3.6-dev (3.6.9-1~18.04ubuntu1.4) ... +Selecting previously unselected package python3-dev. +Preparing to unpack .../49-python3-dev_3.6.7-1~18.04_amd64.deb ... +Unpacking python3-dev (3.6.7-1~18.04) ... +Selecting previously unselected package python3-secretstorage. +Preparing to unpack .../50-python3-secretstorage_2.3.1-2_all.deb ... +Unpacking python3-secretstorage (2.3.1-2) ... +Selecting previously unselected package python3-keyring. +Preparing to unpack .../51-python3-keyring_10.6.0-1_all.deb ... +Unpacking python3-keyring (10.6.0-1) ... +Selecting previously unselected package python3-keyrings.alt. +Preparing to unpack .../52-python3-keyrings.alt_3.0-1_all.deb ... +Unpacking python3-keyrings.alt (3.0-1) ... +Selecting previously unselected package python3-pip. +Preparing to unpack .../53-python3-pip_9.0.1-2.3~ubuntu1.18.04.5_all.deb ... +Unpacking python3-pip (9.0.1-2.3~ubuntu1.18.04.5) ... +Selecting previously unselected package python3-setuptools. +Preparing to unpack .../54-python3-setuptools_39.0.1-2_all.deb ... +Unpacking python3-setuptools (39.0.1-2) ... +Selecting previously unselected package python3-wheel. +Preparing to unpack .../55-python3-wheel_0.30.0-0.2_all.deb ... +Unpacking python3-wheel (0.30.0-0.2) ... +Selecting previously unselected package python3-xdg. +Preparing to unpack .../56-python3-xdg_0.25-4ubuntu1.1_all.deb ... +Unpacking python3-xdg (0.25-4ubuntu1.1) ... +Setting up libquadmath0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libgomp1:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libatomic1:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up python-pip-whl (9.0.1-2.3~ubuntu1.18.04.5) ... +Setting up libcc1-0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up make (4.1-9.1ubuntu1) ... +Setting up python3-crypto (2.6.1-8ubuntu2) ... +Setting up libtsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up python3-xdg (0.25-4ubuntu1.1) ... 
+Setting up python3-keyrings.alt (3.0-1) ... +Setting up linux-libc-dev:amd64 (4.15.0-154.161) ... +Setting up libdpkg-perl (1.19.0.5ubuntu2.3) ... +Setting up python3-wheel (0.30.0-0.2) ... +Setting up liblsan0:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up gcc-7-base:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up binutils-common:amd64 (2.30-21ubuntu1~18.04.5) ... +Setting up libfile-fcntllock-perl (0.22-3build2) ... +Setting up libmpx2:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libfakeroot:amd64 (1.22-2ubuntu1) ... +Setting up libalgorithm-diff-perl (1.19.03-1) ... +Setting up libmpc3:amd64 (1.1.0-1) ... +Setting up libc-dev-bin (2.27-3ubuntu1.4) ... +Setting up python3-lib2to3 (3.6.9-1~18.04) ... +Setting up python3-secretstorage (2.3.1-2) ... +Setting up manpages-dev (4.15-1) ... +Setting up libc6-dev:amd64 (2.27-3ubuntu1.4) ... +Setting up python3-distutils (3.6.9-1~18.04) ... +Setting up libitm1:amd64 (8.4.0-1ubuntu1~18.04) ... +Setting up libisl19:amd64 (0.19-1) ... +Setting up libasan4:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up python3-keyring (10.6.0-1) ... +Setting up libbinutils:amd64 (2.30-21ubuntu1~18.04.5) ... +Setting up libcilkrts5:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up libubsan0:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up fakeroot (1.22-2ubuntu1) ... +update-alternatives: using /usr/bin/fakeroot-sysv to provide /usr/bin/fakeroot (fakeroot) in auto mode +Setting up libgcc-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up cpp-7 (7.5.0-3ubuntu1~18.04) ... +Setting up libstdc++-7-dev:amd64 (7.5.0-3ubuntu1~18.04) ... +Setting up libalgorithm-merge-perl (0.08-3) ... +Setting up libalgorithm-diff-xs-perl (0.04-5) ... +Setting up python3-pip (9.0.1-2.3~ubuntu1.18.04.5) ... +Setting up libexpat1-dev:amd64 (2.2.5-3ubuntu0.2) ... +Setting up python3-setuptools (39.0.1-2) ... +Setting up dh-python (3.20180325ubuntu2) ... +Setting up binutils-x86-64-linux-gnu (2.30-21ubuntu1~18.04.5) ... +Setting up cpp (4:7.4.0-1ubuntu2.3) ... 
+Setting up libpython3.6-dev:amd64 (3.6.9-1~18.04ubuntu1.4) ... +Setting up binutils (2.30-21ubuntu1~18.04.5) ... +Setting up python3.6-dev (3.6.9-1~18.04ubuntu1.4) ... +Setting up libpython3-dev:amd64 (3.6.7-1~18.04) ... +Setting up gcc-7 (7.5.0-3ubuntu1~18.04) ... +Setting up g++-7 (7.5.0-3ubuntu1~18.04) ... +Setting up python3-dev (3.6.7-1~18.04) ... +Setting up gcc (4:7.4.0-1ubuntu2.3) ... +Setting up dpkg-dev (1.19.0.5ubuntu2.3) ... +Setting up g++ (4:7.4.0-1ubuntu2.3) ... +update-alternatives: using /usr/bin/g++ to provide /usr/bin/c++ (c++) in auto mode +Setting up build-essential (12.4ubuntu1) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... +Processing triggers for libc-bin (2.27-3ubuntu1.4) ... +Collecting pip + Downloading https://files.pythonhosted.org/packages/ca/31/b88ef447d595963c01060998cb329251648acf4a067721b0452c45527eb8/pip-21.2.4-py3-none-any.whl (1.6MB) +Installing collected packages: pip + Found existing installation: pip 9.0.1 + Not uninstalling pip at /usr/lib/python3/dist-packages, outside environment /usr +Successfully installed pip-21.2.4 +Collecting python-magic + Downloading python_magic-0.4.24-py2.py3-none-any.whl (12 kB) +Collecting pyangbind + Downloading pyangbind-0.8.1.tar.gz (48 kB) +Collecting verboselogs + Downloading verboselogs-1.7-py2.py3-none-any.whl (11 kB) +Collecting bitarray + Downloading bitarray-2.3.3.tar.gz (88 kB) +Collecting enum34 + Downloading enum34-1.1.10-py3-none-any.whl (11 kB) +Collecting lxml + Downloading lxml-4.6.3-cp36-cp36m-manylinux2014_x86_64.whl (6.3 MB) +Collecting pyang + Downloading pyang-2.5.0-py2.py3-none-any.whl (595 kB) +Collecting regex + Downloading regex-2021.8.28-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (745 kB) +Requirement already satisfied: six in /usr/lib/python3/dist-packages (from pyangbind) (1.11.0) +Building wheels for collected packages: pyangbind, bitarray + Building wheel for pyangbind (setup.py): started + Building wheel for pyangbind (setup.py): 
finished with status 'done' + Created wheel for pyangbind: filename=pyangbind-0.8.1-py3-none-any.whl size=49314 sha256=140e4151d21f3d10fcbc56de395aa09b61e47ecbc79aaab999442713ebc53f29 + Stored in directory: /root/.cache/pip/wheels/d7/5f/16/210e82959deac8e57e539448ff940505a957125a521cb2a828 + Building wheel for bitarray (setup.py): started + Building wheel for bitarray (setup.py): finished with status 'done' + Created wheel for bitarray: filename=bitarray-2.3.3-cp36-cp36m-linux_x86_64.whl size=179330 sha256=df311306efa524693e9fba67ad27e40f409dba5a9b154fe251da2534aaab8ed8 + Stored in directory: /root/.cache/pip/wheels/4e/7b/5c/2d301a7bb02c7f8965a161411e8a445315b0be3512f20631ea +Successfully built pyangbind bitarray +Installing collected packages: lxml, regex, pyang, enum34, bitarray, verboselogs, python-magic, pyangbind +Successfully installed bitarray-2.3.3 enum34-1.1.10 lxml-4.6.3 pyang-2.5.0 pyangbind-0.8.1 python-magic-0.4.24 regex-2021.8.28 verboselogs-1.7 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. +The following NEW packages will be installed: + python3-osm-im python3-osmclient +0 upgraded, 2 newly installed, 0 to remove and 7 not upgraded. +Need to get 248 kB of archives. +After this operation, 8100 kB of additional disk space will be used. 
+Get:1 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/IM amd64 python3-osm-im all 10.0.1+g8c2165c-1 [186 kB] +Get:2 https://osm-download.etsi.org/repository/osm/debian/ReleaseTEN stable/osmclient amd64 python3-osmclient all 10.0.1+g42e87fa-1 [61.9 kB] +Fetched 248 kB in 0s (575 kB/s) +Selecting previously unselected package python3-osm-im. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 68374 files and directories currently installed.) +Preparing to unpack .../python3-osm-im_10.0.1+g8c2165c-1_all.deb ... +Unpacking python3-osm-im (10.0.1+g8c2165c-1) ... +Selecting previously unselected package python3-osmclient. +Preparing to unpack .../python3-osmclient_10.0.1+g42e87fa-1_all.deb ... +Unpacking python3-osmclient (10.0.1+g42e87fa-1) ... +Setting up python3-osmclient (10.0.1+g42e87fa-1) ... +Setting up python3-osm-im (10.0.1+g8c2165c-1) ... 
+Defaulting to user installation because normal site-packages is not writeable +Collecting bitarray==1.8.1 + Downloading bitarray-1.8.1.tar.gz (62 kB) +Requirement already satisfied: enum34==1.1.10 in /usr/local/lib/python3.6/dist-packages (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 3)) (1.1.10) +Requirement already satisfied: lxml==4.6.3 in /usr/local/lib/python3.6/dist-packages (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 5)) (4.6.3) +Collecting pyang==2.4.0 + Downloading pyang-2.4.0-py2.py3-none-any.whl (591 kB) +Requirement already satisfied: pyangbind==0.8.1 in /usr/local/lib/python3.6/dist-packages (from -r /usr/lib/python3/dist-packages/osm_im/requirements.txt (line 13)) (0.8.1) +Collecting pyyaml==5.4.1 + Downloading PyYAML-5.4.1-cp36-cp36m-manylinux1_x86_64.whl (640 kB) +Collecting regex==2021.3.17 + Downloading regex-2021.3.17-cp36-cp36m-manylinux2014_x86_64.whl (723 kB) +Collecting six==1.15.0 + Downloading six-1.15.0-py2.py3-none-any.whl (10 kB) +Building wheels for collected packages: bitarray + Building wheel for bitarray (setup.py): started + Building wheel for bitarray (setup.py): finished with status 'done' + Created wheel for bitarray: filename=bitarray-1.8.1-cp36-cp36m-linux_x86_64.whl size=126218 sha256=f32c028f13494d5542f5d4891eac1ac4e1ede30a33123b0c8fa931ca01bdbf7a + Stored in directory: /home/ubuntu/.cache/pip/wheels/06/56/e5/6eb78d4f54ad7874a6f68bcefc2c82663d5ca22d6503351a81 +Successfully built bitarray +Installing collected packages: six, regex, pyang, bitarray, pyyaml +Successfully installed bitarray-1.8.1 pyang-2.4.0 pyyaml-5.4.1 regex-2021.3.17 six-1.15.0 +Reading package lists... +Building dependency tree... +Reading state information... +The following packages were automatically installed and are no longer required: + dns-root-data dnsmasq-base libuv1 uidmap xdelta3 +Use 'sudo apt autoremove' to remove them. 
+Suggested packages: + libcurl4-doc libidn11-dev libkrb5-dev libldap2-dev librtmp-dev libssh2-1-dev + pkg-config zlib1g-dev libssl-doc +The following NEW packages will be installed: + libcurl4-openssl-dev libssl-dev +0 upgraded, 2 newly installed, 0 to remove and 7 not upgraded. +Need to get 1869 kB of archives. +After this operation, 9277 kB of additional disk space will be used. +Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libcurl4-openssl-dev amd64 7.58.0-2ubuntu3.14 [301 kB] +Get:2 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libssl-dev amd64 1.1.1-1ubuntu2.1~18.04.13 [1568 kB] +Fetched 1869 kB in 0s (4051 kB/s) +Selecting previously unselected package libcurl4-openssl-dev:amd64. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 68456 files and directories currently installed.) +Preparing to unpack .../libcurl4-openssl-dev_7.58.0-2ubuntu3.14_amd64.deb ... +Unpacking libcurl4-openssl-dev:amd64 (7.58.0-2ubuntu3.14) ... +Selecting previously unselected package libssl-dev:amd64. +Preparing to unpack .../libssl-dev_1.1.1-1ubuntu2.1~18.04.13_amd64.deb ... +Unpacking libssl-dev:amd64 (1.1.1-1ubuntu2.1~18.04.13) ... +Setting up libssl-dev:amd64 (1.1.1-1ubuntu2.1~18.04.13) ... +Setting up libcurl4-openssl-dev:amd64 (7.58.0-2ubuntu3.14) ... +Processing triggers for man-db (2.8.3-2ubuntu0.1) ... 
+Defaulting to user installation because normal site-packages is not writeable +Collecting certifi==2020.12.5 + Downloading certifi-2020.12.5-py2.py3-none-any.whl (147 kB) +Collecting chardet==4.0.0 + Downloading chardet-4.0.0-py2.py3-none-any.whl (178 kB) +Collecting click==7.1.2 + Downloading click-7.1.2-py2.py3-none-any.whl (82 kB) +Collecting idna==2.10 + Downloading idna-2.10-py2.py3-none-any.whl (58 kB) +Collecting jinja2==2.11.3 + Downloading Jinja2-2.11.3-py2.py3-none-any.whl (125 kB) +Collecting markupsafe==1.1.1 + Downloading MarkupSafe-1.1.1-cp36-cp36m-manylinux2010_x86_64.whl (32 kB) +Collecting packaging==20.9 + Downloading packaging-20.9-py2.py3-none-any.whl (40 kB) +Collecting prettytable==2.1.0 + Downloading prettytable-2.1.0-py3-none-any.whl (22 kB) +Collecting pycurl==7.43.0.6 + Downloading pycurl-7.43.0.6.tar.gz (222 kB) +Collecting pyparsing==2.4.7 + Downloading pyparsing-2.4.7-py2.py3-none-any.whl (67 kB) +Collecting python-magic==0.4.22 + Downloading python_magic-0.4.22-py2.py3-none-any.whl (12 kB) +Requirement already satisfied: pyyaml==5.4.1 in /home/ubuntu/.local/lib/python3.6/site-packages (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 23)) (5.4.1) +Collecting requests==2.25.1 + Downloading requests-2.25.1-py2.py3-none-any.whl (61 kB) +Collecting urllib3==1.26.4 + Downloading urllib3-1.26.4-py2.py3-none-any.whl (153 kB) +Requirement already satisfied: verboselogs==1.7 in /usr/local/lib/python3.6/dist-packages (from -r /usr/lib/python3/dist-packages/osmclient/requirements.txt (line 29)) (1.7) +Collecting wcwidth==0.2.5 + Downloading wcwidth-0.2.5-py2.py3-none-any.whl (30 kB) +Collecting importlib-metadata + Downloading importlib_metadata-4.8.1-py3-none-any.whl (17 kB) +Collecting typing-extensions>=3.6.4 + Downloading typing_extensions-3.10.0.2-py3-none-any.whl (26 kB) +Collecting zipp>=0.5 + Downloading zipp-3.5.0-py3-none-any.whl (5.7 kB) +Building wheels for collected packages: pycurl + Building wheel for pycurl 
(setup.py): started + Building wheel for pycurl (setup.py): finished with status 'done' + Created wheel for pycurl: filename=pycurl-7.43.0.6-cp36-cp36m-linux_x86_64.whl size=274584 sha256=5ea5e630a693e05885be5c562b012f216a144d3d404807238967d84e2873fe9c + Stored in directory: /home/ubuntu/.cache/pip/wheels/6b/17/54/ab9d3a8137df47050b265e8e01e1e161e423e6b6ec33bfd7d4 +Successfully built pycurl +Installing collected packages: zipp, typing-extensions, wcwidth, urllib3, pyparsing, markupsafe, importlib-metadata, idna, chardet, certifi, requests, python-magic, pycurl, prettytable, packaging, jinja2, click + WARNING: The script chardetect is installed in '/home/ubuntu/.local/bin' which is not on PATH. + Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location. +Successfully installed certifi-2020.12.5 chardet-4.0.0 click-7.1.2 idna-2.10 importlib-metadata-4.8.1 jinja2-2.11.3 markupsafe-1.1.1 packaging-20.9 prettytable-2.1.0 pycurl-7.43.0.6 pyparsing-2.4.7 python-magic-0.4.22 requests-2.25.1 typing-extensions-3.10.0.2 urllib3-1.26.4 wcwidth-0.2.5 zipp-3.5.0 + +OSM client installed +OSM client assumes that OSM host is running in localhost (127.0.0.1). +In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file: + export OSM_HOSTNAME= +Checking OSM health state... 
+Check OSM status with: kubectl -n osm get all +4f9406a6-215d-4a1c-8995-32e9a44d214a +66c9de33-febc-4656-b164-f19e32f0f3db + +DONE diff --git a/_tmp/osm-install/multipass.install.sh b/_tmp/osm-install/multipass.install.sh new file mode 100755 index 0000000..feed2da --- /dev/null +++ b/_tmp/osm-install/multipass.install.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +multipass launch --name osm --cpus 2 --mem 6G --disk 40G 18.04 + +multipass mount ./ osm:/mnt/osm-install + +# multipass exec osm -- cd /mnt/osm-install && ./patched.install_osm.sh 2>&1 | tee install.log +# ^ sudo issue + +multipass shell osm + +# Base OSM install +# +# $ cd /mnt/osm-install +# $ ./patched.install_osm.sh 2>&1 | tee install.log + +# KNF setup for an isolated K8s cluster, copy-pasted from: +# - https://osm.etsi.org/docs/user-guide/05-osm-usage.html#adding-kubernetes-cluster-to-osm +# but changed version to the actual K8s server version returned by `kubectl version` +# +# $ osm vim-create --name mylocation1 --user u --password p --tenant p --account_type dummy --auth_url http://localhost/dummy +# $ osm k8scluster-add cluster --creds .kube/config --vim mylocation1 --k8s-nets '{k8s_net1: null}' --version "v1.15.12" --description="Isolated K8s cluster in mylocation1" + +# Some rops where to fetch Helm charts for KNF +# +# $ osm repo-add --type helm-chart --description "Bitnami repo" bitnami https://charts.bitnami.com/bitnami +# $ osm repo-add --type helm-chart --description "Cetic repo" cetic https://cetic.github.io/helm-charts +# $ osm repo-add --type helm-chart --description "Elastic repo" elastic https://helm.elastic.co + +# To clean up: +# +# $ multipass stop osm +# $ multipass delete osm +# $ multipass purge diff --git a/_tmp/osm-install/patched.full_install_osm.sh b/_tmp/osm-install/patched.full_install_osm.sh new file mode 100755 index 0000000..5a51010 --- /dev/null +++ b/_tmp/osm-install/patched.full_install_osm.sh @@ -0,0 +1,1870 @@ +#!/bin/bash +# Copyright 2016 Telefónica Investigación y 
Desarrollo S.A.U. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function usage(){ + echo -e "usage: $0 [OPTIONS]" + echo -e "Install OSM from binaries or source code (by default, from binaries)" + echo -e " OPTIONS" + echo -e " -h / --help: print this help" + echo -e " -y: do not prompt for confirmation, assumes yes" + echo -e " -r : use specified repository name for osm packages" + echo -e " -R : use specified release for osm binaries (deb packages, lxd images, ...)" + echo -e " -u : use specified repository url for osm packages" + echo -e " -k : use specified repository public key url" + echo -e " -b : install OSM from source code using a specific branch (master, v2.0, ...) or tag" + echo -e " -b master (main dev branch)" + echo -e " -b v2.0 (v2.0 branch)" + echo -e " -b tags/v1.1.0 (a specific tag)" + echo -e " ..." + echo -e " -c deploy osm services using container . Valid values are or . If -c is not used then osm will be deployed using default orchestrator. 
When used with --uninstall, osm services deployed by the orchestrator will be uninstalled" + echo -e " -s or user defined stack name when installed using swarm or namespace when installed using k8s, default is osm" + echo -e " -H use specific juju host controller IP" + echo -e " -S use VCA/juju secret key" + echo -e " -P use VCA/juju public key file" + echo -e " -C use VCA/juju CA certificate file" + echo -e " -A use VCA/juju API proxy" + echo -e " --vimemu: additionally deploy the VIM emulator as a docker container" + echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging" + echo -e " --pla: install the PLA module for placement support" + echo -e " -m : install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)" + echo -e " -o : ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)" + echo -e " -O : Install OSM to an OpenStack infrastructure. is required. 
If a is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/" + echo -e " -N : Public network name required to setup OSM to OpenStack" + echo -e " -f : Public SSH key to use to deploy OSM to OpenStack" + echo -e " -F : Cloud-Init userdata file to deploy OSM to OpenStack" + echo -e " -D use local devops installation path" + echo -e " -w Location to store runtime installation" + echo -e " -t specify osm docker tag (default is latest)" + echo -e " -l: LXD cloud yaml file" + echo -e " -L: LXD credentials yaml file" + echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped" + echo -e " -d use docker registry URL instead of dockerhub" + echo -e " -p set docker proxy URL as part of docker CE configuration" + echo -e " -T specify docker tag for the modules specified with option -m" + echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)" + echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)" + echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)" + echo -e " --nojuju: do not juju, assumes already installed" + echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)" + echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)" + echo -e " --nohostclient: do not install the osmclient" + echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules" + echo -e " --source: install OSM from source code using the latest stable tag" + echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch" + echo -e " --pullimages: pull/run osm images from 
docker.io/opensourcemano" + echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana" + echo -e " --volume: create a VM volume when installing to OpenStack" +# echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)" +# echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch" + echo -e " --showopts: print chosen options and exit (only for debugging)" + echo -e " --charmed: Deploy and operate OSM with Charms on k8s" + echo -e " [--bundle ]: Specify with which bundle to deploy OSM with charms (--charmed option)" + echo -e " [--k8s ]: Specify with which kubernetes to deploy OSM with charms (--charmed option)" + echo -e " [--vca ]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)" + echo -e " [--lxd ]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)" + echo -e " [--lxd-cred ]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)" + echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)" + echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)" + echo -e " [--ha]: Installs High Availability bundle. (--charmed option)" + echo -e " [--tag]: Docker image tag. (--charmed option)" + echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)" + +} + +# takes a juju/accounts.yaml file and returns the password specific +# for a controller. 
I wrote this using only bash tools to minimize +# additions of other packages +function parse_juju_password { + password_file="${HOME}/.local/share/juju/accounts.yaml" + local controller_name=$1 + local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file | + awk -F$fs -v controller=$controller_name '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) {if (i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; for (i=0; i/dev/null; then + echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" + echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections + echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections + sudo apt-get -yq install iptables-persistent + fi +} + +#Configure NAT rules, based on the current IP addresses of containers +function nat(){ + check_install_iptables_persistent + + echo -e "\nConfiguring NAT rules" + echo -e " Required root privileges" + sudo $OSM_DEVOPS/installers/nat_osm +} + +function FATAL(){ + echo "FATAL error: Cannot install OSM due to \"$1\"" + exit 1 +} + +function update_juju_images(){ + crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab - + ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic +} + +function install_lxd() { + # Apply sysctl production values for optimal performance + sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf + sudo sysctl --system + + # Install LXD snap + sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client + sudo snap install lxd + + # Configure LXD + sudo usermod -a -G lxd `whoami` + cat 
${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed" + sg lxd -c "lxd waitready" + DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}') + [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}') + DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}') + sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU" + sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU" + #sudo systemctl stop lxd-bridge + #sudo systemctl --system daemon-reload + #sudo systemctl enable lxd-bridge + #sudo systemctl start lxd-bridge +} + +function ask_user(){ + # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive + # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed + # Return: true(0) if user type 'yes'; false (1) if user type 'no' + read -e -p "$1" USER_CONFIRMATION + while true ; do + [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0 + [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1 + [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0 + [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1 + read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION + done +} + +function install_osmclient(){ + CLIENT_RELEASE=${RELEASE#"-R "} + CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg" + CLIENT_REPOSITORY=${REPOSITORY#"-r "} + CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "} + key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY + curl $key_location | sudo apt-key add - + sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM" + sudo apt-get update + sudo apt-get install -y python3-pip + sudo -H LC_ALL=C python3 -m pip install -U 
pip + sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs + sudo apt-get install -y python3-osm-im python3-osmclient + if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then + python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt + fi + if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then + sudo apt-get install -y libcurl4-openssl-dev libssl-dev + python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt + fi + #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc + #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc + #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc + [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'` + [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'` + echo -e "\nOSM client installed" + if [ -z "$INSTALL_LIGHTWEIGHT" ]; then + echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:" + echo " export OSM_HOSTNAME=${OSM_HOSTNAME}" + echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}" + else + echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)." + echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:" + echo " export OSM_HOSTNAME=" + fi + return 0 +} + +function install_prometheus_nodeexporter(){ + if (systemctl -q is-active node_exporter) + then + echo "Node Exporter is already running." + else + echo "Node Exporter is not active, installing..." 
+ if getent passwd node_exporter > /dev/null 2>&1; then + echo "node_exporter user exists" + else + echo "Creating user node_exporter" + sudo useradd --no-create-home --shell /bin/false node_exporter + fi + wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/ + sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz + sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin + sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter + sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64* + sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service + sudo systemctl daemon-reload + sudo systemctl restart node_exporter + sudo systemctl enable node_exporter + echo "Node Exporter has been activated in this host." + fi + return 0 +} + +function uninstall_prometheus_nodeexporter(){ + sudo systemctl stop node_exporter + sudo systemctl disable node_exporter + sudo rm /etc/systemd/system/node_exporter.service + sudo systemctl daemon-reload + sudo userdel node_exporter + sudo rm /usr/local/bin/node_exporter + return 0 +} + +function install_docker_ce() { + # installs and configures Docker CE + echo "Installing Docker CE ..." + sudo apt-get -qq update + sudo apt-get install -y apt-transport-https ca-certificates software-properties-common + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - + sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" + sudo apt-get -qq update + sudo apt-get install -y docker-ce + echo "Adding user to group 'docker'" + sudo groupadd -f docker + sudo usermod -aG docker $USER + sleep 2 + sudo service docker restart + echo "... 
restarted Docker service" + if [ -n "${DOCKER_PROXY_URL}" ]; then + echo "Configuring docker proxy ..." + if [ -f /etc/docker/daemon.json ]; then + if grep -q registry-mirrors /etc/docker/daemon.json; then + sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json + else + sudo sed -i "s|{|{\n \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json + fi + else + sudo bash -c "cat << EOF > /etc/docker/daemon.json +{ + \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] +} +EOF" + fi + sudo systemctl daemon-reload + sudo service docker restart + echo "... restarted Docker service again" + fi + sg docker -c "docker version" || FATAL "Docker installation failed" + echo "... Docker CE installation done" + return 0 +} + +function install_docker_compose() { + # installs and configures docker-compose + echo "Installing Docker Compose ..." + sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose + sudo chmod +x /usr/local/bin/docker-compose + echo "... Docker Compose installation done" +} + +function install_juju() { + echo "Installing juju" + sudo snap install juju --classic --channel=$JUJU_VERSION/stable + [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}" + [ -n "$INSTALL_NOCACHELXDIMAGES" ] || update_juju_images + echo "Finished installation of juju" + return 0 +} + +function juju_createcontroller() { + if ! 
juju show-controller $OSM_STACK_NAME &> /dev/null; then + # Not found created, create the controller + sudo usermod -a -G lxd ${USER} + sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME" + fi + [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed" + juju controller-config features=[k8s-operators] +} + +function juju_addk8s() { + cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath +} + +function juju_createcontroller_k8s(){ + cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client + juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \ + --config controller-service-type=loadbalancer \ + --agent-version=$JUJU_AGENT_VERSION +} + + +function juju_addlxd_cloud(){ + mkdir -p /tmp/.osm + OSM_VCA_CLOUDNAME="lxd-cloud" + LXDENDPOINT=$DEFAULT_IP + LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml + LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml + + cat << EOF > $LXD_CLOUD +clouds: + $OSM_VCA_CLOUDNAME: + type: lxd + auth-types: [certificate] + endpoint: "https://$LXDENDPOINT:8443" + config: + ssl-hostname-verification: false +EOF + openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org" + local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'` + local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/ /'` + local client_key=`cat /tmp/.osm/client.key | sed 's/^/ /'` + + cat << EOF > $LXD_CREDENTIALS +credentials: + $OSM_VCA_CLOUDNAME: + lxd-cloud: + auth-type: certificate + server-cert: | +$server_cert + client-cert: | +$client_cert + client-key: | +$client_key +EOF + lxc config trust add local: /tmp/.osm/client.crt + juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force + juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f 
$LXD_CREDENTIALS + sg lxd -c "lxd waitready" + juju controller-config features=[k8s-operators] +} + + +# Adds a persistent DNAT rule so that the Juju controller API (TCP/17070) is +# reachable through the host's default IP; idempotent (checks with -C first). +function juju_createproxy() { + check_install_iptables_persistent + + if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then + sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST + sudo netfilter-persistent save + fi +} + +# Logs the local Docker daemon into the registry using +# DOCKER_REGISTRY_USER / DOCKER_REGISTRY_PASSWORD (runs inside sg docker). +function docker_login() { + echo "Docker login" + sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}" +} + +# Pulls third-party images (kafka, mongo, prometheus, grafana, mariadb, ...) +# honoring TO_REBUILD filters; aborts via FATAL on any pull failure. +# NOTE(review): this function's body continues past this physical line. +function generate_docker_images() { + echo "Pulling and generating docker images" + [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login + + echo "Pulling docker images" + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then + sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image" + sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then + sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then + sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then + sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then + sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then + sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
+ fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then + sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image" + fi + + if [ -n "$PULL_IMAGES" ]; then + echo "Pulling OSM docker images" + for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do + module_lower=${module,,} + if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then + continue + fi + module_tag="${OSM_DOCKER_TAG}" + if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then + module_tag="${MODULE_DOCKER_TAG}" + fi + echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image" + sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image" + done + else + _build_from=$COMMIT_ID + [ -z "$_build_from" ] && _build_from="latest" + echo "OSM Docker images generated from $_build_from" + + for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then + module_lower=${module,,} + if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then + continue + fi + git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module + git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID} + sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image" + fi + done + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then + BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY") + BUILD_ARGS+=(--build-arg RELEASE="$RELEASE") + BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY") + BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE") + sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ." 
+ fi + echo "Finished generation of docker images" + fi + + echo "Finished pulling and generating docker images" +} + +function cmp_overwrite() { + file1="$1" + file2="$2" + if ! $(cmp "${file1}" "${file2}" >/dev/null 2>&1); then + if [ -f "${file2}" ]; then + ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2} + else + cp -b ${file1} ${file2} + fi + fi +} + +function generate_docker_compose_files() { + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml + if [ -n "$INSTALL_PLA" ]; then + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml + fi +} + +function generate_k8s_manifest_files() { + #Kubernetes resources + $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR + $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml +} + +function generate_prometheus_grafana_files() { + [ -n "$KUBERNETES" ] && return + # Prometheus files + $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml + + # Grafana files + $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json + + # Prometheus 
Exporters files + $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service +} + +function generate_docker_env_files() { + echo "Doing a backup of existing env files" + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~} + + echo "Generating docker env files" + # LCM + if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then + echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! 
grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if [ -n "$OSM_VCA_APIPROXY" ]; then + if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + fi + + if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + # RO + MYSQL_ROOT_PASSWORD=$(generate_secret) + if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then + echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env + fi + if [ ! 
-f $OSM_DOCKER_WORK_DIR/ro.env ]; then + echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env + fi + if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then + echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env + fi + + # Keystone + KEYSTONE_DB_PASSWORD=$(generate_secret) + SERVICE_PASSWORD=$(generate_secret) + if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then + echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env + fi + if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then + echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env + echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env + echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env + fi + + # NBI + if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then + echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env + echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env + fi + + # MON + if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then + echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + fi + + if ! 
grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + + if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + + if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + + if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + + + # POL + if [ ! 
-f $OSM_DOCKER_WORK_DIR/pol.env ]; then + echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env + fi + + echo "Finished generation of docker env files" +} + +function generate_osmclient_script () { + echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm + $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm" + echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm" +} + +#installs kubernetes packages +function install_kube() { + sudo apt-get update && sudo apt-get install -y apt-transport-https + curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - + sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main" + sudo apt-get update + echo "Installing Kubernetes Packages ..." + sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00 + sudo apt-mark hold kubelet kubeadm kubectl +} + +#initializes kubernetes control plane +function init_kubeadm() { + sudo swapoff -a + sudo sed -i.bak '/.*none.*swap/s/^\(.*\)$/#\1/g' /etc/fstab + sudo kubeadm init --config $1 + sleep 5 +} + +function kube_config_dir() { + [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes" + mkdir -p $HOME/.kube + sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config +} + +function install_k8s_storageclass() { + echo "Installing OpenEBS" + kubectl create ns openebs + helm repo add openebs https://openebs.github.io/charts + helm repo update + helm install --namespace openebs openebs openebs/openebs --version 1.12.0 + helm ls -n openebs + local storageclass_timeout=600 + local counter=0 + local storageclass_ready="" + echo "Waiting for storageclass" + while (( counter < storageclass_timeout )) + do + kubectl get storageclass openebs-hostpath &> /dev/null + + if [ $? 
-eq 0 ] ; then + echo "Storageclass available" + storageclass_ready="y" + break + else + counter=$((counter + 15)) + sleep 15 + fi + done + [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs" + kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' +} + +function install_k8s_metallb() { + METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP + cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f - + echo "apiVersion: v1 +kind: ConfigMap +metadata: + namespace: metallb-system + name: config +data: + config: | + address-pools: + - name: default + protocol: layer2 + addresses: + - $METALLB_IP_RANGE" | kubectl apply -f - +} +#deploys flannel as daemonsets +function deploy_cni_provider() { + CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")" + trap 'rm -rf "${CNI_DIR}"' EXIT + wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR + kubectl apply -f $CNI_DIR + [ $? 
-ne 0 ] && FATAL "Cannot Install Flannel" +} + +#creates secrets from env files which will be used by containers +function kube_secrets(){ + kubectl create ns $OSM_STACK_NAME + kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env + kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env + kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env + kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env + kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env + kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env + kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env +} + +#taints K8s master node +function taint_master_node() { + K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}') + kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule- + sleep 5 +} + +#deploys osm pods and services +function deploy_osm_services() { + kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR +} + +#deploy charmed services +function deploy_charmed_services() { + juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME + juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME +} + +function deploy_osm_pla_service() { + # corresponding to namespace_vol + $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml + # corresponding to deploy_osm_services + kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla +} + +#Install Helm v3 +function install_helm() { + helm > /dev/null 2>&1 + if [ $? != 0 ] ; then + # Helm is not installed. Install helm + echo "Helm is not installed, installing ..." 
+ curl https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz --output helm-v3.6.3.tar.gz + tar -zxvf helm-v3.6.3.tar.gz + sudo mv linux-amd64/helm /usr/local/bin/helm + rm -r linux-amd64 + rm helm-v3.6.3.tar.gz + helm repo add stable https://charts.helm.sh/stable + helm repo update + fi +} + +function parse_yaml() { + TAG=$1 + shift + services=$@ + for module in $services; do + if [ "$module" == "pla" ]; then + if [ -n "$INSTALL_PLA" ]; then + echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}" + $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml + fi + else + echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}" + $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml + fi + done +} + +function update_manifest_files() { + osm_services="nbi lcm ro pol mon ng-ui keystone pla" + list_of_services="" + for module in $osm_services; do + module_upper="${module^^}" + if ! echo $TO_REBUILD | grep -q $module_upper ; then + list_of_services="$list_of_services $module" + fi + done + if [ ! 
"$OSM_DOCKER_TAG" == "10" ]; then + parse_yaml $OSM_DOCKER_TAG $list_of_services + fi + if [ -n "$MODULE_DOCKER_TAG" ]; then + parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild + fi +} + +function namespace_vol() { + osm_services="nbi lcm ro pol mon kafka mysql prometheus" + for osm in $osm_services; do + $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml + done +} + +function init_docker_swarm() { + if [ "${DEFAULT_MTU}" != "1500" ]; then + DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s` + DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'` + sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge" + fi + sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}" + return 0 +} + +function create_docker_network() { + echo "creating network" + sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}" + echo "creating network DONE" +} + +function deploy_lightweight() { + + echo "Deploying lightweight build" + OSM_NBI_PORT=9999 + OSM_RO_PORT=9090 + OSM_KEYSTONE_PORT=5000 + OSM_UI_PORT=80 + OSM_MON_PORT=8662 + OSM_PROM_PORT=9090 + OSM_PROM_CADVISOR_PORT=8080 + OSM_PROM_HOSTPORT=9091 + OSM_GRAFANA_PORT=3000 + [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601 + #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000 + + if [ -n "$NO_HOST_PORTS" ]; then + OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT) + OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT) + OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT) + 
OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT) + OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT) + OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT) + OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT) + OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT) + #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT) + [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT) + else + OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT) + OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT) + OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT) + OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT) + OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT) + OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT) + OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT) + OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT) + #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT) + [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT) + fi + echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee 
--append $OSM_DOCKER_WORK_DIR/osm_ports.sh + + pushd $OSM_DOCKER_WORK_DIR + if [ -n "$INSTALL_PLA" ]; then + track deploy_osm_pla + sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME" + else + sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME" + fi + popd + + echo "Finished deployment of lightweight build" +} + +function deploy_elk() { + echo "Pulling docker images for ELK" + sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image" + sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image" + sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image" + sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image" + sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image" + echo "Finished pulling elk docker images" + $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk" + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk + remove_stack osm_elk + echo "Deploying ELK stack" + sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk" + echo "Waiting for ELK stack to be up and running" + time=0 + step=5 + timelength=40 + elk_is_up=1 + while [ $time -le $timelength ]; do + if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then + elk_is_up=0 + break + fi + sleep $step + time=$((time+step)) + done + if [ 
$elk_is_up -eq 0 ]; then + echo "ELK is up and running. Trying to create index pattern..." + #Create index pattern + curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \ + -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null + #Make it the default index + curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \ + -d"{\"value\":\"filebeat-*\"}" 2>/dev/null + else + echo "Cannot connect to Kibana to create index pattern." + echo "Once Kibana is running, you can use the following instructions to create index pattern:" + echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \ + -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"' + echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \ + -d"{\"value\":\"filebeat-*\"}"' + fi + echo "Finished deployment of ELK stack" + return 0 +} + +function add_local_k8scluster() { + /usr/bin/osm --all-projects vim-create \ + --name _system-osm-vim \ + --account_type dummy \ + --auth_url http://dummy \ + --user osm --password osm --tenant osm \ + --description "dummy" \ + --config '{management_network_name: mgmt}' + /usr/bin/osm --all-projects k8scluster-add \ + --creds ${HOME}/.kube/config \ + --vim _system-osm-vim \ + --k8s-nets '{"net1": null}' \ + --version '1.15' \ + --description "OSM Internal Cluster" \ + _system-osm-k8s +} + +function install_lightweight() { + track checkingroot + [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges." + track noroot + + if [ -n "$KUBERNETES" ]; then + [ -z "$ASSUME_YES" ] && ! 
ask_user "The installation will do the following + 1. Install and configure LXD + 2. Install juju + 3. Install docker CE + 4. Disable swap space + 5. Install and initialize Kubernetes + as pre-requirements. + Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1 + + else + [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1 + fi + track proceed + + echo "Installing lightweight build of OSM" + LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")" + trap 'rm -rf "${LWTEMPDIR}"' EXIT + DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}') + [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}') + [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0" + DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'` + [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route" + DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}') + + # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to + if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then + need_packages_lw="snapd" + echo -e "Checking required packages: $need_packages_lw" + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ + || sudo apt-get update \ + || FATAL "failed to run apt-get update" + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "Installing $need_packages_lw requires root privileges." 
\ + || sudo apt-get install -y $need_packages_lw \ + || FATAL "failed to install $need_packages_lw" + install_lxd + fi + + track prereqok + + [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce) + + echo "Creating folders for installation" + [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR + [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla + [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml + + #Installs Kubernetes + if [ -n "$KUBERNETES" ]; then + install_kube + track install_k8s + init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml + kube_config_dir + track init_k8s + if [ -n "$INSTALL_K8S_MONITOR" ]; then + # uninstall OSM MONITORING + uninstall_k8s_monitoring + track uninstall_k8s_monitoring + fi + #remove old namespace + remove_k8s_namespace $OSM_STACK_NAME + deploy_cni_provider + taint_master_node + install_helm + track install_helm + install_k8s_storageclass + track k8s_storageclass + install_k8s_metallb + track k8s_metallb + else + #install_docker_compose + [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm + track docker_swarm + fi + + [ -z "$INSTALL_NOJUJU" ] && install_juju + track juju_install + + if [ -z "$OSM_VCA_HOST" ]; then + if [ -z "$CONTROLLER_NAME" ]; then + + if [ -n "$KUBERNETES" ]; then + juju_createcontroller_k8s + juju_addlxd_cloud + else + if [ -n "$LXD_CLOUD_FILE" ]; then + [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external" + OSM_VCA_CLOUDNAME="lxd-cloud" + juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE + juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE + fi + juju_createcontroller + juju_createproxy + fi + else + 
OSM_VCA_CLOUDNAME="lxd-cloud" + if [ -n "$LXD_CLOUD_FILE" ]; then + [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external" + juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE + juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE + else + mkdir -p ~/.osm + cat << EOF > ~/.osm/lxd-cloud.yaml +clouds: + lxd-cloud: + type: lxd + auth-types: [certificate] + endpoint: "https://$DEFAULT_IP:8443" + config: + ssl-hostname-verification: false +EOF + openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org" + local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'` + local client_cert=`cat ~/.osm/client.crt | sed 's/^/ /'` + local client_key=`cat ~/.osm/client.key | sed 's/^/ /'` + cat << EOF > ~/.osm/lxd-credentials.yaml +credentials: + lxd-cloud: + lxd-cloud: + auth-type: certificate + server-cert: | +$server_cert + client-cert: | +$client_cert + client-key: | +$client_key +EOF + lxc config trust add local: ~/.osm/client.crt + juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml + juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml + fi + fi + [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'` + [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'` + [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju 
controller IP address" + fi + track juju_controller + + if [ -z "$OSM_VCA_SECRET" ]; then + [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME) + [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME) + [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret" + fi + if [ -z "$OSM_VCA_PUBKEY" ]; then + OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub) + [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key" + fi + if [ -z "$OSM_VCA_CACERT" ]; then + [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n) + [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n) + [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate" + fi + + # Set OSM_VCA_APIPROXY only when it is not a k8s installation + if [ -z "$KUBERNETES" ]; then + if [ -z "$OSM_VCA_APIPROXY" ]; then + OSM_VCA_APIPROXY=$DEFAULT_IP + [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy" + fi + juju_createproxy + fi + track juju + + if [ -z "$OSM_DATABASE_COMMONKEY" ]; then + OSM_DATABASE_COMMONKEY=$(generate_secret) + [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret" + fi + + # Deploy OSM services + [ -z "$DOCKER_NOBUILD" ] && generate_docker_images + track docker_build + + if [ -n "$KUBERNETES" ]; then + generate_k8s_manifest_files + else + generate_docker_compose_files + fi + track manifest_files + generate_prometheus_grafana_files + generate_docker_env_files + track env_files + + if [ -n "$KUBERNETES" ]; then + deploy_charmed_services + kube_secrets + update_manifest_files + namespace_vol + deploy_osm_services + if [ -n "$INSTALL_PLA" ]; then + # optional PLA install + deploy_osm_pla_service + track deploy_osm_pla + fi + 
track deploy_osm_services_k8s + if [ -n "$INSTALL_K8S_MONITOR" ]; then + # install OSM MONITORING + install_k8s_monitoring + track install_k8s_monitoring + fi + else + # remove old stack + remove_stack $OSM_STACK_NAME + create_docker_network + deploy_lightweight + generate_osmclient_script + track docker_deploy + install_prometheus_nodeexporter + track nodeexporter + [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu + [ -n "$INSTALL_ELK" ] && deploy_elk && track elk + fi + + [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient + track osmclient + + echo -e "Checking OSM health state..." + if [ -n "$KUBERNETES" ]; then + $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \ + echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \ + echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \ + track osm_unhealthy + else + $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \ + echo -e "OSM is not healthy, but will probably converge to a healthy state soon." 
&& \ + echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \ + track osm_unhealthy + fi + track after_healthcheck + + [ -n "$KUBERNETES" ] && add_local_k8scluster + track add_local_k8scluster + + wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null + track end + return 0 +} + +function install_to_openstack() { + + if [ -z "$2" ]; then + FATAL "OpenStack installer requires a valid external network name" + fi + + # Install Pip for Python3 + $WORKDIR_SUDO apt install -y python3-pip python3-venv + $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip + + # Create a venv to avoid conflicts with the host installation + python3 -m venv $OPENSTACK_PYTHON_VENV + + source $OPENSTACK_PYTHON_VENV/bin/activate + + # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train + python -m pip install -U wheel + python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11" + + # Install the Openstack cloud module (ansible>=2.10) + ansible-galaxy collection install openstack.cloud + + export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg" + + OSM_INSTALLER_ARGS="${REPO_ARGS[@]}" + + ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME" + + if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then + ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE" + fi + + if [ -n "$OPENSTACK_USERDATA_FILE" ]; then + ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE" + fi + + # Execute the Ansible playbook based on openrc or clouds.yaml + if [ -e "$1" ]; then + . 
$1 + ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \ + $OSM_DEVOPS/installers/openstack/site.yml + else + ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \ + -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml + fi + + # Exit from venv + deactivate + + return 0 +} + +function install_vimemu() { + echo "\nInstalling vim-emu" + EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")" + trap 'rm -rf "${EMUTEMPDIR}"' EXIT + # install prerequisites (OVS is a must for the emulator to work) + sudo apt-get install openvswitch-switch + # clone vim-emu repository (attention: branch is currently master only) + echo "Cloning vim-emu repository ..." + git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR + # build vim-emu docker + echo "Building vim-emu Docker container..." + + sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image" + # start vim-emu container as daemon + echo "Starting vim-emu Docker container 'vim-emu' ..." + if [ -n "$INSTALL_LIGHTWEIGHT" ]; then + # in lightweight mode, the emulator needs to be attached to netOSM + sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py" + else + # classic build mode + sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py" + fi + echo "Waiting for 'vim-emu' container to start ..." + sleep 5 + export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu") + echo "vim-emu running at ${VIMEMU_HOSTNAME} ..." 
+ # print vim-emu connection info + echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:" + echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}" + echo -e "To add the emulated VIM to OSM you should do:" + echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack" +} + +function install_k8s_monitoring() { + # install OSM monitoring + $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh + $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh +} + +function uninstall_k8s_monitoring() { + # uninstall OSM monitoring + $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh +} + +function dump_vars(){ + echo "DEVELOP=$DEVELOP" + echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE" + echo "UNINSTALL=$UNINSTALL" + echo "UPDATE=$UPDATE" + echo "RECONFIGURE=$RECONFIGURE" + echo "TEST_INSTALLER=$TEST_INSTALLER" + echo "INSTALL_VIMEMU=$INSTALL_VIMEMU" + echo "INSTALL_PLA=$INSTALL_PLA" + echo "INSTALL_LXD=$INSTALL_LXD" + echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT" + echo "INSTALL_ONLY=$INSTALL_ONLY" + echo "INSTALL_ELK=$INSTALL_ELK" + echo "INSTALL_NOCACHELXDIMAGES=$INSTALL_NOCACHELXDIMAGES" + #echo "INSTALL_PERFMON=$INSTALL_PERFMON" + echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK" + echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME" + echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD" + echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME" + echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE" + echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE" + echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME" + echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR" + echo "TO_REBUILD=$TO_REBUILD" + echo "INSTALL_NOLXD=$INSTALL_NOLXD" + echo "INSTALL_NODOCKER=$INSTALL_NODOCKER" + echo "INSTALL_NOJUJU=$INSTALL_NOJUJU" + echo "RELEASE=$RELEASE" + echo 
"REPOSITORY=$REPOSITORY" + echo "REPOSITORY_BASE=$REPOSITORY_BASE" + echo "REPOSITORY_KEY=$REPOSITORY_KEY" + echo "OSM_DEVOPS=$OSM_DEVOPS" + echo "OSM_VCA_HOST=$OSM_VCA_HOST" + echo "OSM_VCA_SECRET=$OSM_VCA_SECRET" + echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY" + echo "NO_HOST_PORTS=$NO_HOST_PORTS" + echo "DOCKER_NOBUILD=$DOCKER_NOBUILD" + echo "WORKDIR_SUDO=$WORKDIR_SUDO" + echo "OSM_WORK_DIR=$OSM_WORK_DIR" + echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG" + echo "DOCKER_USER=$DOCKER_USER" + echo "OSM_STACK_NAME=$OSM_STACK_NAME" + echo "PULL_IMAGES=$PULL_IMAGES" + echo "KUBERNETES=$KUBERNETES" + echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL" + echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL" + echo "SHOWOPTS=$SHOWOPTS" + echo "Install from specific refspec (-b): $COMMIT_ID" +} + +function track(){ + ctime=`date +%s` + duration=$((ctime - SESSION_ID)) + url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}" + #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}" + event_name="bin" + [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc" + [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd" + [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw" + event_name="${event_name}_$1" + url="${url}&event=${event_name}&ce_duration=${duration}" + wget -q -O /dev/null $url +} + +function parse_docker_registry_url() { + DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}') + DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}') + DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}') +} + +JUJU_VERSION=2.9 +JUJU_AGENT_VERSION=2.9.9 +UNINSTALL="" +DEVELOP="" +UPDATE="" +RECONFIGURE="" +TEST_INSTALLER="" +INSTALL_LXD="" +SHOWOPTS="" +COMMIT_ID="" +ASSUME_YES="" +INSTALL_FROM_SOURCE="" +RELEASE="ReleaseTEN" +REPOSITORY="stable" +INSTALL_VIMEMU="" +INSTALL_PLA="" 
+LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd" +LXD_REPOSITORY_PATH="" +INSTALL_LIGHTWEIGHT="y" +INSTALL_TO_OPENSTACK="" +OPENSTACK_OPENRC_FILE_OR_CLOUD="" +OPENSTACK_PUBLIC_NET_NAME="" +OPENSTACK_ATTACH_VOLUME="false" +OPENSTACK_SSH_KEY_FILE="" +OPENSTACK_USERDATA_FILE="" +OPENSTACK_VM_NAME="server-osm" +OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm" +INSTALL_ONLY="" +INSTALL_ELK="" +TO_REBUILD="" +INSTALL_NOLXD="" +INSTALL_NODOCKER="" +INSTALL_NOJUJU="" +KUBERNETES="y" +INSTALL_K8S_MONITOR="" +INSTALL_NOHOSTCLIENT="" +INSTALL_NOCACHELXDIMAGES="" +SESSION_ID=`date +%s` +OSM_DEVOPS= +OSM_VCA_HOST= +OSM_VCA_SECRET= +OSM_VCA_PUBKEY= +OSM_VCA_CLOUDNAME="localhost" +OSM_VCA_K8S_CLOUDNAME="k8scloud" +OSM_STACK_NAME=osm +NO_HOST_PORTS="" +DOCKER_NOBUILD="" +REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg" +REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian" +WORKDIR_SUDO=sudo +OSM_WORK_DIR="/etc/osm" +OSM_DOCKER_WORK_DIR="/etc/osm/docker" +OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods" +OSM_HOST_VOL="/var/lib/osm" +OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}" +OSM_DOCKER_TAG=latest +DOCKER_USER=opensourcemano +PULL_IMAGES="y" +KAFKA_TAG=2.11-1.0.2 +PROMETHEUS_TAG=v2.4.3 +GRAFANA_TAG=latest +PROMETHEUS_NODE_EXPORTER_TAG=0.18.1 +PROMETHEUS_CADVISOR_TAG=latest +KEYSTONEDB_TAG=10 +OSM_DATABASE_COMMONKEY= +ELASTIC_VERSION=6.4.2 +ELASTIC_CURATOR_VERSION=5.5.4 +POD_NETWORK_CIDR=10.244.0.0/16 +K8S_MANIFEST_DIR="/etc/kubernetes/manifests" +RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' +DOCKER_REGISTRY_URL= +DOCKER_PROXY_URL= +MODULE_DOCKER_TAG= + +while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do + case "${o}" in + b) + COMMIT_ID=${OPTARG} + PULL_IMAGES="" + ;; + r) + REPOSITORY="${OPTARG}" + REPO_ARGS+=(-r "$REPOSITORY") + ;; + c) + [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue + [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue + echo -e "Invalid 
argument for -c : ' $OPTARG'\n" >&2 + usage && exit 1 + ;; + k) + REPOSITORY_KEY="${OPTARG}" + REPO_ARGS+=(-k "$REPOSITORY_KEY") + ;; + u) + REPOSITORY_BASE="${OPTARG}" + REPO_ARGS+=(-u "$REPOSITORY_BASE") + ;; + R) + RELEASE="${OPTARG}" + REPO_ARGS+=(-R "$RELEASE") + ;; + D) + OSM_DEVOPS="${OPTARG}" + ;; + o) + INSTALL_ONLY="y" + [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue + [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue + [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue + ;; + O) + INSTALL_TO_OPENSTACK="y" + if [ -n "${OPTARG}" ]; then + OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}" + else + echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2 + usage && exit 1 + fi + ;; + f) + OPENSTACK_SSH_KEY_FILE="${OPTARG}" + ;; + F) + OPENSTACK_USERDATA_FILE="${OPTARG}" + ;; + N) + OPENSTACK_PUBLIC_NET_NAME="${OPTARG}" + ;; + m) + [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue + [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue + [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue + [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue + [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue + [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue + [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue + [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue + [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue + [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue + [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue + [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue + [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue + [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue + [ "${OPTARG}" == "NONE" ] 
&& TO_REBUILD="$TO_REBUILD NONE" && continue + ;; + H) + OSM_VCA_HOST="${OPTARG}" + ;; + S) + OSM_VCA_SECRET="${OPTARG}" + ;; + s) + OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0 + ;; + w) + # when specifying workdir, do not use sudo for access + WORKDIR_SUDO= + OSM_WORK_DIR="${OPTARG}" + ;; + t) + OSM_DOCKER_TAG="${OPTARG}" + REPO_ARGS+=(-t "$OSM_DOCKER_TAG") + ;; + U) + DOCKER_USER="${OPTARG}" + ;; + P) + OSM_VCA_PUBKEY=$(cat ${OPTARG}) + ;; + A) + OSM_VCA_APIPROXY="${OPTARG}" + ;; + l) + LXD_CLOUD_FILE="${OPTARG}" + ;; + L) + LXD_CRED_FILE="${OPTARG}" + ;; + K) + CONTROLLER_NAME="${OPTARG}" + ;; + d) + DOCKER_REGISTRY_URL="${OPTARG}" + ;; + p) + DOCKER_PROXY_URL="${OPTARG}" + ;; + T) + MODULE_DOCKER_TAG="${OPTARG}" + ;; + -) + [ "${OPTARG}" == "help" ] && usage && exit 0 + [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue + [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue + [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue + [ "${OPTARG}" == "update" ] && UPDATE="y" && continue + [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue + [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue + [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue + [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue + [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue + [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue + [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue + [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue + [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue + [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue + [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue + [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue + [ "${OPTARG}" == "nohostclient" 
] && INSTALL_NOHOSTCLIENT="y" && continue + [ "${OPTARG}" == "pullimages" ] && continue + [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue + [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue + [ "${OPTARG}" == "bundle" ] && continue + [ "${OPTARG}" == "k8s" ] && continue + [ "${OPTARG}" == "lxd" ] && continue + [ "${OPTARG}" == "lxd-cred" ] && continue + [ "${OPTARG}" == "microstack" ] && continue + [ "${OPTARG}" == "overlay" ] && continue + [ "${OPTARG}" == "only-vca" ] && continue + [ "${OPTARG}" == "vca" ] && continue + [ "${OPTARG}" == "ha" ] && continue + [ "${OPTARG}" == "tag" ] && continue + [ "${OPTARG}" == "registry" ] && continue + [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue + [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue + [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue + echo -e "Invalid option: '--$OPTARG'\n" >&2 + usage && exit 1 + ;; + :) + echo "Option -$OPTARG requires an argument" >&2 + usage && exit 1 + ;; + \?) 
+ echo -e "Invalid option: '-$OPTARG'\n" >&2 + usage && exit 1 + ;; + h) + usage && exit 0 + ;; + y) + ASSUME_YES="y" + ;; + *) + usage && exit 1 + ;; + esac +done + +[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url +[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options" +[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option" + +if [ -n "$SHOWOPTS" ]; then + dump_vars + exit 0 +fi + +if [ -n "$CHARMED" ]; then + if [ -n "$UNINSTALL" ]; then + ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@" + else + ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@" + fi + + exit 0 +fi + +# if develop, we force master +[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master" + +need_packages="git wget curl tar" + +[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0 + +echo -e "Checking required packages: $need_packages" +dpkg -l $need_packages &>/dev/null \ + || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ + || sudo apt-get update \ + || FATAL "failed to run apt-get update" +dpkg -l $need_packages &>/dev/null \ + || ! echo -e "Installing $need_packages requires root privileges." 
\ + || sudo apt-get install -y $need_packages \ + || FATAL "failed to install $need_packages" +sudo snap install jq +if [ -z "$OSM_DEVOPS" ]; then + if [ -n "$TEST_INSTALLER" ]; then + echo -e "\nUsing local devops repo for OSM installation" + OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))" + else + echo -e "\nCreating temporary dir for OSM installation" + OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")" + trap 'rm -rf "$OSM_DEVOPS"' EXIT + + git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS + + if [ -z "$COMMIT_ID" ]; then + echo -e "\nGuessing the current stable release" + LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1` + [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0 + + echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS" + COMMIT_ID="tags/$LATEST_STABLE_DEVOPS" + else + echo -e "\nDEVOPS Using commit $COMMIT_ID" + fi + git -C $OSM_DEVOPS checkout $COMMIT_ID + fi +fi + +. 
$OSM_DEVOPS/common/all_funcs + +[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME" +[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}" +[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0 +[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk +#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon +[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu +[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring +[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0 + +#Installation starts here +wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README.txt &> /dev/null +track start + +[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0 +echo -e "\nInstalling OSM from refspec: $COMMIT_ID" +if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then + ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1 +fi + +echo -e "Checking required packages: lxd" +lxd --version &>/dev/null || FATAL "lxd not present, exiting." 
+[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd + +# use local devops for containers +export OSM_USE_LOCAL_DEVOPS=true + +#Install osmclient + +#Install vim-emu (optional) +[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu + +wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null +track end +echo -e "\nDONE" diff --git a/_tmp/osm-install/patched.install_osm.sh b/_tmp/osm-install/patched.install_osm.sh new file mode 100755 index 0000000..76c63e5 --- /dev/null +++ b/_tmp/osm-install/patched.install_osm.sh @@ -0,0 +1,160 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +REPOSITORY_BASE=https://osm-download.etsi.org/repository/osm/debian +RELEASE=ReleaseTEN +REPOSITORY=stable +DOCKER_TAG=10 +DEVOPS_PATH=/usr/share/osm-devops + +function usage(){ + echo -e "usage: $0 [OPTIONS]" + echo -e "Install OSM from binaries or source code (by default, from binaries)" + echo -e " OPTIONS" + echo -e " -h / --help: print this help" + echo -e " -y: do not prompt for confirmation, assumes yes" + echo -e " -r : use specified repository name for osm packages" + echo -e " -R : use specified release for osm binaries (deb packages, lxd images, ...)" + echo -e " -u : use specified repository url for osm packages" + echo -e " -k : use specified repository public key url" + echo -e " -b : install OSM from source code using a specific branch (master, v2.0, ...) 
or tag" + echo -e " -b master (main dev branch)" + echo -e " -b v2.0 (v2.0 branch)" + echo -e " -b tags/v1.1.0 (a specific tag)" + echo -e " ..." + echo -e " -c deploy osm services using container . Valid values are or . If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled" + echo -e " -s or user defined stack name when installed using swarm or namespace when installed using k8s, default is osm" + echo -e " -H use specific juju host controller IP" + echo -e " -S use VCA/juju secret key" + echo -e " -P use VCA/juju public key file" + echo -e " -C use VCA/juju CA certificate file" + echo -e " -A use VCA/juju API proxy" + echo -e " --vimemu: additionally deploy the VIM emulator as a docker container" + echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging" + echo -e " --pla: install the PLA module for placement support" + echo -e " -m : install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)" + echo -e " -o : ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)" + echo -e " -O : Install OSM to an OpenStack infrastructure. is required. 
If a is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/" + echo -e " -N : Public network name required to setup OSM to OpenStack" + echo -e " -D use local devops installation path" + echo -e " -w Location to store runtime installation" + echo -e " -t specify osm docker tag (default is latest)" + echo -e " -l: LXD cloud yaml file" + echo -e " -L: LXD credentials yaml file" + echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped" + echo -e " -d use docker registry URL instead of dockerhub" + echo -e " -p set docker proxy URL as part of docker CE configuration" + echo -e " -T specify docker tag for the modules specified with option -m" + echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)" + echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)" + echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)" + echo -e " --nojuju: do not juju, assumes already installed" + echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)" + echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)" + echo -e " --nohostclient: do not install the osmclient" + echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules" + echo -e " --source: install OSM from source code using the latest stable tag" + echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch" + echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano" + echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana" + echo -e " --volume: create a VM volume 
when installing to OpenStack" + echo -e " --showopts: print chosen options and exit (only for debugging)" + echo -e " --charmed: Deploy and operate OSM with Charms on k8s" + echo -e " [--bundle ]: Specify with which bundle to deploy OSM with charms (--charmed option)" + echo -e " [--k8s ]: Specify with which kubernetes to deploy OSM with charms (--charmed option)" + echo -e " [--vca ]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)" + echo -e " [--lxd ]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)" + echo -e " [--lxd-cred ]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)" + echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)" + echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)" + echo -e " [--ha]: Installs High Availability bundle. (--charmed option)" + echo -e " [--tag]: Docker image tag. (--charmed option)" + echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)" + +} + +add_repo() { + REPO_CHECK="^$1" + grep "${REPO_CHECK/\[arch=amd64\]/\\[arch=amd64\\]}" /etc/apt/sources.list > /dev/null 2>&1 + if [ $? -ne 0 ] + then + need_packages_lw="software-properties-common apt-transport-https" + echo -e "Checking required packages: $need_packages_lw" + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ + || sudo apt-get -q update \ + || ! echo "failed to run apt-get update" \ + || exit 1 + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "Installing $need_packages_lw requires root privileges." \ + || sudo apt-get install -y $need_packages_lw \ + || ! 
echo "failed to install $need_packages_lw" \ + || exit 1 + wget -qO - $REPOSITORY_BASE/$RELEASE/OSM%20ETSI%20Release%20Key.gpg | sudo apt-key add - + sudo DEBIAN_FRONTEND=noninteractive add-apt-repository -y "$1" && sudo DEBIAN_FRONTEND=noninteractive apt-get update + return 0 + fi + + return 1 +} + +clean_old_repo() { +dpkg -s 'osm-devops' &> /dev/null +if [ $? -eq 0 ]; then + # Clean the previous repos that might exist + sudo sed -i "/osm-download.etsi.org/d" /etc/apt/sources.list +fi +} + +while getopts ":b:r:c:n:k:u:R:l:L:K:p:D:o:O:m:N:H:S:s:w:t:U:P:A:d:p:f:F:-: hy" o; do + case "${o}" in + D) + DEVOPS_PATH="${OPTARG}" + ;; + r) + REPOSITORY="${OPTARG}" + ;; + R) + RELEASE="${OPTARG}" + ;; + u) + REPOSITORY_BASE="${OPTARG}" + ;; + t) + DOCKER_TAG="${OPTARG}" + ;; + -) + [ "${OPTARG}" == "help" ] && usage && exit 0 + ;; + :) + echo "Option -$OPTARG requires an argument" >&2 + usage && exit 1 + ;; + \?) + echo -e "Invalid option: '-$OPTARG'\n" >&2 + usage && exit 1 + ;; + h) + usage && exit 0 + ;; + *) + ;; + esac +done + +clean_old_repo +add_repo "deb [arch=amd64] $REPOSITORY_BASE/$RELEASE $REPOSITORY devops" +sudo DEBIAN_FRONTEND=noninteractive apt-get -q update +sudo DEBIAN_FRONTEND=noninteractive apt-get install osm-devops +./patched.full_install_osm.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $DEVOPS_PATH -t $DOCKER_TAG "$@" diff --git a/_tmp/osm-install/rel10.full_install_osm.sh b/_tmp/osm-install/rel10.full_install_osm.sh new file mode 100644 index 0000000..0d8d653 --- /dev/null +++ b/_tmp/osm-install/rel10.full_install_osm.sh @@ -0,0 +1,1896 @@ +#!/bin/bash +# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function usage(){ + echo -e "usage: $0 [OPTIONS]" + echo -e "Install OSM from binaries or source code (by default, from binaries)" + echo -e " OPTIONS" + echo -e " -h / --help: print this help" + echo -e " -y: do not prompt for confirmation, assumes yes" + echo -e " -r : use specified repository name for osm packages" + echo -e " -R : use specified release for osm binaries (deb packages, lxd images, ...)" + echo -e " -u : use specified repository url for osm packages" + echo -e " -k : use specified repository public key url" + echo -e " -b : install OSM from source code using a specific branch (master, v2.0, ...) or tag" + echo -e " -b master (main dev branch)" + echo -e " -b v2.0 (v2.0 branch)" + echo -e " -b tags/v1.1.0 (a specific tag)" + echo -e " ..." + echo -e " -c deploy osm services using container . Valid values are or . If -c is not used then osm will be deployed using default orchestrator. 
When used with --uninstall, osm services deployed by the orchestrator will be uninstalled" + echo -e " -s or user defined stack name when installed using swarm or namespace when installed using k8s, default is osm" + echo -e " -H use specific juju host controller IP" + echo -e " -S use VCA/juju secret key" + echo -e " -P use VCA/juju public key file" + echo -e " -C use VCA/juju CA certificate file" + echo -e " -A use VCA/juju API proxy" + echo -e " --vimemu: additionally deploy the VIM emulator as a docker container" + echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging" + echo -e " --pla: install the PLA module for placement support" + echo -e " -m : install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)" + echo -e " -o : ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)" + echo -e " -O : Install OSM to an OpenStack infrastructure. is required. 
If a is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/" + echo -e " -N : Public network name required to setup OSM to OpenStack" + echo -e " -f : Public SSH key to use to deploy OSM to OpenStack" + echo -e " -F : Cloud-Init userdata file to deploy OSM to OpenStack" + echo -e " -D use local devops installation path" + echo -e " -w Location to store runtime installation" + echo -e " -t specify osm docker tag (default is latest)" + echo -e " -l: LXD cloud yaml file" + echo -e " -L: LXD credentials yaml file" + echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped" + echo -e " -d use docker registry URL instead of dockerhub" + echo -e " -p set docker proxy URL as part of docker CE configuration" + echo -e " -T specify docker tag for the modules specified with option -m" + echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)" + echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)" + echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)" + echo -e " --nojuju: do not juju, assumes already installed" + echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)" + echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)" + echo -e " --nohostclient: do not install the osmclient" + echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules" + echo -e " --source: install OSM from source code using the latest stable tag" + echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch" + echo -e " --pullimages: pull/run osm images from 
docker.io/opensourcemano" + echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana" + echo -e " --volume: create a VM volume when installing to OpenStack" +# echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)" +# echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch" + echo -e " --showopts: print chosen options and exit (only for debugging)" + echo -e " --charmed: Deploy and operate OSM with Charms on k8s" + echo -e " [--bundle ]: Specify with which bundle to deploy OSM with charms (--charmed option)" + echo -e " [--k8s ]: Specify with which kubernetes to deploy OSM with charms (--charmed option)" + echo -e " [--vca ]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)" + echo -e " [--lxd ]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)" + echo -e " [--lxd-cred ]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)" + echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)" + echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)" + echo -e " [--ha]: Installs High Availability bundle. (--charmed option)" + echo -e " [--tag]: Docker image tag. (--charmed option)" + echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)" + +} + +# takes a juju/accounts.yaml file and returns the password specific +# for a controller. 
I wrote this using only bash tools to minimize +# additions of other packages +function parse_juju_password { + password_file="${HOME}/.local/share/juju/accounts.yaml" + local controller_name=$1 + local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file | + awk -F$fs -v controller=$controller_name '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) {if (i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; for (i=0; i/dev/null; then + echo -e " Not installed.\nInstalling iptables-persistent requires root privileges" + echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections + echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections + sudo apt-get -yq install iptables-persistent + fi +} + +#Configure NAT rules, based on the current IP addresses of containers +function nat(){ + check_install_iptables_persistent + + echo -e "\nConfiguring NAT rules" + echo -e " Required root privileges" + sudo $OSM_DEVOPS/installers/nat_osm +} + +function FATAL(){ + echo "FATAL error: Cannot install OSM due to \"$1\"" + exit 1 +} + +function update_juju_images(){ + crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab - + ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic +} + +function install_lxd() { + # Apply sysctl production values for optimal performance + sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf + sudo sysctl --system + + # Install LXD snap + sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client + sudo snap install lxd + + # Configure LXD + sudo usermod -a -G lxd `whoami` + cat 
${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed" + sg lxd -c "lxd waitready" + DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}') + [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}') + DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}') + sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU" + sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU" + #sudo systemctl stop lxd-bridge + #sudo systemctl --system daemon-reload + #sudo systemctl enable lxd-bridge + #sudo systemctl start lxd-bridge +} + +function ask_user(){ + # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive + # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed + # Return: true(0) if user type 'yes'; false (1) if user type 'no' + read -e -p "$1" USER_CONFIRMATION + while true ; do + [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0 + [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1 + [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0 + [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1 + read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION + done +} + +function install_osmclient(){ + CLIENT_RELEASE=${RELEASE#"-R "} + CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg" + CLIENT_REPOSITORY=${REPOSITORY#"-r "} + CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "} + key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY + curl $key_location | sudo apt-key add - + sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM" + sudo apt-get update + sudo apt-get install -y python3-pip + sudo -H LC_ALL=C python3 -m pip install -U 
pip + sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs + sudo apt-get install -y python3-osm-im python3-osmclient + if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then + python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt + fi + if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then + sudo apt-get install -y libcurl4-openssl-dev libssl-dev + python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt + fi + #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc + #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc + #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc + [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'` + [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'` + echo -e "\nOSM client installed" + if [ -z "$INSTALL_LIGHTWEIGHT" ]; then + echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:" + echo " export OSM_HOSTNAME=${OSM_HOSTNAME}" + echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}" + else + echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)." + echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:" + echo " export OSM_HOSTNAME=" + fi + return 0 +} + +function install_prometheus_nodeexporter(){ + if (systemctl -q is-active node_exporter) + then + echo "Node Exporter is already running." + else + echo "Node Exporter is not active, installing..." 
+ if getent passwd node_exporter > /dev/null 2>&1; then + echo "node_exporter user exists" + else + echo "Creating user node_exporter" + sudo useradd --no-create-home --shell /bin/false node_exporter + fi + wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/ + sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz + sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin + sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter + sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64* + sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service + sudo systemctl daemon-reload + sudo systemctl restart node_exporter + sudo systemctl enable node_exporter + echo "Node Exporter has been activated in this host." + fi + return 0 +} + +function uninstall_prometheus_nodeexporter(){ + sudo systemctl stop node_exporter + sudo systemctl disable node_exporter + sudo rm /etc/systemd/system/node_exporter.service + sudo systemctl daemon-reload + sudo userdel node_exporter + sudo rm /usr/local/bin/node_exporter + return 0 +} + +function install_docker_ce() { + # installs and configures Docker CE + echo "Installing Docker CE ..." + sudo apt-get -qq update + sudo apt-get install -y apt-transport-https ca-certificates software-properties-common + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - + sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" + sudo apt-get -qq update + sudo apt-get install -y docker-ce + echo "Adding user to group 'docker'" + sudo groupadd -f docker + sudo usermod -aG docker $USER + sleep 2 + sudo service docker restart + echo "... 
restarted Docker service" + if [ -n "${DOCKER_PROXY_URL}" ]; then + echo "Configuring docker proxy ..." + if [ -f /etc/docker/daemon.json ]; then + if grep -q registry-mirrors /etc/docker/daemon.json; then + sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json + else + sudo sed -i "s|{|{\n \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json + fi + else + sudo bash -c "cat << EOF > /etc/docker/daemon.json +{ + \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] +} +EOF" + fi + sudo systemctl daemon-reload + sudo service docker restart + echo "... restarted Docker service again" + fi + sg docker -c "docker version" || FATAL "Docker installation failed" + echo "... Docker CE installation done" + return 0 +} + +function install_docker_compose() { + # installs and configures docker-compose + echo "Installing Docker Compose ..." + sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose + sudo chmod +x /usr/local/bin/docker-compose + echo "... Docker Compose installation done" +} + +function install_juju() { + echo "Installing juju" + sudo snap install juju --classic --channel=2.8/stable + [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}" + [ -n "$INSTALL_NOCACHELXDIMAGES" ] || update_juju_images + echo "Finished installation of juju" + return 0 +} + +function juju_createcontroller() { + if ! 
juju show-controller $OSM_STACK_NAME &> /dev/null; then + # Not found created, create the controller + sudo usermod -a -G lxd ${USER} + sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME" + fi + [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed" + juju controller-config features=[k8s-operators] +} + +function juju_addk8s() { + cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath +} + +function juju_createcontroller_k8s(){ + cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client + juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \ + --config controller-service-type=loadbalancer \ + --agent-version=$JUJU_AGENT_VERSION +} + + +function juju_addlxd_cloud(){ + mkdir -p /tmp/.osm + OSM_VCA_CLOUDNAME="lxd-cloud" + LXDENDPOINT=$DEFAULT_IP + LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml + LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml + + cat << EOF > $LXD_CLOUD +clouds: + $OSM_VCA_CLOUDNAME: + type: lxd + auth-types: [certificate] + endpoint: "https://$LXDENDPOINT:8443" + config: + ssl-hostname-verification: false +EOF + openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org" + local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'` + local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/ /'` + local client_key=`cat /tmp/.osm/client.key | sed 's/^/ /'` + + cat << EOF > $LXD_CREDENTIALS +credentials: + $OSM_VCA_CLOUDNAME: + lxd-cloud: + auth-type: certificate + server-cert: | +$server_cert + client-cert: | +$client_cert + client-key: | +$client_key +EOF + lxc config trust add local: /tmp/.osm/client.crt + juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force + juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f 
$LXD_CREDENTIALS + sg lxd -c "lxd waitready" + juju controller-config features=[k8s-operators] +} + + +function juju_createproxy() { + check_install_iptables_persistent + + if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then + sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST + sudo netfilter-persistent save + fi +} + +function docker_login() { + echo "Docker login" + sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}" +} + +function generate_docker_images() { + echo "Pulling and generating docker images" + [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login + + echo "Pulling docker images" + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then + sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image" + sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then + sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then + sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then + sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then + sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image" + fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then + sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image" 
+ fi + + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then + sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image" + fi + + if [ -n "$PULL_IMAGES" ]; then + echo "Pulling OSM docker images" + for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do + module_lower=${module,,} + if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then + continue + fi + module_tag="${OSM_DOCKER_TAG}" + if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then + module_tag="${MODULE_DOCKER_TAG}" + fi + echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image" + sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image" + done + else + _build_from=$COMMIT_ID + [ -z "$_build_from" ] && _build_from="latest" + echo "OSM Docker images generated from $_build_from" + + for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then + module_lower=${module,,} + if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then + continue + fi + git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module + git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID} + sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image" + fi + done + if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then + BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY") + BUILD_ARGS+=(--build-arg RELEASE="$RELEASE") + BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY") + BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE") + sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ." 
+ fi + echo "Finished generation of docker images" + fi + + echo "Finished pulling and generating docker images" +} + +function cmp_overwrite() { + file1="$1" + file2="$2" + if ! $(cmp "${file1}" "${file2}" >/dev/null 2>&1); then + if [ -f "${file2}" ]; then + ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2} + else + cp -b ${file1} ${file2} + fi + fi +} + +function generate_docker_compose_files() { + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml + if [ -n "$INSTALL_PLA" ]; then + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml + fi +} + +function generate_k8s_manifest_files() { + #Kubernetes resources + $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR + $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml +} + +function generate_prometheus_grafana_files() { + [ -n "$KUBERNETES" ] && return + # Prometheus files + $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml + + # Grafana files + $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json + + # Prometheus 
Exporters files + $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service +} + +function generate_docker_env_files() { + echo "Doing a backup of existing env files" + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~} + $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~} + + echo "Generating docker env files" + # LCM + if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then + echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! 
grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if [ -n "$OSM_VCA_APIPROXY" ]; then + if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + fi + + if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then + echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env + else + $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env + fi + + # RO + MYSQL_ROOT_PASSWORD=$(generate_secret) + if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then + echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env + fi + if [ ! 
-f $OSM_DOCKER_WORK_DIR/ro.env ]; then + echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env + fi + if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then + echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env + fi + + # Keystone + KEYSTONE_DB_PASSWORD=$(generate_secret) + SERVICE_PASSWORD=$(generate_secret) + if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then + echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env + fi + if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then + echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env + echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env + echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env + fi + + # NBI + if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then + echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env + echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env + fi + + # MON + if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then + echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + fi + + if ! 
grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + + if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + + if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + + if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then + echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env + else + $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env + fi + + + # POL + if [ ! 
-f $OSM_DOCKER_WORK_DIR/pol.env ]; then + echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env + fi + + echo "Finished generation of docker env files" +} + +function generate_osmclient_script () { + echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm + $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm" + echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm" +} + +#installs kubernetes packages +function install_kube() { + sudo apt-get update && sudo apt-get install -y apt-transport-https + curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - + sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main" + sudo apt-get update + echo "Installing Kubernetes Packages ..." + sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00 + sudo apt-mark hold kubelet kubeadm kubectl +} + +#initializes kubernetes control plane +function init_kubeadm() { + sudo swapoff -a + sudo sed -i.bak '/.*none.*swap/s/^\(.*\)$/#\1/g' /etc/fstab + sudo kubeadm init --config $1 + sleep 5 +} + +function kube_config_dir() { + [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes" + mkdir -p $HOME/.kube + sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config +} + +function install_k8s_storageclass() { + OPENEBS_DIR="$(mktemp -d -q --tmpdir "openebs.XXXXXX")" + trap 'rm -rf "${OPENEBS_DIR}"' EXIT + wget -q https://openebs.github.io/charts/openebs-operator-1.6.0.yaml -P $OPENEBS_DIR + kubectl apply -f $OPENEBS_DIR + local storageclass_timeout=400 + local counter=0 + local storageclass_ready="" + echo "Waiting for storageclass" + while (( counter < storageclass_timeout )) + do + kubectl get storageclass openebs-hostpath &> /dev/null + + if [ $? 
-eq 0 ] ; then + echo "Storageclass available" + storageclass_ready="y" + break + else + counter=$((counter + 15)) + sleep 15 + fi + done + [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs" + kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' +} + +function install_k8s_metallb() { + METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP + cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f - + echo "apiVersion: v1 +kind: ConfigMap +metadata: + namespace: metallb-system + name: config +data: + config: | + address-pools: + - name: default + protocol: layer2 + addresses: + - $METALLB_IP_RANGE" | kubectl apply -f - +} +#deploys flannel as daemonsets +function deploy_cni_provider() { + CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")" + trap 'rm -rf "${CNI_DIR}"' EXIT + wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR + kubectl apply -f $CNI_DIR + [ $? 
-ne 0 ] && FATAL "Cannot Install Flannel" +} + +#creates secrets from env files which will be used by containers +function kube_secrets(){ + kubectl create ns $OSM_STACK_NAME + kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env + kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env + kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env + kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env + kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env + kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env + kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env +} + +#taints K8s master node +function taint_master_node() { + K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}') + kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule- + sleep 5 +} + +#deploys osm pods and services +function deploy_osm_services() { + kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR +} + +#deploy charmed services +function deploy_charmed_services() { + juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME + # deploy mongodb charm + namespace=$OSM_STACK_NAME + juju deploy cs:~charmed-osm/mongodb-k8s \ + --config enable-sidecar=true \ + --config replica-set=rs0 \ + --config namespace=$namespace \ + -m $namespace +} + +function deploy_osm_pla_service() { + # corresponding to namespace_vol + $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml + # corresponding to deploy_osm_services + kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla +} + +#Install helm and tiller +function install_helm() { + helm > /dev/null 2>&1 + if 
[ $? != 0 ] ; then + # Helm is not installed. Install helm + echo "Helm is not installed, installing ..." + curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz + tar -zxvf helm-v2.15.2.tar.gz + sudo mv linux-amd64/helm /usr/local/bin/helm + rm -r linux-amd64 + rm helm-v2.15.2.tar.gz + fi + + # Checking if tiller has being configured + kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1 + if [ $? == 1 ] ; then + # tiller account for kubernetes + kubectl --namespace kube-system create serviceaccount tiller + kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller + # HELM initialization + helm init --stable-repo-url https://charts.helm.sh/stable --service-account tiller + + # Wait for Tiller to be up and running. If timeout expires, continue installing + tiller_timeout=120; + counter=0; + tiller_status="" + while (( counter < tiller_timeout )) + do + tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2'}` + ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break + counter=$((counter + 5)) + sleep 5 + done + [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. 
Installation will continue" + fi +} + +function parse_yaml() { + TAG=$1 + shift + services=$@ + for module in $services; do + if [ "$module" == "pla" ]; then + if [ -n "$INSTALL_PLA" ]; then + echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}" + $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml + fi + else + echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}" + $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml + fi + done +} + +function update_manifest_files() { + osm_services="nbi lcm ro pol mon ng-ui keystone pla" + list_of_services="" + for module in $osm_services; do + module_upper="${module^^}" + if ! echo $TO_REBUILD | grep -q $module_upper ; then + list_of_services="$list_of_services $module" + fi + done + if [ ! "$OSM_DOCKER_TAG" == "10" ]; then + parse_yaml $OSM_DOCKER_TAG $list_of_services + fi + if [ -n "$MODULE_DOCKER_TAG" ]; then + parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild + fi +} + +function namespace_vol() { + osm_services="nbi lcm ro pol mon kafka mysql prometheus" + for osm in $osm_services; do + $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml + done +} + +function init_docker_swarm() { + if [ "${DEFAULT_MTU}" != "1500" ]; then + DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s` + DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. 
'{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'` + sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge" + fi + sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}" + return 0 +} + +function create_docker_network() { + echo "creating network" + sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}" + echo "creating network DONE" +} + +function deploy_lightweight() { + + echo "Deploying lightweight build" + OSM_NBI_PORT=9999 + OSM_RO_PORT=9090 + OSM_KEYSTONE_PORT=5000 + OSM_UI_PORT=80 + OSM_MON_PORT=8662 + OSM_PROM_PORT=9090 + OSM_PROM_CADVISOR_PORT=8080 + OSM_PROM_HOSTPORT=9091 + OSM_GRAFANA_PORT=3000 + [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601 + #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000 + + if [ -n "$NO_HOST_PORTS" ]; then + OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT) + OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT) + OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT) + OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT) + OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT) + OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT) + OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT) + OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT) + #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT) + [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT) + else + OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT) + OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT) + OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT) + OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT) + OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT) + OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT) + 
OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT) + OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT) + #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT) + [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT) + fi + echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh + + pushd $OSM_DOCKER_WORK_DIR + if [ -n "$INSTALL_PLA" ]; then + track deploy_osm_pla + sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME" + else + sg docker -c ". 
./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME" + fi + popd + + echo "Finished deployment of lightweight build" +} + +function deploy_elk() { + echo "Pulling docker images for ELK" + sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image" + sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image" + sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image" + sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image" + sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image" + echo "Finished pulling elk docker images" + $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk" + $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk + remove_stack osm_elk + echo "Deploying ELK stack" + sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk" + echo "Waiting for ELK stack to be up and running" + time=0 + step=5 + timelength=40 + elk_is_up=1 + while [ $time -le $timelength ]; do + if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then + elk_is_up=0 + break + fi + sleep $step + time=$((time+step)) + done + if [ $elk_is_up -eq 0 ]; then + echo "ELK is up and running. Trying to create index pattern..." 
+ #Create index pattern + curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \ + -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null + #Make it the default index + curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \ + -d"{\"value\":\"filebeat-*\"}" 2>/dev/null + else + echo "Cannot connect to Kibana to create index pattern." + echo "Once Kibana is running, you can use the following instructions to create index pattern:" + echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \ + -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"' + echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \ + -d"{\"value\":\"filebeat-*\"}"' + fi + echo "Finished deployment of ELK stack" + return 0 +} + +function add_local_k8scluster() { + /usr/bin/osm --all-projects vim-create \ + --name _system-osm-vim \ + --account_type dummy \ + --auth_url http://dummy \ + --user osm --password osm --tenant osm \ + --description "dummy" \ + --config '{management_network_name: mgmt}' + /usr/bin/osm --all-projects k8scluster-add \ + --creds ${HOME}/.kube/config \ + --vim _system-osm-vim \ + --k8s-nets '{"net1": null}' \ + --version '1.15' \ + --description "OSM Internal Cluster" \ + _system-osm-k8s +} + +function install_lightweight() { + track checkingroot + [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges." + track noroot + + if [ -n "$KUBERNETES" ]; then + [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following + 1. Install and configure LXD + 2. 
Install juju + 3. Install docker CE + 4. Disable swap space + 5. Install and initialize Kubernetes + as pre-requirements. + Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1 + + else + [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1 + fi + track proceed + + echo "Installing lightweight build of OSM" + LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")" + trap 'rm -rf "${LWTEMPDIR}"' EXIT + DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}') + [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}') + [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0" + DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'` + [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route" + DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}') + + # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to + if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then + need_packages_lw="snapd" + echo -e "Checking required packages: $need_packages_lw" + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ + || sudo apt-get update \ + || FATAL "failed to run apt-get update" + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "Installing $need_packages_lw requires root privileges." \ + || sudo apt-get install -y $need_packages_lw \ + || FATAL "failed to install $need_packages_lw" + install_lxd + fi + + track prereqok + + [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce) + + echo "Creating folders for installation" + [ ! 
-d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR + [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla + [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml + + #Installs Kubernetes + if [ -n "$KUBERNETES" ]; then + install_kube + track install_k8s + init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml + kube_config_dir + track init_k8s + if [ -n "$INSTALL_K8S_MONITOR" ]; then + # uninstall OSM MONITORING + uninstall_k8s_monitoring + track uninstall_k8s_monitoring + fi + #remove old namespace + remove_k8s_namespace $OSM_STACK_NAME + deploy_cni_provider + taint_master_node + install_k8s_storageclass + track k8s_storageclass + install_k8s_metallb + track k8s_metallb + else + #install_docker_compose + [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm + track docker_swarm + fi + + [ -z "$INSTALL_NOJUJU" ] && install_juju + track juju_install + + if [ -z "$OSM_VCA_HOST" ]; then + if [ -z "$CONTROLLER_NAME" ]; then + + if [ -n "$KUBERNETES" ]; then + juju_createcontroller_k8s + juju_addlxd_cloud + else + if [ -n "$LXD_CLOUD_FILE" ]; then + [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external" + OSM_VCA_CLOUDNAME="lxd-cloud" + juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE + juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE + fi + juju_createcontroller + juju_createproxy + fi + else + OSM_VCA_CLOUDNAME="lxd-cloud" + if [ -n "$LXD_CLOUD_FILE" ]; then + [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external" + juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f 
$LXD_CLOUD_FILE + juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE + else + mkdir -p ~/.osm + cat << EOF > ~/.osm/lxd-cloud.yaml +clouds: + lxd-cloud: + type: lxd + auth-types: [certificate] + endpoint: "https://$DEFAULT_IP:8443" + config: + ssl-hostname-verification: false +EOF + openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org" + local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'` + local client_cert=`cat ~/.osm/client.crt | sed 's/^/ /'` + local client_key=`cat ~/.osm/client.key | sed 's/^/ /'` + cat << EOF > ~/.osm/lxd-credentials.yaml +credentials: + lxd-cloud: + lxd-cloud: + auth-type: certificate + server-cert: | +$server_cert + client-cert: | +$client_cert + client-key: | +$client_key +EOF + lxc config trust add local: ~/.osm/client.crt + juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml + juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml + fi + fi + [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'` + [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'` + [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address" + fi + track juju_controller + + if [ -z "$OSM_VCA_SECRET" ]; then + [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME) + [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME) + [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot 
obtain juju secret" + fi + if [ -z "$OSM_VCA_PUBKEY" ]; then + OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub) + [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key" + fi + if [ -z "$OSM_VCA_CACERT" ]; then + [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n) + [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n) + [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate" + fi + + # Set OSM_VCA_APIPROXY only when it is not a k8s installation + if [ -z "$KUBERNETES" ]; then + if [ -z "$OSM_VCA_APIPROXY" ]; then + OSM_VCA_APIPROXY=$DEFAULT_IP + [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy" + fi + juju_createproxy + fi + track juju + + if [ -z "$OSM_DATABASE_COMMONKEY" ]; then + OSM_DATABASE_COMMONKEY=$(generate_secret) + [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret" + fi + + # Deploy OSM services + [ -z "$DOCKER_NOBUILD" ] && generate_docker_images + track docker_build + + if [ -n "$KUBERNETES" ]; then + generate_k8s_manifest_files + else + generate_docker_compose_files + fi + track manifest_files + generate_prometheus_grafana_files + generate_docker_env_files + track env_files + + if [ -n "$KUBERNETES" ]; then + deploy_charmed_services + kube_secrets + update_manifest_files + namespace_vol + deploy_osm_services + if [ -n "$INSTALL_PLA" ]; then + # optional PLA install + deploy_osm_pla_service + track deploy_osm_pla + fi + track deploy_osm_services_k8s + install_helm + track install_helm + if [ -n "$INSTALL_K8S_MONITOR" ]; then + # install OSM MONITORING + install_k8s_monitoring + track install_k8s_monitoring + fi + else + # remove old stack + remove_stack $OSM_STACK_NAME + create_docker_network + deploy_lightweight + 
generate_osmclient_script + track docker_deploy + install_prometheus_nodeexporter + track nodeexporter + [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu + [ -n "$INSTALL_ELK" ] && deploy_elk && track elk + fi + + [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient + track osmclient + + echo -e "Checking OSM health state..." + if [ -n "$KUBERNETES" ]; then + $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \ + echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \ + echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \ + track osm_unhealthy + else + $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \ + echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \ + echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \ + track osm_unhealthy + fi + track after_healthcheck + + [ -n "$KUBERNETES" ] && add_local_k8scluster + track add_local_k8scluster + + wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null + track end + return 0 +} + +function install_to_openstack() { + + if [ -z "$2" ]; then + FATAL "OpenStack installer requires a valid external network name" + fi + + # Install Pip for Python3 + $WORKDIR_SUDO apt install -y python3-pip python3-venv + $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip + + # Create a venv to avoid conflicts with the host installation + python3 -m venv $OPENSTACK_PYTHON_VENV + + source $OPENSTACK_PYTHON_VENV/bin/activate + + # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train + python -m pip install -U wheel + python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11" + + # Install the Openstack cloud module (ansible>=2.10) + ansible-galaxy collection install openstack.cloud + + export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg" + + 
OSM_INSTALLER_ARGS="${REPO_ARGS[@]}" + + ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME" + + if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then + ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE" + fi + + if [ -n "$OPENSTACK_USERDATA_FILE" ]; then + ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE" + fi + + # Execute the Ansible playbook based on openrc or clouds.yaml + if [ -e "$1" ]; then + . $1 + ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \ + $OSM_DEVOPS/installers/openstack/site.yml + else + ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \ + -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml + fi + + # Exit from venv + deactivate + + return 0 +} + +function install_vimemu() { + echo "\nInstalling vim-emu" + EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")" + trap 'rm -rf "${EMUTEMPDIR}"' EXIT + # install prerequisites (OVS is a must for the emulator to work) + sudo apt-get install openvswitch-switch + # clone vim-emu repository (attention: branch is currently master only) + echo "Cloning vim-emu repository ..." + git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR + # build vim-emu docker + echo "Building vim-emu Docker container..." + + sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image" + # start vim-emu container as daemon + echo "Starting vim-emu Docker container 'vim-emu' ..." 
+ if [ -n "$INSTALL_LIGHTWEIGHT" ]; then + # in lightweight mode, the emulator needs to be attached to netOSM + sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py" + else + # classic build mode + sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py" + fi + echo "Waiting for 'vim-emu' container to start ..." + sleep 5 + export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu") + echo "vim-emu running at ${VIMEMU_HOSTNAME} ..." + # print vim-emu connection info + echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:" + echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}" + echo -e "To add the emulated VIM to OSM you should do:" + echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack" +} + +function install_k8s_monitoring() { + # install OSM monitoring + $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh + $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh +} + +function uninstall_k8s_monitoring() { + # uninstall OSM monitoring + $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh +} + +function dump_vars(){ + echo "DEVELOP=$DEVELOP" + echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE" + echo "UNINSTALL=$UNINSTALL" + echo "UPDATE=$UPDATE" + echo "RECONFIGURE=$RECONFIGURE" + echo "TEST_INSTALLER=$TEST_INSTALLER" + echo "INSTALL_VIMEMU=$INSTALL_VIMEMU" + echo "INSTALL_PLA=$INSTALL_PLA" + echo "INSTALL_LXD=$INSTALL_LXD" + echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT" + echo 
"INSTALL_ONLY=$INSTALL_ONLY" + echo "INSTALL_ELK=$INSTALL_ELK" + echo "INSTALL_NOCACHELXDIMAGES=$INSTALL_NOCACHELXDIMAGES" + #echo "INSTALL_PERFMON=$INSTALL_PERFMON" + echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK" + echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME" + echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD" + echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME" + echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE" + echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE" + echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME" + echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR" + echo "TO_REBUILD=$TO_REBUILD" + echo "INSTALL_NOLXD=$INSTALL_NOLXD" + echo "INSTALL_NODOCKER=$INSTALL_NODOCKER" + echo "INSTALL_NOJUJU=$INSTALL_NOJUJU" + echo "RELEASE=$RELEASE" + echo "REPOSITORY=$REPOSITORY" + echo "REPOSITORY_BASE=$REPOSITORY_BASE" + echo "REPOSITORY_KEY=$REPOSITORY_KEY" + echo "OSM_DEVOPS=$OSM_DEVOPS" + echo "OSM_VCA_HOST=$OSM_VCA_HOST" + echo "OSM_VCA_SECRET=$OSM_VCA_SECRET" + echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY" + echo "NO_HOST_PORTS=$NO_HOST_PORTS" + echo "DOCKER_NOBUILD=$DOCKER_NOBUILD" + echo "WORKDIR_SUDO=$WORKDIR_SUDO" + echo "OSM_WORK_DIR=$OSM_WORK_DIR" + echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG" + echo "DOCKER_USER=$DOCKER_USER" + echo "OSM_STACK_NAME=$OSM_STACK_NAME" + echo "PULL_IMAGES=$PULL_IMAGES" + echo "KUBERNETES=$KUBERNETES" + echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL" + echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL" + echo "SHOWOPTS=$SHOWOPTS" + echo "Install from specific refspec (-b): $COMMIT_ID" +} + +function track(){ + ctime=`date +%s` + duration=$((ctime - SESSION_ID)) + url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}" + #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}" + event_name="bin" + [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc" + [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd" + [ -n 
"$INSTALL_LIGHTWEIGHT" ] && event_name="lw" + event_name="${event_name}_$1" + url="${url}&event=${event_name}&ce_duration=${duration}" + wget -q -O /dev/null $url +} + +function parse_docker_registry_url() { + DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}') + DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}') + DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}') +} + +JUJU_AGENT_VERSION=2.8.6 +UNINSTALL="" +DEVELOP="" +UPDATE="" +RECONFIGURE="" +TEST_INSTALLER="" +INSTALL_LXD="" +SHOWOPTS="" +COMMIT_ID="" +ASSUME_YES="" +INSTALL_FROM_SOURCE="" +RELEASE="ReleaseTEN" +REPOSITORY="stable" +INSTALL_VIMEMU="" +INSTALL_PLA="" +LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd" +LXD_REPOSITORY_PATH="" +INSTALL_LIGHTWEIGHT="y" +INSTALL_TO_OPENSTACK="" +OPENSTACK_OPENRC_FILE_OR_CLOUD="" +OPENSTACK_PUBLIC_NET_NAME="" +OPENSTACK_ATTACH_VOLUME="false" +OPENSTACK_SSH_KEY_FILE="" +OPENSTACK_USERDATA_FILE="" +OPENSTACK_VM_NAME="server-osm" +OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm" +INSTALL_ONLY="" +INSTALL_ELK="" +TO_REBUILD="" +INSTALL_NOLXD="" +INSTALL_NODOCKER="" +INSTALL_NOJUJU="" +KUBERNETES="y" +INSTALL_K8S_MONITOR="" +INSTALL_NOHOSTCLIENT="" +INSTALL_NOCACHELXDIMAGES="" +SESSION_ID=`date +%s` +OSM_DEVOPS= +OSM_VCA_HOST= +OSM_VCA_SECRET= +OSM_VCA_PUBKEY= +OSM_VCA_CLOUDNAME="localhost" +OSM_VCA_K8S_CLOUDNAME="k8scloud" +OSM_STACK_NAME=osm +NO_HOST_PORTS="" +DOCKER_NOBUILD="" +REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg" +REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian" +WORKDIR_SUDO=sudo +OSM_WORK_DIR="/etc/osm" +OSM_DOCKER_WORK_DIR="/etc/osm/docker" +OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods" +OSM_HOST_VOL="/var/lib/osm" +OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}" +OSM_DOCKER_TAG=latest +DOCKER_USER=opensourcemano +PULL_IMAGES="y" 
+KAFKA_TAG=2.11-1.0.2 +PROMETHEUS_TAG=v2.4.3 +GRAFANA_TAG=latest +PROMETHEUS_NODE_EXPORTER_TAG=0.18.1 +PROMETHEUS_CADVISOR_TAG=latest +KEYSTONEDB_TAG=10 +OSM_DATABASE_COMMONKEY= +ELASTIC_VERSION=6.4.2 +ELASTIC_CURATOR_VERSION=5.5.4 +POD_NETWORK_CIDR=10.244.0.0/16 +K8S_MANIFEST_DIR="/etc/kubernetes/manifests" +RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' +DOCKER_REGISTRY_URL= +DOCKER_PROXY_URL= +MODULE_DOCKER_TAG= + +while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do + case "${o}" in + b) + COMMIT_ID=${OPTARG} + PULL_IMAGES="" + ;; + r) + REPOSITORY="${OPTARG}" + REPO_ARGS+=(-r "$REPOSITORY") + ;; + c) + [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue + [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue + echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2 + usage && exit 1 + ;; + k) + REPOSITORY_KEY="${OPTARG}" + REPO_ARGS+=(-k "$REPOSITORY_KEY") + ;; + u) + REPOSITORY_BASE="${OPTARG}" + REPO_ARGS+=(-u "$REPOSITORY_BASE") + ;; + R) + RELEASE="${OPTARG}" + REPO_ARGS+=(-R "$RELEASE") + ;; + D) + OSM_DEVOPS="${OPTARG}" + ;; + o) + INSTALL_ONLY="y" + [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue + [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue + [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue + ;; + O) + INSTALL_TO_OPENSTACK="y" + if [ -n "${OPTARG}" ]; then + OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}" + else + echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2 + usage && exit 1 + fi + ;; + f) + OPENSTACK_SSH_KEY_FILE="${OPTARG}" + ;; + F) + OPENSTACK_USERDATA_FILE="${OPTARG}" + ;; + N) + OPENSTACK_PUBLIC_NET_NAME="${OPTARG}" + ;; + m) + [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue + [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue + [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue + [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue + [ "${OPTARG}" == 
"MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue + [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue + [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue + [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue + [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue + [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue + [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue + [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue + [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue + [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue + [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue + ;; + H) + OSM_VCA_HOST="${OPTARG}" + ;; + S) + OSM_VCA_SECRET="${OPTARG}" + ;; + s) + OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. 
Regex used for validation is $RE_CHECK" && exit 0 + ;; + w) + # when specifying workdir, do not use sudo for access + WORKDIR_SUDO= + OSM_WORK_DIR="${OPTARG}" + ;; + t) + OSM_DOCKER_TAG="${OPTARG}" + REPO_ARGS+=(-t "$OSM_DOCKER_TAG") + ;; + U) + DOCKER_USER="${OPTARG}" + ;; + P) + OSM_VCA_PUBKEY=$(cat ${OPTARG}) + ;; + A) + OSM_VCA_APIPROXY="${OPTARG}" + ;; + l) + LXD_CLOUD_FILE="${OPTARG}" + ;; + L) + LXD_CRED_FILE="${OPTARG}" + ;; + K) + CONTROLLER_NAME="${OPTARG}" + ;; + d) + DOCKER_REGISTRY_URL="${OPTARG}" + ;; + p) + DOCKER_PROXY_URL="${OPTARG}" + ;; + T) + MODULE_DOCKER_TAG="${OPTARG}" + ;; + -) + [ "${OPTARG}" == "help" ] && usage && exit 0 + [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue + [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue + [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue + [ "${OPTARG}" == "update" ] && UPDATE="y" && continue + [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue + [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue + [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue + [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue + [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue + [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue + [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue + [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue + [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue + [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue + [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue + [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue + [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue + [ "${OPTARG}" == "pullimages" ] && continue + [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue + [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue + [ "${OPTARG}" == "bundle" ] && 
continue + [ "${OPTARG}" == "k8s" ] && continue + [ "${OPTARG}" == "lxd" ] && continue + [ "${OPTARG}" == "lxd-cred" ] && continue + [ "${OPTARG}" == "microstack" ] && continue + [ "${OPTARG}" == "overlay" ] && continue + [ "${OPTARG}" == "only-vca" ] && continue + [ "${OPTARG}" == "vca" ] && continue + [ "${OPTARG}" == "ha" ] && continue + [ "${OPTARG}" == "tag" ] && continue + [ "${OPTARG}" == "registry" ] && continue + [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue + [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue + [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue + echo -e "Invalid option: '--$OPTARG'\n" >&2 + usage && exit 1 + ;; + :) + echo "Option -$OPTARG requires an argument" >&2 + usage && exit 1 + ;; + \?) + echo -e "Invalid option: '-$OPTARG'\n" >&2 + usage && exit 1 + ;; + h) + usage && exit 0 + ;; + y) + ASSUME_YES="y" + ;; + *) + usage && exit 1 + ;; + esac +done + +[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url +[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options" +[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option" + +if [ -n "$SHOWOPTS" ]; then + dump_vars + exit 0 +fi + +if [ -n "$CHARMED" ]; then + if [ -n "$UNINSTALL" ]; then + ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@" + else + ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@" + fi + + exit 0 +fi + +# if develop, we force master +[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master" + +need_packages="git wget curl tar" + +[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD 
$OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0 + +echo -e "Checking required packages: $need_packages" +dpkg -l $need_packages &>/dev/null \ + || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ + || sudo apt-get update \ + || FATAL "failed to run apt-get update" +dpkg -l $need_packages &>/dev/null \ + || ! echo -e "Installing $need_packages requires root privileges." \ + || sudo apt-get install -y $need_packages \ + || FATAL "failed to install $need_packages" +sudo snap install jq +if [ -z "$OSM_DEVOPS" ]; then + if [ -n "$TEST_INSTALLER" ]; then + echo -e "\nUsing local devops repo for OSM installation" + OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))" + else + echo -e "\nCreating temporary dir for OSM installation" + OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")" + trap 'rm -rf "$OSM_DEVOPS"' EXIT + + git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS + + if [ -z "$COMMIT_ID" ]; then + echo -e "\nGuessing the current stable release" + LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1` + [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0 + + echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS" + COMMIT_ID="tags/$LATEST_STABLE_DEVOPS" + else + echo -e "\nDEVOPS Using commit $COMMIT_ID" + fi + git -C $OSM_DEVOPS checkout $COMMIT_ID + fi +fi + +. 
$OSM_DEVOPS/common/all_funcs + +[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME" +[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}" +[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0 +[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk +#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon +[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu +[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring +[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0 + +#Installation starts here +wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README.txt &> /dev/null +track start + +[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0 +echo -e "\nInstalling OSM from refspec: $COMMIT_ID" +if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then + ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1 +fi + +echo -e "Checking required packages: lxd" +lxd --version &>/dev/null || FATAL "lxd not present, exiting." 
+[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd + +# use local devops for containers +export OSM_USE_LOCAL_DEVOPS=true + +#Install osmclient + +#Install vim-emu (optional) +[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu + +wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null +track end +echo -e "\nDONE" diff --git a/_tmp/osm-install/rel10.install_osm.sh b/_tmp/osm-install/rel10.install_osm.sh new file mode 100644 index 0000000..a581d43 --- /dev/null +++ b/_tmp/osm-install/rel10.install_osm.sh @@ -0,0 +1,160 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +REPOSITORY_BASE=https://osm-download.etsi.org/repository/osm/debian +RELEASE=ReleaseTEN +REPOSITORY=stable +DOCKER_TAG=10 +DEVOPS_PATH=/usr/share/osm-devops + +function usage(){ + echo -e "usage: $0 [OPTIONS]" + echo -e "Install OSM from binaries or source code (by default, from binaries)" + echo -e " OPTIONS" + echo -e " -h / --help: print this help" + echo -e " -y: do not prompt for confirmation, assumes yes" + echo -e " -r : use specified repository name for osm packages" + echo -e " -R : use specified release for osm binaries (deb packages, lxd images, ...)" + echo -e " -u : use specified repository url for osm packages" + echo -e " -k : use specified repository public key url" + echo -e " -b : install OSM from source code using a specific branch (master, v2.0, ...) 
or tag" + echo -e " -b master (main dev branch)" + echo -e " -b v2.0 (v2.0 branch)" + echo -e " -b tags/v1.1.0 (a specific tag)" + echo -e " ..." + echo -e " -c deploy osm services using container . Valid values are or . If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled" + echo -e " -s or user defined stack name when installed using swarm or namespace when installed using k8s, default is osm" + echo -e " -H use specific juju host controller IP" + echo -e " -S use VCA/juju secret key" + echo -e " -P use VCA/juju public key file" + echo -e " -C use VCA/juju CA certificate file" + echo -e " -A use VCA/juju API proxy" + echo -e " --vimemu: additionally deploy the VIM emulator as a docker container" + echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging" + echo -e " --pla: install the PLA module for placement support" + echo -e " -m : install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)" + echo -e " -o : ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)" + echo -e " -O : Install OSM to an OpenStack infrastructure. is required. 
If a is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/" + echo -e " -N : Public network name required to setup OSM to OpenStack" + echo -e " -D use local devops installation path" + echo -e " -w Location to store runtime installation" + echo -e " -t specify osm docker tag (default is latest)" + echo -e " -l: LXD cloud yaml file" + echo -e " -L: LXD credentials yaml file" + echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped" + echo -e " -d use docker registry URL instead of dockerhub" + echo -e " -p set docker proxy URL as part of docker CE configuration" + echo -e " -T specify docker tag for the modules specified with option -m" + echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)" + echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)" + echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)" + echo -e " --nojuju: do not juju, assumes already installed" + echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)" + echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)" + echo -e " --nohostclient: do not install the osmclient" + echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules" + echo -e " --source: install OSM from source code using the latest stable tag" + echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch" + echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano" + echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana" + echo -e " --volume: create a VM volume 
when installing to OpenStack" + echo -e " --showopts: print chosen options and exit (only for debugging)" + echo -e " --charmed: Deploy and operate OSM with Charms on k8s" + echo -e " [--bundle ]: Specify with which bundle to deploy OSM with charms (--charmed option)" + echo -e " [--k8s ]: Specify with which kubernetes to deploy OSM with charms (--charmed option)" + echo -e " [--vca ]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)" + echo -e " [--lxd ]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)" + echo -e " [--lxd-cred ]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)" + echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)" + echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)" + echo -e " [--ha]: Installs High Availability bundle. (--charmed option)" + echo -e " [--tag]: Docker image tag. (--charmed option)" + echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)" + +} + +add_repo() { + REPO_CHECK="^$1" + grep "${REPO_CHECK/\[arch=amd64\]/\\[arch=amd64\\]}" /etc/apt/sources.list > /dev/null 2>&1 + if [ $? -ne 0 ] + then + need_packages_lw="software-properties-common apt-transport-https" + echo -e "Checking required packages: $need_packages_lw" + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \ + || sudo apt-get -q update \ + || ! echo "failed to run apt-get update" \ + || exit 1 + dpkg -l $need_packages_lw &>/dev/null \ + || ! echo -e "Installing $need_packages_lw requires root privileges." \ + || sudo apt-get install -y $need_packages_lw \ + || ! 
echo "failed to install $need_packages_lw" \ + || exit 1 + wget -qO - $REPOSITORY_BASE/$RELEASE/OSM%20ETSI%20Release%20Key.gpg | sudo apt-key add - + sudo DEBIAN_FRONTEND=noninteractive add-apt-repository -y "$1" && sudo DEBIAN_FRONTEND=noninteractive apt-get update + return 0 + fi + + return 1 +} + +clean_old_repo() { +dpkg -s 'osm-devops' &> /dev/null +if [ $? -eq 0 ]; then + # Clean the previous repos that might exist + sudo sed -i "/osm-download.etsi.org/d" /etc/apt/sources.list +fi +} + +while getopts ":b:r:c:n:k:u:R:l:L:K:p:D:o:O:m:N:H:S:s:w:t:U:P:A:d:p:f:F:-: hy" o; do + case "${o}" in + D) + DEVOPS_PATH="${OPTARG}" + ;; + r) + REPOSITORY="${OPTARG}" + ;; + R) + RELEASE="${OPTARG}" + ;; + u) + REPOSITORY_BASE="${OPTARG}" + ;; + t) + DOCKER_TAG="${OPTARG}" + ;; + -) + [ "${OPTARG}" == "help" ] && usage && exit 0 + ;; + :) + echo "Option -$OPTARG requires an argument" >&2 + usage && exit 1 + ;; + \?) + echo -e "Invalid option: '-$OPTARG'\n" >&2 + usage && exit 1 + ;; + h) + usage && exit 0 + ;; + *) + ;; + esac +done + +clean_old_repo +add_repo "deb [arch=amd64] $REPOSITORY_BASE/$RELEASE $REPOSITORY devops" +sudo DEBIAN_FRONTEND=noninteractive apt-get -q update +sudo DEBIAN_FRONTEND=noninteractive apt-get install osm-devops +$DEVOPS_PATH/installers/full_install_osm.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $DEVOPS_PATH -t $DOCKER_TAG "$@" \ No newline at end of file diff --git a/_tmp/osm-mitm/client.original.py b/_tmp/osm-mitm/client.original.py new file mode 100644 index 0000000..c7f043b --- /dev/null +++ b/_tmp/osm-mitm/client.original.py @@ -0,0 +1,161 @@ +# Copyright 2018 Telefonica +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +OSM SOL005 client API +""" + +# from osmclient.v1 import vca +from osmclient.sol005 import vnfd +from osmclient.sol005 import nsd +from osmclient.sol005 import nst +from osmclient.sol005 import nsi +from osmclient.sol005 import ns +from osmclient.sol005 import vnf +from osmclient.sol005 import vim +from osmclient.sol005 import wim +from osmclient.sol005 import package +from osmclient.sol005 import http +from osmclient.sol005 import sdncontroller +from osmclient.sol005 import project as projectmodule +from osmclient.sol005 import user as usermodule +from osmclient.sol005 import role +from osmclient.sol005 import pdud +from osmclient.sol005 import k8scluster +from osmclient.sol005 import vca +from osmclient.sol005 import repo +from osmclient.sol005 import osmrepo +from osmclient.sol005 import subscription +from osmclient.common import package_tool +import json +import logging + + +class Client(object): + def __init__( + self, + host=None, + so_port=9999, + user="admin", + password="admin", + project="admin", + **kwargs + ): + + self._user = user + self._password = password + self._project = project + self._project_domain_name = kwargs.get("project_domain_name") + self._user_domain_name = kwargs.get("user_domain_name") + self._logger = logging.getLogger("osmclient") + self._auth_endpoint = "/admin/v1/tokens" + self._headers = {} + self._token = None + if len(host.split(":")) > 1: + # backwards compatible, port provided as part of host + self._host = host.split(":")[0] + self._so_port = host.split(":")[1] + else: + self._host = host + self._so_port = 
so_port + + self._http_client = http.Http( + "https://{}:{}/osm".format(self._host, self._so_port), **kwargs + ) + self._headers["Accept"] = "application/json" + self._headers["Content-Type"] = "application/yaml" + http_header = [ + "{}: {}".format(key, val) for (key, val) in list(self._headers.items()) + ] + self._http_client.set_http_header(http_header) + + self.vnfd = vnfd.Vnfd(self._http_client, client=self) + self.nsd = nsd.Nsd(self._http_client, client=self) + self.nst = nst.Nst(self._http_client, client=self) + self.package = package.Package(self._http_client, client=self) + self.ns = ns.Ns(self._http_client, client=self) + self.nsi = nsi.Nsi(self._http_client, client=self) + self.vim = vim.Vim(self._http_client, client=self) + self.wim = wim.Wim(self._http_client, client=self) + self.sdnc = sdncontroller.SdnController(self._http_client, client=self) + self.vnf = vnf.Vnf(self._http_client, client=self) + self.project = projectmodule.Project(self._http_client, client=self) + self.user = usermodule.User(self._http_client, client=self) + self.role = role.Role(self._http_client, client=self) + self.pdu = pdud.Pdu(self._http_client, client=self) + self.k8scluster = k8scluster.K8scluster(self._http_client, client=self) + self.vca = vca.VCA(self._http_client, client=self) + self.repo = repo.Repo(self._http_client, client=self) + self.osmrepo = osmrepo.OSMRepo(self._http_client, client=self) + self.package_tool = package_tool.PackageTool(client=self) + self.subscription = subscription.Subscription(self._http_client, client=self) + """ + self.vca = vca.Vca(http_client, client=self, **kwargs) + self.utils = utils.Utils(http_client, **kwargs) + """ + + def get_token(self): + self._logger.debug("") + if self._token is None: + postfields_dict = { + "username": self._user, + "password": self._password, + "project_id": self._project, + } + if self._project_domain_name: + postfields_dict["project_domain_name"] = self._project_domain_name + if self._user_domain_name: + 
postfields_dict["user_domain_name"] = self._user_domain_name + http_code, resp = self._http_client.post_cmd( + endpoint=self._auth_endpoint, + postfields_dict=postfields_dict, + skip_query_admin=True, + ) + # if http_code not in (200, 201, 202, 204): + # message ='Authentication error: not possible to get auth token\nresp:\n{}'.format(resp) + # raise ClientException(message) + + token = json.loads(resp) if resp else None + self._token = token["id"] + + if self._token is not None: + self._headers["Authorization"] = "Bearer {}".format(self._token) + http_header = [ + "{}: {}".format(key, val) + for (key, val) in list(self._headers.items()) + ] + self._http_client.set_http_header(http_header) + + def get_version(self): + _, resp = self._http_client.get2_cmd(endpoint="/version", skip_query_admin=True) + # print(http_code, resp) + try: + resp = json.loads(resp) + version = resp.get("version") + date = resp.get("date") + except ValueError: + version = resp.split()[2] + date = resp.split()[4] + return "{} {}".format(version, date) + + def set_default_params(self, **kwargs): + host = kwargs.pop("host", None) + if host is not None: + self._host = host + port = kwargs.pop("port", None) + if port is not None: + self._so_port = port + self._http_client.set_query_admin(**kwargs) diff --git a/_tmp/osm-mitm/client.py b/_tmp/osm-mitm/client.py new file mode 100644 index 0000000..1cd8ee5 --- /dev/null +++ b/_tmp/osm-mitm/client.py @@ -0,0 +1,162 @@ +# Copyright 2018 Telefonica +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +OSM SOL005 client API +""" + +# from osmclient.v1 import vca +from osmclient.sol005 import vnfd +from osmclient.sol005 import nsd +from osmclient.sol005 import nst +from osmclient.sol005 import nsi +from osmclient.sol005 import ns +from osmclient.sol005 import vnf +from osmclient.sol005 import vim +from osmclient.sol005 import wim +from osmclient.sol005 import package +from osmclient.sol005 import http +from osmclient.sol005 import sdncontroller +from osmclient.sol005 import project as projectmodule +from osmclient.sol005 import user as usermodule +from osmclient.sol005 import role +from osmclient.sol005 import pdud +from osmclient.sol005 import k8scluster +from osmclient.sol005 import vca +from osmclient.sol005 import repo +from osmclient.sol005 import osmrepo +from osmclient.sol005 import subscription +from osmclient.common import package_tool +import json +import logging + + +class Client(object): + def __init__( + self, + host=None, + so_port=9999, + user="admin", + password="admin", + project="admin", + **kwargs + ): + + self._user = user + self._password = password + self._project = project + self._project_domain_name = kwargs.get("project_domain_name") + self._user_domain_name = kwargs.get("user_domain_name") + self._logger = logging.getLogger("osmclient") + self._auth_endpoint = "/admin/v1/tokens" + self._headers = {} + self._token = None + if len(host.split(":")) > 1: + # backwards compatible, port provided as part of host + self._host = host.split(":")[0] + self._so_port = host.split(":")[1] + else: + self._host = host + self._so_port = so_port + + self._http_client = http.Http( + # "https://{}:{}/osm".format(self._host, self._so_port), **kwargs + "http://localhost/osm", **kwargs + ) + self._headers["Accept"] = "application/json" + self._headers["Content-Type"] = "application/yaml" + http_header = [ + "{}: {}".format(key, val) for (key, val) in 
list(self._headers.items()) + ] + self._http_client.set_http_header(http_header) + + self.vnfd = vnfd.Vnfd(self._http_client, client=self) + self.nsd = nsd.Nsd(self._http_client, client=self) + self.nst = nst.Nst(self._http_client, client=self) + self.package = package.Package(self._http_client, client=self) + self.ns = ns.Ns(self._http_client, client=self) + self.nsi = nsi.Nsi(self._http_client, client=self) + self.vim = vim.Vim(self._http_client, client=self) + self.wim = wim.Wim(self._http_client, client=self) + self.sdnc = sdncontroller.SdnController(self._http_client, client=self) + self.vnf = vnf.Vnf(self._http_client, client=self) + self.project = projectmodule.Project(self._http_client, client=self) + self.user = usermodule.User(self._http_client, client=self) + self.role = role.Role(self._http_client, client=self) + self.pdu = pdud.Pdu(self._http_client, client=self) + self.k8scluster = k8scluster.K8scluster(self._http_client, client=self) + self.vca = vca.VCA(self._http_client, client=self) + self.repo = repo.Repo(self._http_client, client=self) + self.osmrepo = osmrepo.OSMRepo(self._http_client, client=self) + self.package_tool = package_tool.PackageTool(client=self) + self.subscription = subscription.Subscription(self._http_client, client=self) + """ + self.vca = vca.Vca(http_client, client=self, **kwargs) + self.utils = utils.Utils(http_client, **kwargs) + """ + + def get_token(self): + self._logger.debug("") + if self._token is None: + postfields_dict = { + "username": self._user, + "password": self._password, + "project_id": self._project, + } + if self._project_domain_name: + postfields_dict["project_domain_name"] = self._project_domain_name + if self._user_domain_name: + postfields_dict["user_domain_name"] = self._user_domain_name + http_code, resp = self._http_client.post_cmd( + endpoint=self._auth_endpoint, + postfields_dict=postfields_dict, + skip_query_admin=True, + ) + # if http_code not in (200, 201, 202, 204): + # message ='Authentication 
error: not possible to get auth token\nresp:\n{}'.format(resp) + # raise ClientException(message) + + token = json.loads(resp) if resp else None + self._token = token["id"] + + if self._token is not None: + self._headers["Authorization"] = "Bearer {}".format(self._token) + http_header = [ + "{}: {}".format(key, val) + for (key, val) in list(self._headers.items()) + ] + self._http_client.set_http_header(http_header) + + def get_version(self): + _, resp = self._http_client.get2_cmd(endpoint="/version", skip_query_admin=True) + # print(http_code, resp) + try: + resp = json.loads(resp) + version = resp.get("version") + date = resp.get("date") + except ValueError: + version = resp.split()[2] + date = resp.split()[4] + return "{} {}".format(version, date) + + def set_default_params(self, **kwargs): + host = kwargs.pop("host", None) + if host is not None: + self._host = host + port = kwargs.pop("port", None) + if port is not None: + self._so_port = port + self._http_client.set_query_admin(**kwargs) diff --git a/_tmp/osm-mitm/intercept.md b/_tmp/osm-mitm/intercept.md new file mode 100644 index 0000000..a8c0efa --- /dev/null +++ b/_tmp/osm-mitm/intercept.md @@ -0,0 +1,30 @@ +Intercepting OSM client messages +-------------------------------- + +OSM client only uses HTTPS. Since the code doesn't verify the server +identity, it's easy to set up a man-in-the-middle attack to observe +HTTP traffic between the client and the server---i.e. the NBI, OSM's +north bound interface. To do that, you can install `stunnel` and tweak +the config in this dir---have a look at `stunnel-mitm-proxy.conf`. + +An easier way to catch HTTP messages is to tweak the client code to +make it use plain HTTP without TLS. To see how diff the two Python +files in this dir. 
+ +To monitor HTTP traffic in the OSM host, you have to replace + + /usr/lib/python3/dist-packages/osmclient/sol005/client.py + +with `client.py` in this dir, then + +```bash + $ multipass shell osm + [osm]$ sudo tcpdump -i cni0 -s 1024 -A port 80 +``` + +and use the OSM client, e.g. + +```bash + $ multipass shell osm + [osm]$ osm ns-op-list ldap +``` diff --git a/_tmp/osm-mitm/message-flow.ns-action.upgrade.md b/_tmp/osm-mitm/message-flow.ns-action.upgrade.md new file mode 100644 index 0000000..7ba8828 --- /dev/null +++ b/_tmp/osm-mitm/message-flow.ns-action.upgrade.md @@ -0,0 +1,389 @@ +HTTP message flow for NS upgrade +-------------------------------- + +### GET NS instances content + +HTTP request + +```http +GET /osm/nslcm/v1/ns_instances_content HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer T1kxQMgnqty50kmjQZnvfenBbekDt1iG +``` + +HTTP response + +```http +HTTP/1.1 200 OK +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 14:53:57 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 12959 +Connection: keep-alive +Set-Cookie: session_id=6460b870282dd3d61fc7c72ff6d9cbc4a738d1ff; expires=Fri, 10 Sep 2021 15:53:57 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +[ + { + "_id": "0335c32c-d28c-4d79-9b94-0ffa36326932", + "name": "ldap", + "name-ref": "ldap", + "short-name": "ldap", + "admin-status": "ENABLED", + "nsState": "READY", + "currentOperation": "IDLE", + "currentOperationID": null, + "errorDescription": null, + "errorDetail": null, + "deploymentStatus": null, + "configurationStatus": [], + "vcaStatus": null, + "nsd": { + "_id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "id": "openldap_ns", + "designer": "OSM", + "version": "1.0", + "name": "openldap_ns", + "vnfd-id": [ + "openldap_knf" + ], + "virtual-link-desc": [ + { + "id": "mgmtnet", + 
"mgmt-network": true + } + ], + "df": [ + { + "id": "default-df", + "vnf-profile": [ + { + "id": "openldap", + "virtual-link-connectivity": [ + { + "constituent-cpd-id": [ + { + "constituent-base-element-id": "openldap", + "constituent-cpd-id": "mgmt-ext" + } + ], + "virtual-link-profile-id": "mgmtnet" + } + ], + "vnfd-id": "openldap_knf" + } + ] + } + ], + "description": "NS consisting of a single KNF openldap_knf connected to mgmt network", + "_admin": { + "userDefinedData": {}, + "created": 1631268635.96618, + "modified": 1631268637.8627107, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "storage": { + "fs": "mongo", + "path": "/app/storage/", + "folder": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "pkg-dir": "openldap_ns", + "descriptor": "openldap_ns/openldap_nsd.yaml", + "zipfile": "openldap_ns.tar.gz" + } + } + }, + "datacenter": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "resource-orchestrator": "osmopenmano", + "description": "default description", + "constituent-vnfr-ref": [ + "ae63ee09-847f-4108-9a22-852899b6e0ae" + ], + "operational-status": "running", + "config-status": "configured", + "detailed-status": "Done", + "orchestration-progress": {}, + "create-time": 1631277626.5666356, + "nsd-name-ref": "openldap_ns", + "operational-events": [], + "nsd-ref": "openldap_ns", + "nsd-id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "vnfd-id": [ + "d506d18f-0738-42ab-8b45-cfa98da38e7a" + ], + "instantiate_params": { + "nsdId": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "nsName": "ldap", + "nsDescription": "default description", + "vimAccountId": "4a4425f7-3e72-4d45-a4ec-4241186f3547" + }, + "additionalParamsForNs": null, + "ns-instance-config-ref": "0335c32c-d28c-4d79-9b94-0ffa36326932", + "id": "0335c32c-d28c-4d79-9b94-0ffa36326932", + "ssh-authorized-key": null, + "flavor": [], + 
"image": [], + "vld": [ + { + "id": "mgmtnet", + "mgmt-network": true, + "name": "mgmtnet", + "type": null, + "vim_info": { + "vim:4a4425f7-3e72-4d45-a4ec-4241186f3547": { + "vim_account_id": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "vim_network_name": null, + "vim_details": "{name: mgmtnet, status: ACTIVE}\n", + "vim_id": "81a7fb44-b765-4b16-985f-13b481d3b892", + "vim_status": "ACTIVE", + "vim_name": "mgmtnet" + } + } + } + ], + "_admin": { + "created": 1631277626.626409, + "modified": 1631285336.7610166, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "nsState": "INSTANTIATED", + "current-operation": null, + "nslcmop": null, + "operation-type": null, + "deployed": { + "RO": { + "vnfd": [], + "operational-status": "running" + }, + "VCA": [], + "K8s": [ + { + "kdu-instance": "stable-openldap-1-2-3-0098084071", + "k8scluster-uuid": "kube-system:b33b0bfd-ce33-47b9-b286-a60c8f04b6d9", + "k8scluster-type": "helm-chart-v3", + "member-vnf-index": "openldap", + "kdu-name": "ldap", + "kdu-model": "stable/openldap:1.2.3", + "namespace": "fada443a-905c-4241-8a33-4dcdbdac55e7", + "kdu-deployment-name": "", + "detailed-status": "{'info': {'deleted': '', 'description': 'Install complete', 'first_deployed': '2021-09-10T12:40:56.55575157Z', 'last_deployed': '2021-09-10T12:40:56.55575157Z', 'status': 'deployed'}, 'name': 'stable-openldap-1-2-3-0098084071', 'namespace': 'fada443a-905c-4241-8a33-4dcdbdac55e7', 'version': 1}", + "operation": "install", + "status": "Install complete", + "status-time": "1631277711.4568162" + } + ] + } + } + }, + { + "_id": "136fcc46-c363-4d74-af14-c115fff7d80a", + "name": "ldap2", + "name-ref": "ldap2", + "short-name": "ldap2", + "admin-status": "ENABLED", + "nsState": "READ", + "currentOperation": "IDLE", + "currentOperationID": null, + "errorDescription": null, + "errorDetail": null, + "deploymentStatus": null, + "configurationStatus": [], + "vcaStatus": null, + 
"nsd": { + "_id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "id": "openldap_ns", + "designer": "OSM", + "version": "1.0", + "name": "openldap_ns", + "vnfd-id": [ + "openldap_knf" + ], + "virtual-link-desc": [ + { + "id": "mgmtnet", + "mgmt-network": true + } + ], + "df": [ + { + "id": "default-df", + "vnf-profile": [ + { + "id": "openldap", + "virtual-link-connectivity": [ + { + "constituent-cpd-id": [ + { + "constituent-base-element-id": "openldap", + "constituent-cpd-id": "mgmt-ext" + } + ], + "virtual-link-profile-id": "mgmtnet" + } + ], + "vnfd-id": "openldap_knf" + } + ] + } + ], + "description": "NS consisting of a single KNF openldap_knf connected to mgmt network", + "_admin": { + "userDefinedData": {}, + "created": 1631268635.96618, + "modified": 1631268637.8627107, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "storage": { + "fs": "mongo", + "path": "/app/storage/", + "folder": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "pkg-dir": "openldap_ns", + "descriptor": "openldap_ns/openldap_nsd.yaml", + "zipfile": "openldap_ns.tar.gz" + } + } + }, + "datacenter": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "resource-orchestrator": "osmopenmano", + "description": "default description", + "constituent-vnfr-ref": [ + "609ae829-8fbe-44f1-944d-2fba5cd909c2" + ], + "operational-status": "running", + "config-status": "configured", + "detailed-status": "Done", + "orchestration-progress": {}, + "create-time": 1631282159.0447648, + "nsd-name-ref": "openldap_ns", + "operational-events": [], + "nsd-ref": "openldap_ns", + "nsd-id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "vnfd-id": [ + "d506d18f-0738-42ab-8b45-cfa98da38e7a" + ], + "instantiate_params": { + "nsdId": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "nsName": "ldap2", + "nsDescription": "default description", + "vimAccountId": 
"4a4425f7-3e72-4d45-a4ec-4241186f3547" + }, + "additionalParamsForNs": null, + "ns-instance-config-ref": "136fcc46-c363-4d74-af14-c115fff7d80a", + "id": "136fcc46-c363-4d74-af14-c115fff7d80a", + "ssh-authorized-key": null, + "flavor": [], + "image": [], + "vld": [ + { + "id": "mgmtnet", + "mgmt-network": true, + "name": "mgmtnet", + "type": null, + "vim_info": { + "vim:4a4425f7-3e72-4d45-a4ec-4241186f3547": { + "vim_account_id": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "vim_network_name": null, + "vim_details": "{name: mgmtnet, status: ACTIVE}\n", + "vim_id": "81a7fb44-b765-4b16-985f-13b481d3b892", + "vim_status": "ACTIVE", + "vim_name": "mgmtnet" + } + } + } + ], + "_admin": { + "created": 1631282159.0555632, + "modified": 1631285403.5654724, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "nsState": "INSTANTIATED", + "current-operation": null, + "nslcmop": null, + "operation-type": null, + "deployed": { + "RO": { + "vnfd": [], + "operational-status": "running" + }, + "VCA": [], + "K8s": [ + { + "kdu-instance": "stable-openldap-1-2-3-0044064996", + "k8scluster-uuid": "kube-system:b33b0bfd-ce33-47b9-b286-a60c8f04b6d9", + "k8scluster-type": "helm-chart-v3", + "member-vnf-index": "openldap", + "kdu-name": "ldap", + "kdu-model": "stable/openldap:1.2.3", + "namespace": "fada443a-905c-4241-8a33-4dcdbdac55e7", + "kdu-deployment-name": "", + "detailed-status": "{'config': {'replicaCount': '2'}, 'info': {'deleted': '', 'description': 'Install complete', 'first_deployed': '2021-09-10T13:56:20.089257801Z', 'last_deployed': '2021-09-10T13:56:20.089257801Z', 'status': 'deployed'}, 'name': 'stable-openldap-1-2-3-0044064996', 'namespace': 'fada443a-905c-4241-8a33-4dcdbdac55e7', 'version': 1}", + "operation": "install", + "status": "Install complete", + "status-time": "1631282216.1732676" + } + ] + } + } + } +] +``` + +Notice OSM client reissues the same `GET` again after this. 
This duplication +of HTTP requests might well be a bug... + + +### POST target NS instance action + +HTTP request + +```http +POST /osm/nslcm/v1/ns_instances/0335c32c-d28c-4d79-9b94-0ffa36326932/action HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer T1kxQMgnqty50kmjQZnvfenBbekDt1iG +Content-Length: 136 + +{"member_vnf_index": "openldap", "kdu_name": "ldap", "primitive": "upgrade", "primitive_params": {"kdu_model": "stable/openldap:1.2.2"}} +``` + +HTTP response + +```http +HTTP/1.1 202 Accepted +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 14:53:57 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 53 +Connection: keep-alive +Location: /osm/nslcm/v1/ns_lcm_op_occs/5c6e4a0d-6238-4aa8-9147-e4e738bf16f4 +Set-Cookie: session_id=b7d8768fac6d48c9fc51f6c682592273cc7a3f63; expires=Fri, 10 Sep 2021 15:53:57 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +{ + "id": "5c6e4a0d-6238-4aa8-9147-e4e738bf16f4" +} +``` diff --git a/_tmp/osm-mitm/message-flow.ns-action.upgrade2.md b/_tmp/osm-mitm/message-flow.ns-action.upgrade2.md new file mode 100644 index 0000000..419aa62 --- /dev/null +++ b/_tmp/osm-mitm/message-flow.ns-action.upgrade2.md @@ -0,0 +1,389 @@ +HTTP message flow for NS upgrade +-------------------------------- + +### GET NS instances content + +HTTP request + +```http +GET /osm/nslcm/v1/ns_instances_content HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer Rxb9XReQHdW6XmtjKLFLToLs0W0XbD7n +``` + +HTTP response + +```http +HTTP/1.1 200 OK +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 17:24:59 GMT +Content-Type: application/json; charset=utf-8 
+Content-Length: 13396 +Connection: keep-alive +Set-Cookie: session_id=02efc3019fd72867333ec8223528ed6fbcf022ed; expires=Fri, 10 Sep 2021 18:24:59 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +[ + { + "_id": "0335c32c-d28c-4d79-9b94-0ffa36326932", + "name": "ldap", + "name-ref": "ldap", + "short-name": "ldap", + "admin-status": "ENABLED", + "nsState": "READY", + "currentOperation": "IDLE", + "currentOperationID": null, + "errorDescription": null, + "errorDetail": null, + "deploymentStatus": null, + "configurationStatus": [], + "vcaStatus": null, + "nsd": { + "_id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "id": "openldap_ns", + "designer": "OSM", + "version": "1.0", + "name": "openldap_ns", + "vnfd-id": [ + "openldap_knf" + ], + "virtual-link-desc": [ + { + "id": "mgmtnet", + "mgmt-network": true + } + ], + "df": [ + { + "id": "default-df", + "vnf-profile": [ + { + "id": "openldap", + "virtual-link-connectivity": [ + { + "constituent-cpd-id": [ + { + "constituent-base-element-id": "openldap", + "constituent-cpd-id": "mgmt-ext" + } + ], + "virtual-link-profile-id": "mgmtnet" + } + ], + "vnfd-id": "openldap_knf" + } + ] + } + ], + "description": "NS consisting of a single KNF openldap_knf connected to mgmt network", + "_admin": { + "userDefinedData": {}, + "created": 1631268635.96618, + "modified": 1631268637.8627107, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "storage": { + "fs": "mongo", + "path": "/app/storage/", + "folder": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "pkg-dir": "openldap_ns", + "descriptor": "openldap_ns/openldap_nsd.yaml", + "zipfile": "openldap_ns.tar.gz" + } + } + }, + "datacenter": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "resource-orchestrator": "osmopenmano", + "description": "default description", + "constituent-vnfr-ref": [ + 
"ae63ee09-847f-4108-9a22-852899b6e0ae" + ], + "operational-status": "running", + "config-status": "configured", + "detailed-status": "Done", + "orchestration-progress": {}, + "create-time": 1631277626.5666356, + "nsd-name-ref": "openldap_ns", + "operational-events": [], + "nsd-ref": "openldap_ns", + "nsd-id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "vnfd-id": [ + "d506d18f-0738-42ab-8b45-cfa98da38e7a" + ], + "instantiate_params": { + "nsdId": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "nsName": "ldap", + "nsDescription": "default description", + "vimAccountId": "4a4425f7-3e72-4d45-a4ec-4241186f3547" + }, + "additionalParamsForNs": null, + "ns-instance-config-ref": "0335c32c-d28c-4d79-9b94-0ffa36326932", + "id": "0335c32c-d28c-4d79-9b94-0ffa36326932", + "ssh-authorized-key": null, + "flavor": [], + "image": [], + "vld": [ + { + "id": "mgmtnet", + "mgmt-network": true, + "name": "mgmtnet", + "type": null, + "vim_info": { + "vim:4a4425f7-3e72-4d45-a4ec-4241186f3547": { + "vim_account_id": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "vim_network_name": null, + "vim_details": "{name: mgmtnet, status: ACTIVE}\n", + "vim_id": "81a7fb44-b765-4b16-985f-13b481d3b892", + "vim_status": "ACTIVE", + "vim_name": "mgmtnet" + } + } + } + ], + "_admin": { + "created": 1631277626.626409, + "modified": 1631285667.3411994, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "nsState": "INSTANTIATED", + "current-operation": null, + "nslcmop": null, + "operation-type": null, + "deployed": { + "RO": { + "vnfd": [], + "operational-status": "running" + }, + "VCA": [], + "K8s": [ + { + "kdu-instance": "stable-openldap-1-2-3-0098084071", + "k8scluster-uuid": "kube-system:b33b0bfd-ce33-47b9-b286-a60c8f04b6d9", + "k8scluster-type": "helm-chart-v3", + "member-vnf-index": "openldap", + "kdu-name": "ldap", + "kdu-model": "stable/openldap:1.2.3", + "namespace": "fada443a-905c-4241-8a33-4dcdbdac55e7", + 
"kdu-deployment-name": "", + "detailed-status": "{'info': {'deleted': '', 'description': 'Rollback \"stable-openldap-1-2-3-0098084071\" failed: cannot patch \"stable-openldap-1-2-3-0098084071\" with kind Service: Service \"stable-openldap-1-2-3-0098084071\" is invalid: spec.clusterIP: Invalid value: \"\": field is immutable', 'first_deployed': '2021-09-10T12:40:56.55575157Z', 'last_deployed': '2021-09-10T14:54:26.378456605Z', 'status': 'failed'}, 'name': 'stable-openldap-1-2-3-0098084071', 'namespace': 'fada443a-905c-4241-8a33-4dcdbdac55e7', 'version': 3}", + "operation": "upgrade", + "status": "Rollback \"stable-openldap-1-2-3-0098084071\" failed: cannot patch \"stable-openldap-1-2-3-0098084071\" with kind Service: Service \"stab\" is invalid: spec.clusterIP: Invalid value: \"\": field is immutable", + "status-time": "1631285667.3301775" + } + ] + } + } + }, + { + "_id": "136fcc46-c363-4d74-af14-c115fff7d80a", + "name": "ldap2", + "name-ref": "ldap2", + "short-name": "ldap2", + "admin-status": "ENABLED", + "nsState": "READY", + "currentOperation": "IDLE", + "currentOperationID": null, + "errorDescription": null, + "errorDetail": null, + "deploymentStatus": null, + "configurationStatus": [], + "vcaStatus": null, + "nsd": { + "_id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "id": "openldap_ns", + "designer": "OSM", + "version": "1.0", + "name": "openldap_ns", + "vnfd-id": [ + "openldap_knf" + ], + "virtual-link-desc": [ + { + "id": "mgmtnet", + "mgmt-network": true + } + ], + "df": [ + { + "id": "default-df", + "vnf-profile": [ + { + "id": "openldap", + "virtual-link-connectivity": [ + { + "constituent-cpd-id": [ + { + "constituent-base-element-id": "openldap", + "constituent-cpd-id": "mgmt-ext" + } + ], + "virtual-link-profile-id": "mgmtnet" + } + ], + "vnfd-id": "openldap_knf" + } + ] + } + ], + "description": "NS consisting of a single KNF openldap_knf connected to mgmt network", + "_admin": { + "userDefinedData": {}, + "created": 1631268635.96618, + "modified": 
1631268637.8627107, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "storage": { + "fs": "mongo", + "path": "/app/storage/", + "folder": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "pkg-dir": "openldap_ns", + "descriptor": "openldap_ns/openldap_nsd.yaml", + "zipfile": "openldap_ns.tar.gz" + } + } + }, + "datacenter": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "resource-orchestrator": "osmopenmano", + "description": "default description", + "constituent-vnfr-ref": [ + "609ae829-8fbe-44f1-944d-2fba5cd909c2" + ], + "operational-status": "running", + "config-status": "configured", + "detailed-status": "Done", + "orchestration-progress": {}, + "create-time": 1631282159.0447648, + "nsd-name-ref": "openldap_ns", + "operational-events": [], + "nsd-ref": "openldap_ns", + "nsd-id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "vnfd-id": [ + "d506d18f-0738-42ab-8b45-cfa98da38e7a" + ], + "instantiate_params": { + "nsdId": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "nsName": "ldap2", + "nsDescription": "default description", + "vimAccountId": "4a4425f7-3e72-4d45-a4ec-4241186f3547" + }, + "additionalParamsForNs": null, + "ns-instance-config-ref": "136fcc46-c363-4d74-af14-c115fff7d80a", + "id": "136fcc46-c363-4d74-af14-c115fff7d80a", + "ssh-authorized-key": null, + "flavor": [], + "image": [], + "vld": [ + { + "id": "mgmtnet", + "mgmt-network": true, + "name": "mgmtnet", + "type": null, + "vim_info": { + "vim:4a4425f7-3e72-4d45-a4ec-4241186f3547": { + "vim_account_id": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "vim_network_name": null, + "vim_details": "{name: mgmtnet, status: ACTIVE}\n", + "vim_id": "81a7fb44-b765-4b16-985f-13b481d3b892", + "vim_status": "ACTIVE", + "vim_name": "mgmtnet" + } + } + } + ], + "_admin": { + "created": 1631282159.0555632, + "modified": 1631285403.5654724, + "projects_read": 
[ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "nsState": "INSTANTIATED", + "current-operation": null, + "nslcmop": null, + "operation-type": null, + "deployed": { + "RO": { + "vnfd": [], + "operational-status": "running" + }, + "VCA": [], + "K8s": [ + { + "kdu-instance": "stable-openldap-1-2-3-0044064996", + "k8scluster-uuid": "kube-system:b33b0bfd-ce33-47b9-b286-a60c8f04b6d9", + "k8scluster-type": "helm-chart-v3", + "member-vnf-index": "openldap", + "kdu-name": "ldap", + "kdu-model": "stable/openldap:1.2.3", + "namespace": "fada443a-905c-4241-8a33-4dcdbdac55e7", + "kdu-deployment-name": "", + "detailed-status": "{'config': {'replicaCount': '2'}, 'info': {'deleted': '', 'description': 'Install complete', 'first_deployed': '2021-09-10T13:56:20.089257801Z', 'last_deployed': '2021-09-10T13:56:20.089257801Z', 'status': 'deployed'}, 'name': 'stable-openldap-1-2-3-0044064996', 'namespace': 'fada443a-905c-4241-8a33-4dcdbdac55e7', 'version': 1}", + "operation": "install", + "status": "Install complete", + "status-time": "1631282216.1732676" + } + ] + } + } + } +] +``` + +Notice OSM client reissues the same `GET` again after this. This duplication +of HTTP requests might well be a bug... 
+ + +### POST target NS instance action + +HTTP request + +```http +POST /osm/nslcm/v1/ns_instances/136fcc46-c363-4d74-af14-c115fff7d80a/action HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer Rxb9XReQHdW6XmtjKLFLToLs0W0XbD7n +Content-Length: 119 + +{"member_vnf_index": "openldap", "kdu_name": "ldap", "primitive": "upgrade", "primitive_params": {"replicaCount": "3"}} +``` + +HTTP response + +```http +HTTP/1.1 202 Accepted +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 17:25:00 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 53 +Connection: keep-alive +Location: /osm/nslcm/v1/ns_lcm_op_occs/f92f746f-c10a-448e-84e1-3acfd8b684cb +Set-Cookie: session_id=ca59ba9d0d29c0535d2273935498383e9af28a68; expires=Fri, 10 Sep 2021 18:24:59 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +{ + "id": "f92f746f-c10a-448e-84e1-3acfd8b684cb" +} +``` diff --git a/_tmp/osm-mitm/message-flow.ns-create.md b/_tmp/osm-mitm/message-flow.ns-create.md new file mode 100644 index 0000000..26636c5 --- /dev/null +++ b/_tmp/osm-mitm/message-flow.ns-create.md @@ -0,0 +1,269 @@ +HTTP message flow for NS create +------------------------------- + + +### GET NS descriptors + +HTTP request + +```http +GET /osm/nsd/v1/ns_descriptors HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer 0WhgBufy1Wt82NbF9OsmftwpRfcsV4sU +``` + +HTTP response + +```http +HTTP/1.1 200 OK +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 12:40:26 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 2519 +Connection: keep-alive +Set-Cookie: session_id=3eaf925831bd0aa54527956e5f5ca009e3c0ee82; 
expires=Fri, 10 Sep 2021 13:40:26 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +[ + { + "_id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "id": "openldap_ns", + "designer": "OSM", + "version": "1.0", + "name": "openldap_ns", + "vnfd-id": [ + "openldap_knf" + ], + "virtual-link-desc": [ + { + "id": "mgmtnet", + "mgmt-network": true + } + ], + "df": [ + { + "id": "default-df", + "vnf-profile": [ + { + "id": "openldap", + "virtual-link-connectivity": [ + { + "constituent-cpd-id": [ + { + "constituent-base-element-id": "openldap", + "constituent-cpd-id": "mgmt-ext" + } + ], + "virtual-link-profile-id": "mgmtnet" + } + ], + "vnfd-id": "openldap_knf" + } + ] + } + ], + "description": "NS consisting of a single KNF openldap_knf connected to mgmt network", + "_admin": { + "userDefinedData": {}, + "created": 1631268635.96618, + "modified": 1631268637.8627107, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "storage": { + "fs": "mongo", + "path": "/app/storage/", + "folder": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "pkg-dir": "openldap_ns", + "descriptor": "openldap_ns/openldap_nsd.yaml", + "zipfile": "openldap_ns.tar.gz" + } + }, + "nsdOnboardingState": "ONBOARDED", + "nsdOperationalState": "ENABLED", + "nsdUsageState": "NOT_IN_USE", + "_links": { + "self": { + "href": "/nsd/v1/ns_descriptors/aba58e40-d65f-4f4e-be0a-e248c14d3e03" + }, + "nsd_content": { + "href": "/nsd/v1/ns_descriptors/aba58e40-d65f-4f4e-be0a-e248c14d3e03/nsd_content" + } + } + } +] +``` + + +### GET VIM accounts + +HTTP request + +```http +GET /osm/admin/v1/vim_accounts HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer 
0WhgBufy1Wt82NbF9OsmftwpRfcsV4sU +``` + +HTTP response + +```http +HTTP/1.1 200 OK +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 12:40:26 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 1187 +Connection: keep-alive +Set-Cookie: session_id=67f9cd441ece24102eed7d5f771dea5dc86a0cea; expires=Fri, 10 Sep 2021 13:40:26 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +[ + { + "_id": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "name": "mylocation1", + "vim_type": "dummy", + "description": null, + "vim_url": "http://localhost/dummy", + "vim_user": "u", + "vim_password": "fNnfmd3KFXvfyVKu3nzItg==", + "vim_tenant_name": "p", + "_admin": { + "created": 1631212983.5388303, + "modified": 1631212983.5388303, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "operationalState": "ENABLED", + "operations": [ + { + "lcmOperationType": "create", + "operationState": "COMPLETED", + "startTime": 1631212983.5930278, + "statusEnteredTime": 1631212984.0220273, + "operationParams": null + } + ], + "current_operation": null, + "detailed-status": "" + }, + "schema_version": "1.11", + "admin": { + "current_operation": 0 + } + } +] +``` + + +### GET target VIM account + +HTTP request + +```http +GET /osm/admin/v1/vim_accounts/4a4425f7-3e72-4d45-a4ec-4241186f3547 HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer 0WhgBufy1Wt82NbF9OsmftwpRfcsV4sU +``` + +HTTP response + +```http +HTTP/1.1 200 OK +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 12:40:26 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 1039 +Connection: keep-alive +Set-Cookie: session_id=148a2c1099ef30f602784217aaac3d96db3214a7; expires=Fri, 10 Sep 2021 13:40:26 GMT; HttpOnly; Max-Age=3600; Path=/; 
Secure + +{ + "_id": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "name": "mylocation1", + "vim_type": "dummy", + "description": null, + "vim_url": "http://localhost/dummy", + "vim_user": "u", + "vim_password": "fNnfmd3KFXvfyVKu3nzItg==", + "vim_tenant_name": "p", + "_admin": { + "created": 1631212983.5388303, + "modified": 1631212983.5388303, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "operationalState": "ENABLED", + "operations": [ + { + "lcmOperationType": "create", + "operationState": "COMPLETED", + "startTime": 1631212983.5930278, + "statusEnteredTime": 1631212984.0220273, + "operationParams": null + } + ], + "current_operation": null, + "detailed-status": "" + }, + "schema_version": "1.11", + "admin": { + "current_operation": 0 + } +} +``` + +Notice OSM client reissues the same `GET` again after this. This duplication +of HTTP requests might well be a bug... + + +### POST NS instance content + +HTTP request + +```http +POST /osm/nslcm/v1/ns_instances_content HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer 0WhgBufy1Wt82NbF9OsmftwpRfcsV4sU +Content-Length: 163 + +{"nsdId": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", "nsName": "ldap", "nsDescription": "default description", "vimAccountId": "4a4425f7-3e72-4d45-a4ec-4241186f3547"} +``` + +HTTP response + +```http +HTTP/1.1 201 Created +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 12:40:26 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 111 +Connection: keep-alive +Location: /osm/nslcm/v1/ns_instances_content/0335c32c-d28c-4d79-9b94-0ffa36326932 +Set-Cookie: session_id=b97dbb71441703a0d650c5f66b1f08630dabc0b8; expires=Fri, 10 Sep 2021 13:40:26 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +{ + "id": 
"0335c32c-d28c-4d79-9b94-0ffa36326932", + "nslcmop_id": "0c5464da-df42-498e-b306-76d470b76a0d" +} +``` \ No newline at end of file diff --git a/_tmp/osm-mitm/message-flow.ns-create2.md b/_tmp/osm-mitm/message-flow.ns-create2.md new file mode 100644 index 0000000..2465ece --- /dev/null +++ b/_tmp/osm-mitm/message-flow.ns-create2.md @@ -0,0 +1,203 @@ +HTTP message flow for NS create 2 +--------------------------------- + +### GET NS descriptors + +HTTP request + +```http +GET /osm/nsd/v1/ns_descriptors HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer lKGRJmJQdpz9yasXikIUdD5SivCf3tXF +``` + +HTTP response + +```http +HTTP/1.1 200 OK +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 13:55:58 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 2519 +Connection: keep-alive +Set-Cookie: session_id=b4a5a5b5b2519f6550ea3bea5efdbf2f4f9ad517; expires=Fri, 10 Sep 2021 14:55:58 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +[ + { + "_id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "id": "openldap_ns", + "designer": "OSM", + "version": "1.0", + "name": "openldap_ns", + "vnfd-id": [ + "openldap_knf" + ], + "virtual-link-desc": [ + { + "id": "mgmtnet", + "mgmt-network": true + } + ], + "df": [ + { + "id": "default-df", + "vnf-profile": [ + { + "id": "openldap", + "virtual-link-connectivity": [ + { + "constituent-cpd-id": [ + { + "constituent-base-element-id": "openldap", + "constituent-cpd-id": "mgmt-ext" + } + ], + "virtual-link-profile-id": "mgmtnet" + } + ], + "vnfd-id": "openldap_knf" + } + ] + } + ], + "description": "NS consisting of a single KNF openldap_knf connected to mgmt network", + "_admin": { + "userDefinedData": {}, + "created": 1631268635.96618, + "modified": 1631268637.8627107, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + 
"projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "storage": { + "fs": "mongo", + "path": "/app/storage/", + "folder": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "pkg-dir": "openldap_ns", + "descriptor": "openldap_ns/openldap_nsd.yaml", + "zipfile": "openldap_ns.tar.gz" + } + }, + "nsdOnboardingState": "ONBOARDED", + "nsdOperationalState": "ENABLED", + "nsdUsageState": "NOT_IN_USE", + "_links": { + "self": { + "href": "/nsd/v1/ns_descriptors/aba58e40-d65f-4f4e-be0a-e248c14d3e03" + }, + "nsd_content": { + "href": "/nsd/v1/ns_descriptors/aba58e40-d65f-4f4e-be0a-e248c14d3e03/nsd_content" + } + } + } +] +``` + + +### GET target VIM account + +HTTP request + +```http +GET /osm/admin/v1/vim_accounts/4a4425f7-3e72-4d45-a4ec-4241186f3547 HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer lKGRJmJQdpz9yasXikIUdD5SivCf3tXF +``` + +HTTP response + +```http +HTTP/1.1 200 OK +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 13:55:58 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 1039 +Connection: keep-alive +Set-Cookie: session_id=850ace39d2abbe728dcfd089e7e9407b90a7ec43; expires=Fri, 10 Sep 2021 14:55:58 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +{ + "_id": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "name": "mylocation1", + "vim_type": "dummy", + "description": null, + "vim_url": "http://localhost/dummy", + "vim_user": "u", + "vim_password": "fNnfmd3KFXvfyVKu3nzItg==", + "vim_tenant_name": "p", + "_admin": { + "created": 1631212983.5388303, + "modified": 1631212983.5388303, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "operationalState": "ENABLED", + 
"operations": [ + { + "lcmOperationType": "create", + "operationState": "COMPLETED", + "startTime": 1631212983.5930278, + "statusEnteredTime": 1631212984.0220273, + "operationParams": null + } + ], + "current_operation": null, + "detailed-status": "" + }, + "schema_version": "1.11", + "admin": { + "current_operation": 0 + } +} +``` + +Notice OSM client reissues the same `GET` again after this. This duplication +of HTTP requests might well be a bug... + + +### POST NS instance content + +HTTP request + +```http +POST /osm/nslcm/v1/ns_instances_content HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer lKGRJmJQdpz9yasXikIUdD5SivCf3tXF +Content-Length: 319 + +{"nsdId": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", "nsName": "ldap2", "nsDescription": "default description", "vimAccountId": "4a4425f7-3e72-4d45-a4ec-4241186f3547", "additionalParamsForVnf": [{"member-vnf-index": "openldap", "additionalParamsForKdu": [{"kdu_name": "ldap", "additionalParams": {"replicaCount": "2"}}]}]} +``` + +HTTP response + +```http +HTTP/1.1 201 Created +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 13:55:59 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 111 +Connection: keep-alive +Location: /osm/nslcm/v1/ns_instances_content/136fcc46-c363-4d74-af14-c115fff7d80a +Set-Cookie: session_id=0d478e27eccc45667ff15c405362764cb6add7cd; expires=Fri, 10 Sep 2021 14:55:58 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +{ + "id": "136fcc46-c363-4d74-af14-c115fff7d80a", + "nslcmop_id": "b7b6b8a6-f4f2-4862-99bc-f2c73fbf8736" +} +``` diff --git a/_tmp/osm-mitm/message-flows.md b/_tmp/osm-mitm/message-flows.md new file mode 100644 index 0000000..a09c23d --- /dev/null +++ b/_tmp/osm-mitm/message-flows.md @@ -0,0 +1,374 @@ +OSM client HTTP message flows +----------------------------- +> Or 
what the heck OSM client does under the bonnet. + + +### Getting an auth token + +This happens every time you run an `osm` command, i.e. tokens aren't cached! +Example flow + +```http +POST /osm/admin/v1/tokens HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Content-Length: 65 + +{"username": "admin", "password": "admin", "project_id": "admin"} +``` + +```http +HTTP/1.1 200 OK +Server: nginx/1.14.0 (Ubuntu) +Date: Wed, 08 Sep 2021 17:52:11 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 549 +Connection: keep-alive +Www-Authenticate: Bearer realm="Needed a token or Authorization http header" +Location: /osm/admin/v1/tokens/TuD41hLjDvjlR2cPcAFvWcr6FGvRhIk2 +Set-Cookie: session_id=072faf1c629771cdad9133c133fe8bee1202f258; expires=Wed, 08 Sep 2021 18:52:11 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +{ + "issued_at": 1631123531.1251214, + "expires": 1631127131.1251214, + "_id": "TuD41hLjDvjlR2cPcAFvWcr6FGvRhIk2", + "id": "TuD41hLjDvjlR2cPcAFvWcr6FGvRhIk2", + "project_id": "fada443a-905c-4241-8a33-4dcdbdac55e7", + "project_name": "admin", + "username": "admin", + "user_id": "5c6f2d64-9c23-4718-806a-c74c3fc3c98f", + "admin": true, + "roles": [ + { + "name": "system_admin", + "id": "cb545e44-cd2b-4c0b-93aa-7e2cee79afc3" + } + ], +... 
+``` + +Notice the token is valid for an hour: + + issued_at = Wednesday, 8 September 2021 17:52:11.125 (GMT) + expires = Wednesday, 8 September 2021 18:52:11.125 (GMT) + + +### Getting the history of operations on an NS instance + +OSM client command + +```bash +$ osm ns-op-list ldap +ERROR: ns 'ldap' not found +``` + +HTTP request + +```http +GET /osm/nslcm/v1/ns_instances_content HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer qIFJhw2JkGbgBToJiuKgYNSKuFgnQlYX +``` + +HTTP response + +```http +.HTTP/1.1 200 OK +Server: nginx/1.14.0 (Ubuntu) +Date: Thu, 09 Sep 2021 14:19:53 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 3 +Connection: keep-alive +Set-Cookie: session_id=321df9a60ac919141432e830cfcd8cb306f31877; expires=Thu, 09 Sep 2021 15:19:53 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +[] +``` + + +### Creating a VIM account + +OSM client command + +```bash +$ osm vim-create --name openvim-site \ + --auth_url http://10.10.10.10:9080/openvim \ + --account_type openvim --description "Openvim site" \ + --tenant osm --user dummy --password dummy +59b92c04-29fa-42a7-923e-63322240b80e +``` + +HTTP request + +```http +POST /osm/admin/v1/vim_accounts HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/yaml +Authorization: Bearer TuD41hLjDvjlR2cPcAFvWcr6FGvRhIk2 +Content-Length: 196 + +{"name": "openvim-site", "vim_type": "openvim", "description": "Openvim site", "vim_url": "http://10.10.10.10:9080/openvim", "vim_user": "dummy", "vim_password": "dummy", "vim_tenant_name": "osm"} +``` + +HTTP response + +```http +HTTP/1.1 202 Accepted +Server: nginx/1.14.0 (Ubuntu) +Date: Wed, 08 Sep 
2021 17:52:11 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 108 +Connection: keep-alive +Location: /osm/admin/v1/vim_accounts/59b92c04-29fa-42a7-923e-63322240b80e +Set-Cookie: session_id=4cd3ace1f2635ca888bbbb6d24a5905540345809; expires=Wed, 08 Sep 2021 18:52:11 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +{ + "id": "59b92c04-29fa-42a7-923e-63322240b80e", + "op_id": "59b92c04-29fa-42a7-923e-63322240b80e:0" +} +``` + +Notice VIM account names have to be unique. In fact, OSM NBI enforces that. +If you try creating another VIM account with the same name, you get an error: + +```bash +$ curl localhost/osm/admin/v1/vim_accounts \ + -v -X POST \ + -H 'Authorization: Bearer TuD41hLjDvjlR2cPcAFvWcr6FGvRhIk2' \ + -H 'Content-Type: application/yaml' \ + -d'{"name": "openvim-site", "vim_type": "openvim", "description": "Openvim site", "vim_url": "http://10.10.10.10:9080/openvim", "vim_user": "dummy", "vim_password": "dummy", "vim_tenant_name": "osm"}' + +... +HTTP/1.1 409 Conflict +... +--- +code: CONFLICT +detail: name 'openvim-site' already exists for vim_accounts +status: 409 +``` + + +### KNF service onboarding and instantiation + +Example flow from OSM manual section: 5.6.5.1 KNF Helm Chart + +- https://osm.etsi.org/docs/user-guide/05-osm-usage.html#knf-helm-chart + +**NB**. Download package tarballs from: + +- https://osm-download.etsi.org/ftp/Packages/examples/ + +the repo in section 5.6.5.1 is outdated. 
+ + +#### Onboarding + +OSM client command to upload a package with a VNFD for an Open LDAP KNF: + +```bash +$ osm nfpkg-create openldap_knf.tar.gz +``` + +HTTP request + +```http +POST /osm/vnfpkgm/v1/vnf_packages_content HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/gzip +Authorization: Bearer nOcHehp8wJcxSze8lJFzEKUBTI9iOdgk +Content-Filename: openldap_knf.tar.gz +Content-File-MD5: 6f10bac4462725413f4e14f185619ead +Content-Length: 449 + +......._..openldap_knf.tar... +``` + +HTTP response + +```http +HTTP/1.1 201 Created +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 10:07:25 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 53 +Connection: keep-alive +Location: /osm/vnfpkgm/v1/vnf_packages_content/d506d18f-0738-42ab-8b45-cfa98da38e7a +Set-Cookie: session_id=78799fcfb8463bfb1da410066fefa6c89e9ed1ec; expires=Fri, 10 Sep 2021 11:07:24 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +{ + "id": "d506d18f-0738-42ab-8b45-cfa98da38e7a" +} +``` + +Notice OSM NBI enforces uniqueness of VNFD IDs. If you try uploading another +package with a VNFD having the same ID as the one we've just uploaded, OSM +NBI will complain loudly: + +```http +HTTP/1.1 409 Conflict +... 
+{ + "code": "CONFLICT", + "status": 409, + "detail": "vnfd with id 'openldap_knf' already exists for this project" +} +``` + +OSM client command to upload a package with a NSD for the Open LDAP KNF +defined by the previous package: + +```bash +$ osm nspkg-create openldap_ns.tar.gz +``` + +HTTP request + +```http +POST /osm/nsd/v1/ns_descriptors_content HTTP/1.1 +Host: localhost +User-Agent: PycURL/7.43.0.6 libcurl/7.58.0 OpenSSL/1.1.1 zlib/1.2.11 libidn2/2.0.4 libpsl/0.19.1 (+libidn2/2.0.4) nghttp2/1.30.0 librtmp/2.3 +Accept: application/json +Content-Type: application/gzip +Authorization: Bearer G3zXf7lFs91YmewaUQnU5yLc0hOMUyBD +Content-Filename: openldap_ns.tar.gz +Content-File-MD5: 38f617220c88c1c32a8c9be55d781041 +Content-Length: 977 + +......._..openldap_ns.tar.. +``` + +HTTP response + +```http +HTTP/1.1 201 Created +Server: nginx/1.14.0 (Ubuntu) +Date: Fri, 10 Sep 2021 10:10:37 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 53 +Connection: keep-alive +Location: /osm/nsd/v1/ns_descriptors_content/aba58e40-d65f-4f4e-be0a-e248c14d3e03 +Set-Cookie: session_id=e9ee44a81f693d768ffe4b7265ab8cfbcef078c0; expires=Fri, 10 Sep 2021 11:10:35 GMT; HttpOnly; Max-Age=3600; Path=/; Secure + +{ + "id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03" +} +``` + +Notice OSM NBI enforces uniqueness of NSD IDs. If you try uploading another +package with a NSD having the same ID as the one we've just uploaded, OSM +NBI will complain loudly: + +```http +HTTP/1.1 409 Conflict +... +{ + "code": "CONFLICT", + "status": 409, + "detail": "nsd with id 'openldap_ns' already exists for this project" +} +``` + +#### NS instantiation + +OSM client command to create an NS instance using the OpenLDAP chart uploaded +by the previous commands. Notice we use the VIM account name and OSM client +looks up the corresponding ID for us. Notice the name-ID lookup works because +OSM NBI enforces VIM name uniqueness---see earlier note about it. 
+ +```bash +$ osm ns-create --ns_name ldap --nsd_name openldap_ns --vim_account mylocation1 +0335c32c-d28c-4d79-9b94-0ffa36326932 +``` + +[HTTP message flow](./message-flow.ns-create.md) + +OSM client command to create a second NS instance from the same chart but +this time with two replicas. Notice we use the VIM account ID this time and +OSM will use that ID as is. (**Question**: what's the algo to determine if +a string is a name or an ID?! Possibly another sore point here...) + +```bash +$ osm ns-create --ns_name ldap2 --nsd_name openldap_ns \ + --vim_account 4a4425f7-3e72-4d45-a4ec-4241186f3547 \ + --config '{additionalParamsForVnf: [{"member-vnf-index": "openldap", additionalParamsForKdu: [{ kdu_name: "ldap", "additionalParams": {"replicaCount": "2"}}]}]}' +136fcc46-c363-4d74-af14-c115fff7d80a +``` + +[HTTP message flow](./message-flow.ns-create2.md) + +Notice OSM NBI doesn't enforce uniqueness of NS names. In fact, it lets you +happily duplicate e.g. the `ldap` name we created earlier: + +```bash +$ curl localhost/osm/nslcm/v1/ns_instances_content \ + -v -X POST \ + -H 'Authorization: Bearer 0WhgBufy1Wt82NbF9OsmftwpRfcsV4sU' \ + -H 'Content-Type: application/yaml' \ + -d'{"nsdId": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", "nsName": "ldap", "nsDescription": "default description", "vimAccountId": "4a4425f7-3e72-4d45-a4ec-4241186f3547"}' +... +HTTP/1.1 201 Created +... +--- +id: 794ef9a2-8bbb-42c1-869a-bab6422982ec +nslcmop_id: 0fdfaa6a-b742-480c-9701-122b3f732e4f +``` + +#### NS upgrade + +OSM client command to upgrade the first LDAP NS we created earlier. Notice +OSM client looks up the instance ID from the name we specify in the command +line (`ldap`), but this is **not** a good idea since, as noted earlier, +NS instance names aren't unique. To avoid wreaking havoc we should always +use NS instance IDs but I don't think OSM client actually supports that? 
+ +```bash +$ osm ns-action ldap --vnf_name openldap --kdu_name ldap --action_name upgrade --params '{kdu_model: "stable/openldap:1.2.2"}' +5c6e4a0d-6238-4aa8-9147-e4e738bf16f4 +``` + +[HTTP message flow](./message-flow.ns-action.upgrade.md) + +This upgrade op eventually failed---OSM client always returns 0 since the +actual op gets executed server-side asynchronously. In fact, here's the +instance history after the op completed server-side: + +``` + ID action start end status +------------------------------------ ----------- -------------------- -------------------- ------ +0c5464da-df42-498e-b306-76d470b76a0d instantiate Sep-10-2021 14:40:26 Sep-10-2021 14:41:53 OK +5c6e4a0d-6238-4aa8-9147-e4e738bf16f4 action Sep-10-2021 16:53:57 Sep-10-2021 16:54:27 Failed +``` + +The error: + +``` +FAILED Executing kdu upgrade: Error executing command: +/usr/local/bin/helm3 upgrade stable-openldap-1-2-3-0098084071 stable/openldap --namespace fada443a-905c-4241-8a33-4dcdbdac55e7 --atomic --output yaml --timeout 1800s --version 1.2.2 + +Output: Error: UPGRADE FAILED: an error occurred while rolling back the release. +original upgrade error: +cannot patch "stable-openldap-1-2-3-0098084071" with kind Service: Service "stable-openldap-1-2-3-0098084071" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +OSM client command to up the number of replicas of the second LDAP NS we +created earlier. Notice this is slightly different than the example in the +OSM manual since I got rid of the `kdu_model` param that doesn't work as you +can see from the outcome of running the previous upgrade action. + +```bash +$ osm ns-action ldap2 --vnf_name openldap --kdu_name ldap --action_name upgrade --params '{"replicaCount": "3"}' +f92f746f-c10a-448e-84e1-3acfd8b684cb +``` + +[HTTP message flow](./message-flow.ns-action.upgrade2.md) + +But in the end this upgrade action didn't work either. Exactly the same error +as before got recorded in the NS instance action history. 
diff --git a/_tmp/osm-mitm/nfpkg-create.pcap b/_tmp/osm-mitm/nfpkg-create.pcap new file mode 100644 index 0000000..874e419 Binary files /dev/null and b/_tmp/osm-mitm/nfpkg-create.pcap differ diff --git a/_tmp/osm-mitm/ns-action.upgrade.pcap b/_tmp/osm-mitm/ns-action.upgrade.pcap new file mode 100644 index 0000000..f8f9296 Binary files /dev/null and b/_tmp/osm-mitm/ns-action.upgrade.pcap differ diff --git a/_tmp/osm-mitm/ns-action.upgrade2.pcap b/_tmp/osm-mitm/ns-action.upgrade2.pcap new file mode 100644 index 0000000..0bb369d Binary files /dev/null and b/_tmp/osm-mitm/ns-action.upgrade2.pcap differ diff --git a/_tmp/osm-mitm/ns-create.pcap b/_tmp/osm-mitm/ns-create.pcap new file mode 100644 index 0000000..24b9371 Binary files /dev/null and b/_tmp/osm-mitm/ns-create.pcap differ diff --git a/_tmp/osm-mitm/ns-create2.pcap b/_tmp/osm-mitm/ns-create2.pcap new file mode 100644 index 0000000..2ed2311 Binary files /dev/null and b/_tmp/osm-mitm/ns-create2.pcap differ diff --git a/_tmp/osm-mitm/nspkg-create.pcap b/_tmp/osm-mitm/nspkg-create.pcap new file mode 100644 index 0000000..c32fc0f Binary files /dev/null and b/_tmp/osm-mitm/nspkg-create.pcap differ diff --git a/_tmp/osm-mitm/stunnel-mitm-proxy.conf b/_tmp/osm-mitm/stunnel-mitm-proxy.conf new file mode 100644 index 0000000..b743e9b --- /dev/null +++ b/_tmp/osm-mitm/stunnel-mitm-proxy.conf @@ -0,0 +1,18 @@ +# see: https://gist.github.com/jeremiahsnapp/6426298 + +debug = 3 + +#foreground = yes + +pid = + +[server] +client = no +# cert= ./server.pem +accept = 127.0.0.1:8080 +connect = 127.0.0.1:9999 + +[client] +client = yes +accept = 127.0.0.1:8080 +connect = 127.0.0.1:9999 diff --git a/_tmp/osm-pkgs/README.md b/_tmp/osm-pkgs/README.md new file mode 100644 index 0000000..72efdb2 --- /dev/null +++ b/_tmp/osm-pkgs/README.md @@ -0,0 +1,41 @@ +OSM Packages for demo +===================== + +Original packages downloaded from: + +- https://osm-download.etsi.org/ftp/Packages/examples/ + +You need to also add OSM repos before 
you can create an NS instance from +these packages. + + +OpenLDAP +-------- + +### Issue +The OpenLDAP Helm chart in the original is 1.2.3 which doesn't work when +upgrading the NS instance---see `clusterIP` issue documented in the [OSM +client HTTP message flows][msg-flows]. + +### Fix +We modified the VNFD to use version 1.2.7 instead which doesn't have this +problem. The `openldap_knf.tar.gz` file in this dir contains the fix. + +Here's what I did. + +1. Extract original package to `openldap_knf` dir. +2. Change Helm chart version to `1.2.7`. +3. Repackage. + +Here are the commands for the repackage step + +```bash +$ md5sum openldap_knf/openldap_vnfd.yaml > openldap_knf/checksums.txt +$ rm openldap_knf.tar.gz +$ tar -czvf openldap_knf.tar.gz openldap_knf +``` + + + + +[msg-flows]: ../osm-mitm/message-flows.md \ No newline at end of file diff --git a/_tmp/osm-pkgs/openldap_knf.tar.gz b/_tmp/osm-pkgs/openldap_knf.tar.gz new file mode 100644 index 0000000..ab5cd67 Binary files /dev/null and b/_tmp/osm-pkgs/openldap_knf.tar.gz differ diff --git a/_tmp/osm-pkgs/openldap_knf/checksums.txt b/_tmp/osm-pkgs/openldap_knf/checksums.txt new file mode 100644 index 0000000..8c84c61 --- /dev/null +++ b/_tmp/osm-pkgs/openldap_knf/checksums.txt @@ -0,0 +1 @@ +7044f64c16d4ef3eeef7f8668a4dc5a1 openldap_knf/openldap_vnfd.yaml diff --git a/_tmp/osm-pkgs/openldap_knf/openldap_vnfd.yaml b/_tmp/osm-pkgs/openldap_knf/openldap_vnfd.yaml new file mode 100644 index 0000000..41795bf --- /dev/null +++ b/_tmp/osm-pkgs/openldap_knf/openldap_vnfd.yaml @@ -0,0 +1,18 @@ +vnfd: + description: KNF with single KDU using a helm-chart for openldap version 1.2.7 + df: + - id: default-df + ext-cpd: + - id: mgmt-ext + k8s-cluster-net: mgmtnet + id: openldap_knf + k8s-cluster: + nets: + - id: mgmtnet + kdu: + - name: ldap + helm-chart: stable/openldap:1.2.7 + mgmt-cp: mgmt-ext + product-name: openldap_knf + provider: Telefonica + version: '1.0' diff --git a/_tmp/osm-pkgs/openldap_ns.tar.gz 
b/_tmp/osm-pkgs/openldap_ns.tar.gz new file mode 100644 index 0000000..0cbac79 Binary files /dev/null and b/_tmp/osm-pkgs/openldap_ns.tar.gz differ diff --git a/_tmp/osm-pkgs/openldap_ns/README.md b/_tmp/osm-pkgs/openldap_ns/README.md new file mode 100644 index 0000000..8424611 --- /dev/null +++ b/_tmp/osm-pkgs/openldap_ns/README.md @@ -0,0 +1,26 @@ +# SIMPLE OPEN-LDAP CHART + +Descriptors that installs an openldap version 1.2.1 chart in a K8s cluster + +There is one VNF (openldap\_vnf) with only one KDU. + +There is one NS that connects the VNF to a mgmt network + +## Onboarding and instantiation + +```bash +osm nfpkg-create openldap_knf.tar.gz +osm nspkg-create openldap_ns.tar.gz +osm ns-create --ns_name ldap --nsd_name openldap_ns --vim_account | --ssh_keys ${HOME}/.ssh/id_rsa.pub +``` + +### Instantiation option + +Some parameters could be passed during the instantiation. + +* replicaCount: Number of Open LDAP replicas that will be created + +```bash +osm ns-create --ns_name ldap --nsd_name openldap_ns --vim_account | --config '{additionalParamsForVnf: [{"member-vnf-index": "openldap", "additionalParams": {"replicaCount": "2"}}]}' +``` + diff --git a/_tmp/osm-pkgs/openldap_ns/checksums.txt b/_tmp/osm-pkgs/openldap_ns/checksums.txt new file mode 100644 index 0000000..f07358d --- /dev/null +++ b/_tmp/osm-pkgs/openldap_ns/checksums.txt @@ -0,0 +1,2 @@ +6cbc0db17616eff57c60efa0eb15ac76 openldap_ns/openldap_nsd.yaml +c122710acb043b99be209fefd9ae2032 openldap_ns/README.md diff --git a/_tmp/osm-pkgs/openldap_ns/openldap_nsd.yaml b/_tmp/osm-pkgs/openldap_ns/openldap_nsd.yaml new file mode 100644 index 0000000..b995164 --- /dev/null +++ b/_tmp/osm-pkgs/openldap_ns/openldap_nsd.yaml @@ -0,0 +1,22 @@ +nsd: + nsd: + - description: NS consisting of a single KNF openldap_knf connected to mgmt network + designer: OSM + df: + - id: default-df + vnf-profile: + - id: openldap + virtual-link-connectivity: + - constituent-cpd-id: + - constituent-base-element-id: openldap + 
constituent-cpd-id: mgmt-ext + virtual-link-profile-id: mgmtnet + vnfd-id: openldap_knf + id: openldap_ns + name: openldap_ns + version: '1.0' + virtual-link-desc: + - id: mgmtnet + mgmt-network: 'true' + vnfd-id: + - openldap_knf diff --git a/_tmp/osmops-packaging/failed-attempt-to-use-targz.md b/_tmp/osmops-packaging/failed-attempt-to-use-targz.md new file mode 100644 index 0000000..4f8e2e2 --- /dev/null +++ b/_tmp/osmops-packaging/failed-attempt-to-use-targz.md @@ -0,0 +1,1164 @@ +mactel:source-watcher andrea$ nix-shell +copying path '/nix/store/g3kd1m6ilslxwca72j1yh0r1hg7r8gn8-source' from 'https://cache.nixos.org'... +this derivation will be built: + /nix/store/yw6ndp40vvpa5784j8j804sirkpybm56-kubebuilder-3.1.0.drv +these 182 paths will be fetched (353.44 MiB download, 1657.17 MiB unpacked): + /nix/store/00ngdc8ay9srb7ckm99kx6ims2siwqgq-apple-framework-CFNetwork + /nix/store/00vcq1i9x1dwwn21dqkagybcxa1lcnwi-xz-5.2.5-bin + /nix/store/0idd3d3y4vbiilwzhjq1my4npac6bp3r-libxml2-2.9.12 + /nix/store/0yrrpxkc6l1kvdf491qa1m7scamsmqb7-libcxxabi-7.1.0 + /nix/store/156rmc1d7m0w0awhdn8jky03i1nyf7rh-perl5.34.0-Test-Needs-0.002006 + /nix/store/1ahb54by7x5h7yn8703nq25fiwdzg4ix-nghttp2-1.43.0-lib + /nix/store/1h653vqxkd69ylzgxbdrj4sx7idki7g5-apple-framework-CoreVideo + /nix/store/1xjwb0bd02n1y621mjqani68kh6n6gig-perl-5.34.0 + /nix/store/2cn4lldfz1w5wddxc00za2igd7v82n0f-go-outline-unstable-2018-11-22 + /nix/store/2fzds9cqx148i1lf84sc9i0982bv9rbx-findutils-4.8.0 + /nix/store/2m4j2za4hmj83f0677yiznff129vdf3k-apple-framework-DiskArbitration + /nix/store/2qnpghhvim4yv0cb0ds5swcll09k841m-apple-framework-CoreGraphics + /nix/store/34apg7fbp8c6ppkfh5bzp0nbpacj2h27-bash-interactive-5.1-p12 + /nix/store/3iz44xrsq7niprab0majz08hjm6djiv7-indent-2.2.12 + /nix/store/3n9fvf4ybdwizym6cwnjf6f57p7mvrab-gzip-1.10 + /nix/store/3viavkq7bpd0phpy5aa0lqblhi63z2w0-gotests-1.5.3 + /nix/store/3wzawr2vj64y2l47nak2vvwckzci8d64-apple-framework-Metal + 
/nix/store/3yq0m0kprzf172ljfwdl6sidvf6v6j9i-stdenv-darwin + /nix/store/3yrzfhqvq41nh8fi8mjm77hwhaqprzff-apple-framework-QuartzCore + /nix/store/3zynnka2fj57s030kdvp5121cb5mrsvf-apple-framework-CoreMIDI + /nix/store/4ij9w5ck9m2ijpqpfjz46cnsa01nlibl-perl5.34.0-Try-Tiny-0.30 + /nix/store/4j3q5kmmdwkgppn6g8hna8wg6a4zrvxd-cctools-binutils-darwin-wrapper-949.0.1 + /nix/store/4npah0kdy1jgqsbb4sw2vpbv9l2zsh18-Libsystem-1238.60.2 + /nix/store/4xp86qi542rlddjxndff141l4cyqw9ri-libxml2-2.9.12 + /nix/store/55nwvpwcx5hvp9ky2yf3n38xfj171idl-compiler-rt-libc-7.1.0 + /nix/store/5cv4vf1jybf00q4n82ys54pml0gvhr7d-lzo-2.10 + /nix/store/5f1qxwxxp9mag9r91mdfs3w1x3y99kxm-bzip2-1.0.6.0.2 + /nix/store/5hpyrir2f419dch43nlkz0mjzznnyw1j-apple-framework-IOKit + /nix/store/5kkpswp4zyzwwfwhccaszvrrfwifxg6r-perl5.34.0-IO-HTML-1.004 + /nix/store/5m4flh5gnzd939x16p9m063i1im9is5w-gopls-0.7.0 + /nix/store/5mkv08mg9kk2gx12j0abfp5x348yf691-perl5.34.0-CGI-Fast-2.15 + /nix/store/5vw0211970xi853z6l9a42s0kdbzv3b0-libkrb5-1.18 + /nix/store/5zwjd1qglpnipr0f2j5lf8lngp5244b0-Toolchains + /nix/store/627zfcplv9dd15i19hnbjjwi18yqp3a5-nix.xcconfig + /nix/store/645ys9v8wd3grfvln7mgvnc8854yhdrl-apple-framework-Accelerate + /nix/store/68yn2imvrrw86mbiwkk195n16fmm2f7f-apple-framework-CoreImage + /nix/store/6biazmfb7jq7y0zxqf4r6b3nxx24n8f9-cctools-port-949.0.1 + /nix/store/6i6c9gz5d79b97gnr0a12yn3d49dyrqy-apple-framework-AppKit + /nix/store/6icby0hr0lp7gswwqq207b8lvvjxi9wq-perl5.34.0-Test-Fatal-0.016 + /nix/store/6krkglhc5la6a9gjryibzqpiiwkx03nd-Platforms + /nix/store/6wf3aa69wzi83lxz1gy41y90fiypz767-perl5.34.0-FCGI-0.79 + /nix/store/6y7x4wlzbhf5m0lzacykmgnz4ln6dmgy-openvpn-2.5.2 + /nix/store/715y5lnw12vdcc3hh9sc9vbkdi6c83x6-bash-4.4-p23 + /nix/store/7a27b4f7c3sqbjc1vg0fjnplkqd2b27d-expat-2.4.1 + /nix/store/7i321ai77n1y10vdvb7rsnk1ng8vp11b-ed-1.17 + /nix/store/7xp1yxk8d67ik4rfcbxxjrfv6fnch0p5-iana-etc-20210225 + /nix/store/7zjv0kdb724q9lpjgx9k5wk58dg1h47v-apple-framework-ApplicationServices + 
/nix/store/87y9iidffga2j5grsdvvwfj76l6xr6lb-apple-framework-CoreBluetooth + /nix/store/8a8c4ahdnnp3p8nf1r4q2g881yz0cp7v-perl5.34.0-CGI-4.51 + /nix/store/8hj3symc74118i5lx2mk5ckawpy5yhzg-kubectl-1.21.3 + /nix/store/8jv49b53ybn6fnwhd0dm2vk42w9zzpcv-expand-response-params + /nix/store/9wy7j2x17zbg471ag8n8wnvakf2dga4k-perl5.34.0-Net-HTTP-6.19 + /nix/store/a2y06jcg1jsghnrakw4qr27vlm1cdzym-apple-framework-OpenCL + /nix/store/a74jwf4lgca5d05klna9slxydgdsngk9-tzdata-2021a + /nix/store/aa3xbipcdryaf3mn9ic5y1smzlxngvfz-clang-7.1.0-lib + /nix/store/ad19ya4siaxhz63w0m8xh97067iahks0-python3-3.9.6 + /nix/store/adigdkh0pibb4afqf5q2sv3503ln0iyb-diffutils-3.7 + /nix/store/aqrq990va9j1v6gvrgxksr5av9vpcxz3-gnumake-4.3 + /nix/store/b16jnrlaclssglx8zszp4dwp0i893y5w-apple-framework-CoreWLAN + /nix/store/b2wj786d4qizfamzwhc6qfsb1yaaqsm5-curl-7.76.1 + /nix/store/b73lrrsjx4bvp8qh7jairv55syvagkwd-perl5.34.0-HTML-Tagset-3.20 + /nix/store/baa2clyal7jbzjfcaj62sjzjpm407ixh-perl5.34.0-WWW-RobotRules-6.02 + /nix/store/bisagmfn1vpfswv1r1vpa1d8hh90scsz-apple-framework-IOBluetooth + /nix/store/bk8h2xqxsxrvbdf96cgdr5xvw2yafp8j-nghttp2-1.43.0-lib + /nix/store/bmxvqnmx5afyvhk3iciapgfd71yxdzp7-libfido2-1.7.0 + /nix/store/bx78qsqny0bfaf6s6idjmjx83j470n3r-apple-framework-CoreFoundation + /nix/store/bzkf6rs464xs44xqkcrkb0czidb122jg-apple-framework-IOSurface + /nix/store/c072idq716ibax9jv8rhyb64rhnxzmfj-apple-framework-OpenGL + /nix/store/c8wnza9p6j649x3j4w4rgkzcy43p2f8m-gperf-3.1 + /nix/store/chpxcjxdq0cyxcs5dc2zkkaxivwih0jj-apple-framework-CoreData + /nix/store/ckbmjdmsa2gx240sz4vr9z8hivn9gp5x-Security-55471.14.18 + /nix/store/cklqcidnwvvmp6h9gpdbnldmkak0mz4h-perl5.34.0-HTML-TagCloud-0.38 + /nix/store/clgjx9nnbgcb5zgndhn45b2kas7d3ily-perl5.34.0-HTTP-Date-6.05 + /nix/store/cqadarnj29fpv1vm8bzbh6dpb8aaxzjz-patch-2.7.6 + /nix/store/crjmgrws5w2pjhm5k887m93rrkcf36hb-coreutils-8.32 + /nix/store/crz75rnr1xsp57snim91irbrgi2kxi5d-apple-framework-SystemConfiguration + 
/nix/store/cy3jlnw70j7w7rwjdz7r7b3kg4hcfcmf-SDKs + /nix/store/cz81sbpnda675pfq7gfnsdr9k8llgfw4-hidapi-0.10.1 + /nix/store/d051nmxsz58mjirkwg5x8jlbd50gy1ld-xcodebuild-0.1.2-pre + /nix/store/d8b3d560b7jyszsz0j4bx7qpxmagnfij-unifdef-2.12 + /nix/store/dh67n9z3mhlmmcj4nyk7pm9nhcas9s8y-apple-framework-CoreServices + /nix/store/dil64k4f2ff0an9p57rvhpxyw5vl1qxk-bash-interactive-5.1-p12-man + /nix/store/djk25cq1wr1vs7qbybzak4a6alhrsd8x-configd-453.19 + /nix/store/dlh18gcgvy32811cnirjmadpn9x6lahg-perl5.34.0-Encode-Locale-1.05 + /nix/store/dmlj5yb048cs7njcrc52hnzk294vd2bd-xcbuild-0.1.2-pre + /nix/store/dw0qfmxss8if5ks8qr0rk2sb8wnng3dw-libtapi-1100.0.11 + /nix/store/f2qq98ksl340z3ncwcwx6fcjp4mc6iyy-brotli-1.0.9-lib + /nix/store/falx4gakl6k1q64aqr4f8mvy8vfqcqaq-cctools-binutils-darwin-949.0.1 + /nix/store/fbhp0ph0366r5614xlincnvx6qj7rlsi-llvm-7.1.0-lib + /nix/store/fci25bpkavmg481ws49qdjvv9k4vmm6y-ncurses-6.2 + /nix/store/fpybf6mdl194p6sbnh3hq2zfm4wp7rxk-ctags-816 + /nix/store/fsz5rxl37pamwhqkmp9w4jryfc4pfky3-libcxx-7.1.0 + /nix/store/fx1yy6wzxm2w6b6xv177ryraa7vzjqa1-brotli-1.0.9-lib + /nix/store/g63nbvmh88c0l1kz3zq5x48dam13jdcs-go-1.16.6 + /nix/store/ggr82sipdfkxszl9qnbd0zg0hlqr7b1c-libssh2-1.9.0 + /nix/store/gswa3ywvzhm145g8jjhaihsa8qyjmc06-apple-framework-Foundation + /nix/store/h1cjrnwdmvmrh2mspkrr0agv0qn257dj-perl5.34.0-File-Listing-6.14 + /nix/store/hdrwrac2r06nmcifk2asg2j7hinb5qpb-gettext-0.21 + /nix/store/hnsbw22x8djbq0p70m2llq4qb3gbwmmi-ncurses-6.3 + /nix/store/hpgwq0an0vrjdqvlgj8szcqsyn4m0mg4-readline-8.1p0 + /nix/store/hvqn0g8wf6pxqpgcz1sdnwyyz5dcg1z0-kind-0.11.1 + /nix/store/hww71ca6bzlzyq1x6sbdvwdb8x09v2np-gopkgs-2.1.2 + /nix/store/i6rzv6vw3zx060w6r4spbvlgpdjgg2k4-apple-framework-ServiceManagement + /nix/store/ig8x9si4x7kwxv2qimvdwk2wpafv21gp-gettext-0.21 + /nix/store/ihlm88458pygspjfd44rzfl304rg362s-git-2.32.0 + /nix/store/j998frnsa1hxkfbysq0bkw6hjfrd928v-bzip2-1.0.6.0.2-bin + /nix/store/jggqvhq5db9c0q635i8r8qvlc30g72sv-libcbor-0.8.0 + 
/nix/store/jiwzfi73h9zadmnb6qgjlwjisan3sznd-apple-framework-AudioUnit + /nix/store/jrbfjc704hxs70q3ljjpyl0f4j7a57k9-apple-framework-Carbon + /nix/store/jx31ah14p6pf488i7wwd88y2izmxi0wj-clang-wrapper-7.1.0 + /nix/store/k4cs7vm12d9lb59jvfzv7cps73rwmn51-fluxcd-0.16.1 + /nix/store/k5szspjyr3axcj91vzmdy0imyswyc7fa-git-2.32.0-doc + /nix/store/kdnlp3kz0x7wbagn960i3rw657ws27hq-apple-framework-CoreText + /nix/store/kmmqfcwps62k79yb5rljgkydff5mkgfp-perl5.34.0-HTTP-Daemon-6.01 + /nix/store/l2mvac03b398x7jnhbqdf9051k4rsini-source + /nix/store/lpzfxmhf3z77qgpcxc3zfrbb2czzgx4q-apple-framework-NetFS + /nix/store/lr3fvwhvd8zpmjbibd3h02qdx85mxrv4-gnugrep-3.6 + /nix/store/lv7ycndyrcd0n8pn4v85wfjc62c8qbbv-bootstrap_cmds-121 + /nix/store/lw8gcf5givnwpflpgfkmayfq6by40wfr-compiler-rt-libc-7.1.0-dev + /nix/store/m8jmp0ffs11r0bigxd107v4j58r1amss-libssh2-1.9.0 + /nix/store/mbm45kwp1vqwnk52cp0w9dy4f0qsxljb-perl5.34.0-HTTP-Message-6.26 + /nix/store/mg2ar6f1r2nssalsss32lyqifwlhhmzb-pcre-8.44 + /nix/store/mjlkzmwrih5bz2hzi9yzm5ngridf0n53-pcre2-10.36 + /nix/store/mkr49brmqqad1yjac4lgffl1rjk0bf6m-readline-6.3p08 + /nix/store/mq766n41v9djm2rf74dpg4hd7n5mbwwh-xz-5.2.5 + /nix/store/mrdsjbp54ys55dmav5501k59jqk5vy2b-libkrb5-1.18 + /nix/store/mrvcsb49ak8fhf40jpc1r11vkbvqkind-bash-interactive-5.1-p12-info + /nix/store/mwhc7d6g1zzrvjvqwkf988gbf5q03lrf-perl5.34.0-FCGI-ProcManager-0.28 + /nix/store/mzzra6k9pi86yasa7x7ql4rc2mdfapi1-apple-framework-OpenDirectory + /nix/store/n4g6bc1gs1jjhx1hzznqh3950ix60v5i-objc4-709.1 + /nix/store/ncyf8lxiaj4j3d1c3fwxkghy2vjwbshl-apple-framework-UIFoundation + /nix/store/niqwc81ynr8csa9rdz9l0zg2qcjhzspb-curl-7.76.1 + /nix/store/njfdj5rp1r3yc1sv9r9ffph945928nx6-perl5.34.0-HTTP-Cookies-6.09 + /nix/store/nmn13jb7mr8js8dnfpqq02xh6i5yhpsw-delve-1.7.0 + /nix/store/np1cc9lc9jn9pxbfzkgbz24c8pxbibwa-llvm-7.1.0 + /nix/store/nsikv84v16n9dgmfnmpr5fc572nhfs1y-bash-interactive-5.1-p12-dev + /nix/store/nvvdn0i838cr7cp1q7k6sglps5qh2r70-kubebuilder-3.1.0-go-modules + 
/nix/store/nvypxmbcfc8yxrpgsgxhbnp6s2d4a5az-libedit-20210522-3.1 + /nix/store/p04pcf2xq0w8b6awc02vmvd6p66nnsjn-gomodifytags-1.6.0 + /nix/store/p64d7siwnvf3fpscjzifgn90q859wn3w-apple-framework-CoreAudio + /nix/store/p7pw00idxa2b4ywb207d29qik09bs5h3-perl5.34.0-LWP-MediaTypes-6.04 + /nix/store/phq0s0i1vhlvs7y77gyygnyd1zm76mlh-gawk-5.1.0 + /nix/store/pzjrm7c57jqpsajpbfzrx093l7dm60v6-openssh-8.6p1 + /nix/store/q647daz7i71y30birkp6gls1r6fffwp2-perl5.34.0-Test-RequiresInternet-0.05 + /nix/store/q7wxq69v35d0bxiqrycldz00wg9kxqzd-gnutar-1.34 + /nix/store/qc7hpsj9h68fnidcbvq4g6lih3666wv7-perl5.34.0-TimeDate-2.33 + /nix/store/qgygs5g9l0zxhs8fzjjqpmmlzps1hlrd-libiconv-50 + /nix/store/qkjmhc3zlsm09rn9f4h9m410m844apbm-flex-2.6.4 + /nix/store/qmzjj3g1rbqyhi84ii8chr0g7sh8ahm0-perl5.34.0-libwww-perl-6.49 + /nix/store/qqigwc2653h1jhidd7jg9y877yw56yqd-impl-1.0.0 + /nix/store/r1m56hdkhap9pcnngp5lzmgwa4yl1x29-apple-framework-Security + /nix/store/r2w4s36nr4w9gnchz4nyzj64nyaig9cr-ICU-66108 + /nix/store/rsza0nf7wwmv0c17q0x9yhf79ycvqgm1-openssl-1.1.1k + /nix/store/s7bqhhx7pwwif7rn708gnqq0giia5v89-hook + /nix/store/sdbsgmdc9xvlngvd2m2nr2vvzyzbcmyh-perl5.34.0-HTML-Parser-3.75 + /nix/store/shp60sgvnh5is0mcd7w86gq531ws3sxn-openssl-1.1.1k + /nix/store/szcfcci0blvj5xz5snygxy3271dbzlc0-bison-3.7.6 + /nix/store/v4bzbvfhzsq0294vyxzy06c3c8b9hjg6-kustomize-4.2.0 + /nix/store/v88kadzxxwdkwmksjprhabjpdz43ic49-hook + /nix/store/va4ipz29l8a1v9gvw7x3r5pp5zwv254b-sqlite-3.35.5 + /nix/store/vbmi4ha8c014q3bri9iriggbd8hqdqss-adv_cmds-119-locale + /nix/store/vxc4vzqx02hf3795vk5d9an3xvx2ml90-gnum4-1.4.18 + /nix/store/w9vgngyar0ykvmamm6k0bf5k9yn9kbhb-swift-corefoundation + /nix/store/wia4ggzr91yglpq6z1lg48qnv5z4yy94-binutils-2.35.1 + /nix/store/wkgrylws6qa3iajc2py42m5fhyxnwkjv-gmp-6.2.1 + /nix/store/wpx3d215y08c37sc5fzlic5hkn8hhha4-apple-framework-Cocoa + /nix/store/x87bqc42xmskjwf5c2a9bbzkd8aqwrca-zlib-1.2.11 + /nix/store/x9chz553wdb3j2hpq2z5pgg43ysrq87n-perl5.34.0-HTTP-Negotiate-6.01 + 
/nix/store/xcn2cfrpql8panqjqhfxd4sqlzh5ja66-perl5.34.0-URI-5.05 + /nix/store/xhijdap7ln3nw867cq6m3knshww9f2rn-clang-7.1.0 + /nix/store/xz9n2vz4jpkq81x0wxjsxg986gc4pjz1-perl5.34.0-TermReadKey-2.38 + /nix/store/y70lq54ix9qc0hcahnr452z1hag6w5sq-apple-framework-ImageIO + /nix/store/y736kjvzdclbzcxdv5fc9ghb832fq0i7-libcxxabi-7.1.0-dev + /nix/store/yhc9gnqw4vm3b84xqcyfxalph5bng2cg-apple-lib-xpc + /nix/store/yjamjr77klbn539xib8vamx5sp6wcgzs-apple-framework-AudioToolbox + /nix/store/z3yld7y61yiklysjzf6k6kvfihyscxgb-apple-framework-SecurityFoundation + /nix/store/z6ih791dzyjvdqm8gl5glz065kk8zh7p-gnused-4.8 + /nix/store/z7r4pl0q7kvqjgd87zc60jj4b4djhn4h-bash-interactive-5.1-p12-doc + /nix/store/zgpnab89ggi39cvmz6nvwxsav0vfniwj-go-tools-2021.1 + /nix/store/zk81d86gikl6dm96kfylgd2yi7ji8qbm-gdbm-1.20 + /nix/store/znv06z6pld0ivwddj52rishggwc2m94c-libcxx-7.1.0-dev + /nix/store/zyh3yfv4npb5vyhqmp6hswfps0j7smyn-libffi-3.3 +copying path '/nix/store/715y5lnw12vdcc3hh9sc9vbkdi6c83x6-bash-4.4-p23' from 'https://cache.nixos.org'... +copying path '/nix/store/4npah0kdy1jgqsbb4sw2vpbv9l2zsh18-Libsystem-1238.60.2' from 'https://cache.nixos.org'... +copying path '/nix/store/cy3jlnw70j7w7rwjdz7r7b3kg4hcfcmf-SDKs' from 'https://cache.nixos.org'... +copying path '/nix/store/ckbmjdmsa2gx240sz4vr9z8hivn9gp5x-Security-55471.14.18' from 'https://cache.nixos.org'... +copying path '/nix/store/6krkglhc5la6a9gjryibzqpiiwkx03nd-Platforms' from 'https://cache.nixos.org'... +copying path '/nix/store/vbmi4ha8c014q3bri9iriggbd8hqdqss-adv_cmds-119-locale' from 'https://cache.nixos.org'... +copying path '/nix/store/00ngdc8ay9srb7ckm99kx6ims2siwqgq-apple-framework-CFNetwork' from 'https://cache.nixos.org'... +copying path '/nix/store/87y9iidffga2j5grsdvvwfj76l6xr6lb-apple-framework-CoreBluetooth' from 'https://cache.nixos.org'... +copying path '/nix/store/chpxcjxdq0cyxcs5dc2zkkaxivwih0jj-apple-framework-CoreData' from 'https://cache.nixos.org'... 
+copying path '/nix/store/bx78qsqny0bfaf6s6idjmjx83j470n3r-apple-framework-CoreFoundation' from 'https://cache.nixos.org'... +copying path '/nix/store/68yn2imvrrw86mbiwkk195n16fmm2f7f-apple-framework-CoreImage' from 'https://cache.nixos.org'... +copying path '/nix/store/3zynnka2fj57s030kdvp5121cb5mrsvf-apple-framework-CoreMIDI' from 'https://cache.nixos.org'... +copying path '/nix/store/5hpyrir2f419dch43nlkz0mjzznnyw1j-apple-framework-IOKit' from 'https://cache.nixos.org'... +copying path '/nix/store/3wzawr2vj64y2l47nak2vvwckzci8d64-apple-framework-Metal' from 'https://cache.nixos.org'... +copying path '/nix/store/p64d7siwnvf3fpscjzifgn90q859wn3w-apple-framework-CoreAudio' from 'https://cache.nixos.org'... +copying path '/nix/store/2m4j2za4hmj83f0677yiznff129vdf3k-apple-framework-DiskArbitration' from 'https://cache.nixos.org'... +copying path '/nix/store/yjamjr77klbn539xib8vamx5sp6wcgzs-apple-framework-AudioToolbox' from 'https://cache.nixos.org'... +copying path '/nix/store/bisagmfn1vpfswv1r1vpa1d8hh90scsz-apple-framework-IOBluetooth' from 'https://cache.nixos.org'... +copying path '/nix/store/lpzfxmhf3z77qgpcxc3zfrbb2czzgx4q-apple-framework-NetFS' from 'https://cache.nixos.org'... +copying path '/nix/store/mzzra6k9pi86yasa7x7ql4rc2mdfapi1-apple-framework-OpenDirectory' from 'https://cache.nixos.org'... +copying path '/nix/store/c072idq716ibax9jv8rhyb64rhnxzmfj-apple-framework-OpenGL' from 'https://cache.nixos.org'... +copying path '/nix/store/r1m56hdkhap9pcnngp5lzmgwa4yl1x29-apple-framework-Security' from 'https://cache.nixos.org'... +copying path '/nix/store/z3yld7y61yiklysjzf6k6kvfihyscxgb-apple-framework-SecurityFoundation' from 'https://cache.nixos.org'... +copying path '/nix/store/i6rzv6vw3zx060w6r4spbvlgpdjgg2k4-apple-framework-ServiceManagement' from 'https://cache.nixos.org'... +copying path '/nix/store/b16jnrlaclssglx8zszp4dwp0i893y5w-apple-framework-CoreWLAN' from 'https://cache.nixos.org'... 
+copying path '/nix/store/dh67n9z3mhlmmcj4nyk7pm9nhcas9s8y-apple-framework-CoreServices' from 'https://cache.nixos.org'... +copying path '/nix/store/645ys9v8wd3grfvln7mgvnc8854yhdrl-apple-framework-Accelerate' from 'https://cache.nixos.org'... +copying path '/nix/store/crz75rnr1xsp57snim91irbrgi2kxi5d-apple-framework-SystemConfiguration' from 'https://cache.nixos.org'... +copying path '/nix/store/ncyf8lxiaj4j3d1c3fwxkghy2vjwbshl-apple-framework-UIFoundation' from 'https://cache.nixos.org'... +copying path '/nix/store/yhc9gnqw4vm3b84xqcyfxalph5bng2cg-apple-lib-xpc' from 'https://cache.nixos.org'... +copying path '/nix/store/z7r4pl0q7kvqjgd87zc60jj4b4djhn4h-bash-interactive-5.1-p12-doc' from 'https://cache.nixos.org'... +copying path '/nix/store/bzkf6rs464xs44xqkcrkb0czidb122jg-apple-framework-IOSurface' from 'https://cache.nixos.org'... +copying path '/nix/store/mrvcsb49ak8fhf40jpc1r11vkbvqkind-bash-interactive-5.1-p12-info' from 'https://cache.nixos.org'... +copying path '/nix/store/2qnpghhvim4yv0cb0ds5swcll09k841m-apple-framework-CoreGraphics' from 'https://cache.nixos.org'... +copying path '/nix/store/a2y06jcg1jsghnrakw4qr27vlm1cdzym-apple-framework-OpenCL' from 'https://cache.nixos.org'... +copying path '/nix/store/kdnlp3kz0x7wbagn960i3rw657ws27hq-apple-framework-CoreText' from 'https://cache.nixos.org'... +copying path '/nix/store/y70lq54ix9qc0hcahnr452z1hag6w5sq-apple-framework-ImageIO' from 'https://cache.nixos.org'... +copying path '/nix/store/dil64k4f2ff0an9p57rvhpxyw5vl1qxk-bash-interactive-5.1-p12-man' from 'https://cache.nixos.org'... +copying path '/nix/store/7zjv0kdb724q9lpjgx9k5wk58dg1h47v-apple-framework-ApplicationServices' from 'https://cache.nixos.org'... +copying path '/nix/store/fx1yy6wzxm2w6b6xv177ryraa7vzjqa1-brotli-1.0.9-lib' from 'https://cache.nixos.org'... +copying path '/nix/store/1h653vqxkd69ylzgxbdrj4sx7idki7g5-apple-framework-CoreVideo' from 'https://cache.nixos.org'... 
+copying path '/nix/store/5f1qxwxxp9mag9r91mdfs3w1x3y99kxm-bzip2-1.0.6.0.2' from 'https://cache.nixos.org'... +copying path '/nix/store/7i321ai77n1y10vdvb7rsnk1ng8vp11b-ed-1.17' from 'https://cache.nixos.org'... +copying path '/nix/store/j998frnsa1hxkfbysq0bkw6hjfrd928v-bzip2-1.0.6.0.2-bin' from 'https://cache.nixos.org'... +copying path '/nix/store/8jv49b53ybn6fnwhd0dm2vk42w9zzpcv-expand-response-params' from 'https://cache.nixos.org'... +copying path '/nix/store/phq0s0i1vhlvs7y77gyygnyd1zm76mlh-gawk-5.1.0' from 'https://cache.nixos.org'... +copying path '/nix/store/k5szspjyr3axcj91vzmdy0imyswyc7fa-git-2.32.0-doc' from 'https://cache.nixos.org'... +copying path '/nix/store/aqrq990va9j1v6gvrgxksr5av9vpcxz3-gnumake-4.3' from 'https://cache.nixos.org'... +copying path '/nix/store/z6ih791dzyjvdqm8gl5glz065kk8zh7p-gnused-4.8' from 'https://cache.nixos.org'... +copying path '/nix/store/2cn4lldfz1w5wddxc00za2igd7v82n0f-go-outline-unstable-2018-11-22' from 'https://cache.nixos.org'... +copying path '/nix/store/p04pcf2xq0w8b6awc02vmvd6p66nnsjn-gomodifytags-1.6.0' from 'https://cache.nixos.org'... +copying path '/nix/store/hww71ca6bzlzyq1x6sbdvwdb8x09v2np-gopkgs-2.1.2' from 'https://cache.nixos.org'... +copying path '/nix/store/3n9fvf4ybdwizym6cwnjf6f57p7mvrab-gzip-1.10' from 'https://cache.nixos.org'... +copying path '/nix/store/s7bqhhx7pwwif7rn708gnqq0giia5v89-hook' from 'https://cache.nixos.org'... +copying path '/nix/store/7xp1yxk8d67ik4rfcbxxjrfv6fnch0p5-iana-etc-20210225' from 'https://cache.nixos.org'... +copying path '/nix/store/v88kadzxxwdkwmksjprhabjpdz43ic49-hook' from 'https://cache.nixos.org'... +copying path '/nix/store/nmn13jb7mr8js8dnfpqq02xh6i5yhpsw-delve-1.7.0' from 'https://cache.nixos.org'... +copying path '/nix/store/k4cs7vm12d9lb59jvfzv7cps73rwmn51-fluxcd-0.16.1' from 'https://cache.nixos.org'... +copying path '/nix/store/zgpnab89ggi39cvmz6nvwxsav0vfniwj-go-tools-2021.1' from 'https://cache.nixos.org'... 
+copying path '/nix/store/5m4flh5gnzd939x16p9m063i1im9is5w-gopls-0.7.0' from 'https://cache.nixos.org'... +copying path '/nix/store/3viavkq7bpd0phpy5aa0lqblhi63z2w0-gotests-1.5.3' from 'https://cache.nixos.org'... +copying path '/nix/store/qqigwc2653h1jhidd7jg9y877yw56yqd-impl-1.0.0' from 'https://cache.nixos.org'... +copying path '/nix/store/hvqn0g8wf6pxqpgcz1sdnwyyz5dcg1z0-kind-0.11.1' from 'https://cache.nixos.org'... +copying path '/nix/store/nvvdn0i838cr7cp1q7k6sglps5qh2r70-kubebuilder-3.1.0-go-modules' from 'https://cache.nixos.org'... +copying path '/nix/store/8hj3symc74118i5lx2mk5ckawpy5yhzg-kubectl-1.21.3' from 'https://cache.nixos.org'... +copying path '/nix/store/v4bzbvfhzsq0294vyxzy06c3c8b9hjg6-kustomize-4.2.0' from 'https://cache.nixos.org'... +copying path '/nix/store/0yrrpxkc6l1kvdf491qa1m7scamsmqb7-libcxxabi-7.1.0' from 'https://cache.nixos.org'... +copying path '/nix/store/zyh3yfv4npb5vyhqmp6hswfps0j7smyn-libffi-3.3' from 'https://cache.nixos.org'... +copying path '/nix/store/fsz5rxl37pamwhqkmp9w4jryfc4pfky3-libcxx-7.1.0' from 'https://cache.nixos.org'... +copying path '/nix/store/y736kjvzdclbzcxdv5fc9ghb832fq0i7-libcxxabi-7.1.0-dev' from 'https://cache.nixos.org'... +copying path '/nix/store/r2w4s36nr4w9gnchz4nyzj64nyaig9cr-ICU-66108' from 'https://cache.nixos.org'... +copying path '/nix/store/55nwvpwcx5hvp9ky2yf3n38xfj171idl-compiler-rt-libc-7.1.0' from 'https://cache.nixos.org'... +copying path '/nix/store/wkgrylws6qa3iajc2py42m5fhyxnwkjv-gmp-6.2.1' from 'https://cache.nixos.org'... +copying path '/nix/store/lw8gcf5givnwpflpgfkmayfq6by40wfr-compiler-rt-libc-7.1.0-dev' from 'https://cache.nixos.org'... +copying path '/nix/store/crjmgrws5w2pjhm5k887m93rrkcf36hb-coreutils-8.32' from 'https://cache.nixos.org'... +copying path '/nix/store/znv06z6pld0ivwddj52rishggwc2m94c-libcxx-7.1.0-dev' from 'https://cache.nixos.org'... +copying path '/nix/store/adigdkh0pibb4afqf5q2sv3503ln0iyb-diffutils-3.7' from 'https://cache.nixos.org'... 
+copying path '/nix/store/2fzds9cqx148i1lf84sc9i0982bv9rbx-findutils-4.8.0' from 'https://cache.nixos.org'... +copying path '/nix/store/qgygs5g9l0zxhs8fzjjqpmmlzps1hlrd-libiconv-50' from 'https://cache.nixos.org'... +copying path '/nix/store/5vw0211970xi853z6l9a42s0kdbzv3b0-libkrb5-1.18' from 'https://cache.nixos.org'... +copying path '/nix/store/ig8x9si4x7kwxv2qimvdwk2wpafv21gp-gettext-0.21' from 'https://cache.nixos.org'... +copying path '/nix/store/fci25bpkavmg481ws49qdjvv9k4vmm6y-ncurses-6.2' from 'https://cache.nixos.org'... +copying path '/nix/store/q7wxq69v35d0bxiqrycldz00wg9kxqzd-gnutar-1.34' from 'https://cache.nixos.org'... +copying path '/nix/store/dw0qfmxss8if5ks8qr0rk2sb8wnng3dw-libtapi-1100.0.11' from 'https://cache.nixos.org'... +copying path '/nix/store/hnsbw22x8djbq0p70m2llq4qb3gbwmmi-ncurses-6.3' from 'https://cache.nixos.org'... +copying path '/nix/store/6biazmfb7jq7y0zxqf4r6b3nxx24n8f9-cctools-port-949.0.1' from 'https://cache.nixos.org'... +copying path '/nix/store/bk8h2xqxsxrvbdf96cgdr5xvw2yafp8j-nghttp2-1.43.0-lib' from 'https://cache.nixos.org'... +copying path '/nix/store/627zfcplv9dd15i19hnbjjwi18yqp3a5-nix.xcconfig' from 'https://cache.nixos.org'... +copying path '/nix/store/n4g6bc1gs1jjhx1hzznqh3950ix60v5i-objc4-709.1' from 'https://cache.nixos.org'... +copying path '/nix/store/rsza0nf7wwmv0c17q0x9yhf79ycvqgm1-openssl-1.1.1k' from 'https://cache.nixos.org'... +copying path '/nix/store/gswa3ywvzhm145g8jjhaihsa8qyjmc06-apple-framework-Foundation' from 'https://cache.nixos.org'... +copying path '/nix/store/3yrzfhqvq41nh8fi8mjm77hwhaqprzff-apple-framework-QuartzCore' from 'https://cache.nixos.org'... +copying path '/nix/store/cqadarnj29fpv1vm8bzbh6dpb8aaxzjz-patch-2.7.6' from 'https://cache.nixos.org'... +copying path '/nix/store/jrbfjc704hxs70q3ljjpyl0f4j7a57k9-apple-framework-Carbon' from 'https://cache.nixos.org'... +copying path '/nix/store/mg2ar6f1r2nssalsss32lyqifwlhhmzb-pcre-8.44' from 'https://cache.nixos.org'... 
+copying path '/nix/store/jiwzfi73h9zadmnb6qgjlwjisan3sznd-apple-framework-AudioUnit' from 'https://cache.nixos.org'... +copying path '/nix/store/lr3fvwhvd8zpmjbibd3h02qdx85mxrv4-gnugrep-3.6' from 'https://cache.nixos.org'... +copying path '/nix/store/6i6c9gz5d79b97gnr0a12yn3d49dyrqy-apple-framework-AppKit' from 'https://cache.nixos.org'... +copying path '/nix/store/dlh18gcgvy32811cnirjmadpn9x6lahg-perl5.34.0-Encode-Locale-1.05' from 'https://cache.nixos.org'... +copying path '/nix/store/wpx3d215y08c37sc5fzlic5hkn8hhha4-apple-framework-Cocoa' from 'https://cache.nixos.org'... +copying path '/nix/store/mwhc7d6g1zzrvjvqwkf988gbf5q03lrf-perl5.34.0-FCGI-ProcManager-0.28' from 'https://cache.nixos.org'... +copying path '/nix/store/cz81sbpnda675pfq7gfnsdr9k8llgfw4-hidapi-0.10.1' from 'https://cache.nixos.org'... +copying path '/nix/store/cklqcidnwvvmp6h9gpdbnldmkak0mz4h-perl5.34.0-HTML-TagCloud-0.38' from 'https://cache.nixos.org'... +copying path '/nix/store/b73lrrsjx4bvp8qh7jairv55syvagkwd-perl5.34.0-HTML-Tagset-3.20' from 'https://cache.nixos.org'... +copying path '/nix/store/5kkpswp4zyzwwfwhccaszvrrfwifxg6r-perl5.34.0-IO-HTML-1.004' from 'https://cache.nixos.org'... +copying path '/nix/store/p7pw00idxa2b4ywb207d29qik09bs5h3-perl5.34.0-LWP-MediaTypes-6.04' from 'https://cache.nixos.org'... +copying path '/nix/store/156rmc1d7m0w0awhdn8jky03i1nyf7rh-perl5.34.0-Test-Needs-0.002006' from 'https://cache.nixos.org'... +copying path '/nix/store/q647daz7i71y30birkp6gls1r6fffwp2-perl5.34.0-Test-RequiresInternet-0.05' from 'https://cache.nixos.org'... +copying path '/nix/store/qc7hpsj9h68fnidcbvq4g6lih3666wv7-perl5.34.0-TimeDate-2.33' from 'https://cache.nixos.org'... +copying path '/nix/store/4ij9w5ck9m2ijpqpfjz46cnsa01nlibl-perl5.34.0-Try-Tiny-0.30' from 'https://cache.nixos.org'... +copying path '/nix/store/clgjx9nnbgcb5zgndhn45b2kas7d3ily-perl5.34.0-HTTP-Date-6.05' from 'https://cache.nixos.org'... 
+copying path '/nix/store/6icby0hr0lp7gswwqq207b8lvvjxi9wq-perl5.34.0-Test-Fatal-0.016' from 'https://cache.nixos.org'... +copying path '/nix/store/h1cjrnwdmvmrh2mspkrr0agv0qn257dj-perl5.34.0-File-Listing-6.14' from 'https://cache.nixos.org'... +copying path '/nix/store/xcn2cfrpql8panqjqhfxd4sqlzh5ja66-perl5.34.0-URI-5.05' from 'https://cache.nixos.org'... +copying path '/nix/store/hpgwq0an0vrjdqvlgj8szcqsyn4m0mg4-readline-8.1p0' from 'https://cache.nixos.org'... +copying path '/nix/store/mbm45kwp1vqwnk52cp0w9dy4f0qsxljb-perl5.34.0-HTTP-Message-6.26' from 'https://cache.nixos.org'... +copying path '/nix/store/34apg7fbp8c6ppkfh5bzp0nbpacj2h27-bash-interactive-5.1-p12' from 'https://cache.nixos.org'... +copying path '/nix/store/njfdj5rp1r3yc1sv9r9ffph945928nx6-perl5.34.0-HTTP-Cookies-6.09' from 'https://cache.nixos.org'... +copying path '/nix/store/nsikv84v16n9dgmfnmpr5fc572nhfs1y-bash-interactive-5.1-p12-dev' from 'https://cache.nixos.org'... +copying path '/nix/store/kmmqfcwps62k79yb5rljgkydff5mkgfp-perl5.34.0-HTTP-Daemon-6.01' from 'https://cache.nixos.org'... +copying path '/nix/store/x9chz553wdb3j2hpq2z5pgg43ysrq87n-perl5.34.0-HTTP-Negotiate-6.01' from 'https://cache.nixos.org'... +copying path '/nix/store/9wy7j2x17zbg471ag8n8wnvakf2dga4k-perl5.34.0-Net-HTTP-6.19' from 'https://cache.nixos.org'... +copying path '/nix/store/baa2clyal7jbzjfcaj62sjzjpm407ixh-perl5.34.0-WWW-RobotRules-6.02' from 'https://cache.nixos.org'... +copying path '/nix/store/l2mvac03b398x7jnhbqdf9051k4rsini-source' from 'https://cache.nixos.org'... +copying path '/nix/store/a74jwf4lgca5d05klna9slxydgdsngk9-tzdata-2021a' from 'https://cache.nixos.org'... +copying path '/nix/store/mq766n41v9djm2rf74dpg4hd7n5mbwwh-xz-5.2.5' from 'https://cache.nixos.org'... +copying path '/nix/store/x87bqc42xmskjwf5c2a9bbzkd8aqwrca-zlib-1.2.11' from 'https://cache.nixos.org'... +copying path '/nix/store/00vcq1i9x1dwwn21dqkagybcxa1lcnwi-xz-5.2.5-bin' from 'https://cache.nixos.org'... 
+copying path '/nix/store/wia4ggzr91yglpq6z1lg48qnv5z4yy94-binutils-2.35.1' from 'https://cache.nixos.org'... +copying path '/nix/store/ggr82sipdfkxszl9qnbd0zg0hlqr7b1c-libssh2-1.9.0' from 'https://cache.nixos.org'... +copying path '/nix/store/0idd3d3y4vbiilwzhjq1my4npac6bp3r-libxml2-2.9.12' from 'https://cache.nixos.org'... +copying path '/nix/store/niqwc81ynr8csa9rdz9l0zg2qcjhzspb-curl-7.76.1' from 'https://cache.nixos.org'... +copying path '/nix/store/fbhp0ph0366r5614xlincnvx6qj7rlsi-llvm-7.1.0-lib' from 'https://cache.nixos.org'... +copying path '/nix/store/w9vgngyar0ykvmamm6k0bf5k9yn9kbhb-swift-corefoundation' from 'https://cache.nixos.org'... +copying path '/nix/store/xhijdap7ln3nw867cq6m3knshww9f2rn-clang-7.1.0' from 'https://cache.nixos.org'... +copying path '/nix/store/lv7ycndyrcd0n8pn4v85wfjc62c8qbbv-bootstrap_cmds-121' from 'https://cache.nixos.org'... +copying path '/nix/store/f2qq98ksl340z3ncwcwx6fcjp4mc6iyy-brotli-1.0.9-lib' from 'https://cache.nixos.org'... +copying path '/nix/store/aa3xbipcdryaf3mn9ic5y1smzlxngvfz-clang-7.1.0-lib' from 'https://cache.nixos.org'... +copying path '/nix/store/djk25cq1wr1vs7qbybzak4a6alhrsd8x-configd-453.19' from 'https://cache.nixos.org'... +copying path '/nix/store/fpybf6mdl194p6sbnh3hq2zfm4wp7rxk-ctags-816' from 'https://cache.nixos.org'... +copying path '/nix/store/7a27b4f7c3sqbjc1vg0fjnplkqd2b27d-expat-2.4.1' from 'https://cache.nixos.org'... +copying path '/nix/store/zk81d86gikl6dm96kfylgd2yi7ji8qbm-gdbm-1.20' from 'https://cache.nixos.org'... +copying path '/nix/store/hdrwrac2r06nmcifk2asg2j7hinb5qpb-gettext-0.21' from 'https://cache.nixos.org'... +copying path '/nix/store/vxc4vzqx02hf3795vk5d9an3xvx2ml90-gnum4-1.4.18' from 'https://cache.nixos.org'... +copying path '/nix/store/c8wnza9p6j649x3j4w4rgkzcy43p2f8m-gperf-3.1' from 'https://cache.nixos.org'... +copying path '/nix/store/szcfcci0blvj5xz5snygxy3271dbzlc0-bison-3.7.6' from 'https://cache.nixos.org'... 
+copying path '/nix/store/qkjmhc3zlsm09rn9f4h9m410m844apbm-flex-2.6.4' from 'https://cache.nixos.org'... +copying path '/nix/store/3iz44xrsq7niprab0majz08hjm6djiv7-indent-2.2.12' from 'https://cache.nixos.org'... +copying path '/nix/store/jggqvhq5db9c0q635i8r8qvlc30g72sv-libcbor-0.8.0' from 'https://cache.nixos.org'... +copying path '/nix/store/nvypxmbcfc8yxrpgsgxhbnp6s2d4a5az-libedit-20210522-3.1' from 'https://cache.nixos.org'... +copying path '/nix/store/mrdsjbp54ys55dmav5501k59jqk5vy2b-libkrb5-1.18' from 'https://cache.nixos.org'... +copying path '/nix/store/4xp86qi542rlddjxndff141l4cyqw9ri-libxml2-2.9.12' from 'https://cache.nixos.org'... +copying path '/nix/store/np1cc9lc9jn9pxbfzkgbz24c8pxbibwa-llvm-7.1.0' from 'https://cache.nixos.org'... +copying path '/nix/store/5cv4vf1jybf00q4n82ys54pml0gvhr7d-lzo-2.10' from 'https://cache.nixos.org'... +copying path '/nix/store/falx4gakl6k1q64aqr4f8mvy8vfqcqaq-cctools-binutils-darwin-949.0.1' from 'https://cache.nixos.org'... +copying path '/nix/store/1ahb54by7x5h7yn8703nq25fiwdzg4ix-nghttp2-1.43.0-lib' from 'https://cache.nixos.org'... +copying path '/nix/store/4j3q5kmmdwkgppn6g8hna8wg6a4zrvxd-cctools-binutils-darwin-wrapper-949.0.1' from 'https://cache.nixos.org'... +copying path '/nix/store/shp60sgvnh5is0mcd7w86gq531ws3sxn-openssl-1.1.1k' from 'https://cache.nixos.org'... +copying path '/nix/store/jx31ah14p6pf488i7wwd88y2izmxi0wj-clang-wrapper-7.1.0' from 'https://cache.nixos.org'... +copying path '/nix/store/bmxvqnmx5afyvhk3iciapgfd71yxdzp7-libfido2-1.7.0' from 'https://cache.nixos.org'... +copying path '/nix/store/m8jmp0ffs11r0bigxd107v4j58r1amss-libssh2-1.9.0' from 'https://cache.nixos.org'... +copying path '/nix/store/pzjrm7c57jqpsajpbfzrx093l7dm60v6-openssh-8.6p1' from 'https://cache.nixos.org'... +copying path '/nix/store/b2wj786d4qizfamzwhc6qfsb1yaaqsm5-curl-7.76.1' from 'https://cache.nixos.org'... +copying path '/nix/store/6y7x4wlzbhf5m0lzacykmgnz4ln6dmgy-openvpn-2.5.2' from 'https://cache.nixos.org'... 
+copying path '/nix/store/mjlkzmwrih5bz2hzi9yzm5ngridf0n53-pcre2-10.36' from 'https://cache.nixos.org'... +copying path '/nix/store/1xjwb0bd02n1y621mjqani68kh6n6gig-perl-5.34.0' from 'https://cache.nixos.org'... +copying path '/nix/store/6wf3aa69wzi83lxz1gy41y90fiypz767-perl5.34.0-FCGI-0.79' from 'https://cache.nixos.org'... +copying path '/nix/store/sdbsgmdc9xvlngvd2m2nr2vvzyzbcmyh-perl5.34.0-HTML-Parser-3.75' from 'https://cache.nixos.org'... +copying path '/nix/store/xz9n2vz4jpkq81x0wxjsxg986gc4pjz1-perl5.34.0-TermReadKey-2.38' from 'https://cache.nixos.org'... +copying path '/nix/store/8a8c4ahdnnp3p8nf1r4q2g881yz0cp7v-perl5.34.0-CGI-4.51' from 'https://cache.nixos.org'... +copying path '/nix/store/qmzjj3g1rbqyhi84ii8chr0g7sh8ahm0-perl5.34.0-libwww-perl-6.49' from 'https://cache.nixos.org'... +copying path '/nix/store/5mkv08mg9kk2gx12j0abfp5x348yf691-perl5.34.0-CGI-Fast-2.15' from 'https://cache.nixos.org'... +copying path '/nix/store/mkr49brmqqad1yjac4lgffl1rjk0bf6m-readline-6.3p08' from 'https://cache.nixos.org'... +copying path '/nix/store/va4ipz29l8a1v9gvw7x3r5pp5zwv254b-sqlite-3.35.5' from 'https://cache.nixos.org'... +copying path '/nix/store/3yq0m0kprzf172ljfwdl6sidvf6v6j9i-stdenv-darwin' from 'https://cache.nixos.org'... +copying path '/nix/store/ad19ya4siaxhz63w0m8xh97067iahks0-python3-3.9.6' from 'https://cache.nixos.org'... +copying path '/nix/store/d8b3d560b7jyszsz0j4bx7qpxmagnfij-unifdef-2.12' from 'https://cache.nixos.org'... +copying path '/nix/store/ihlm88458pygspjfd44rzfl304rg362s-git-2.32.0' from 'https://cache.nixos.org'... +copying path '/nix/store/5zwjd1qglpnipr0f2j5lf8lngp5244b0-Toolchains' from 'https://cache.nixos.org'... +copying path '/nix/store/dmlj5yb048cs7njcrc52hnzk294vd2bd-xcbuild-0.1.2-pre' from 'https://cache.nixos.org'... +copying path '/nix/store/d051nmxsz58mjirkwg5x8jlbd50gy1ld-xcodebuild-0.1.2-pre' from 'https://cache.nixos.org'... 
+copying path '/nix/store/g63nbvmh88c0l1kz3zq5x48dam13jdcs-go-1.16.6' from 'https://cache.nixos.org'... +building '/nix/store/yw6ndp40vvpa5784j8j804sirkpybm56-kubebuilder-3.1.0.drv'... +unpacking sources +unpacking source archive /nix/store/l2mvac03b398x7jnhbqdf9051k4rsini-source +source root is source +patching sources +configuring +building +Building subPackage ./cmd +golang.org/x/crypto/cryptobyte/asn1 +github.com/cloudflare/cfssl/info +google.golang.org/protobuf/internal/flags +google.golang.org/protobuf/internal/set +golang.org/x/text/transform +github.com/spf13/afero/mem +golang.org/x/text/unicode/norm +github.com/spf13/pflag +github.com/cloudflare/cfssl/auth +github.com/cloudflare/cfssl/errors +golang.org/x/crypto/ed25519 +github.com/cloudflare/cfssl/log +github.com/cloudflare/cfssl/crypto/pkcs7 +github.com/cloudflare/cfssl/helpers/derhelpers +github.com/google/certificate-transparency-go/asn1 +golang.org/x/crypto/cryptobyte +golang.org/x/crypto/ocsp +golang.org/x/crypto/pkcs12/internal/rc2 +golang.org/x/crypto/pkcs12 +github.com/google/certificate-transparency-go/tls +github.com/google/certificate-transparency-go/x509/pkix +github.com/cloudflare/cfssl/ocsp/config +golang.org/x/text/unicode/bidi +github.com/spf13/afero +github.com/google/certificate-transparency-go/x509 +golang.org/x/text/secure/bidirule +golang.org/x/net/idna +github.com/weppos/publicsuffix-go/publicsuffix +github.com/zmap/zcrypto/json +github.com/spf13/cobra +github.com/zmap/zcrypto/util +github.com/zmap/zcrypto/x509/ct +github.com/zmap/zcrypto/x509/pkix +github.com/jmoiron/sqlx/types +github.com/cloudflare/cfssl/certdb +google.golang.org/protobuf/internal/detrand +google.golang.org/protobuf/internal/errors +google.golang.org/protobuf/encoding/protowire +google.golang.org/protobuf/internal/pragma +google.golang.org/protobuf/reflect/protoreflect +google.golang.org/protobuf/reflect/protoregistry +google.golang.org/protobuf/internal/strs +google.golang.org/protobuf/internal/encoding/text 
+google.golang.org/protobuf/internal/encoding/messageset +google.golang.org/protobuf/internal/genid +google.golang.org/protobuf/internal/mapsort +google.golang.org/protobuf/internal/fieldsort +google.golang.org/protobuf/runtime/protoiface +google.golang.org/protobuf/proto +google.golang.org/protobuf/internal/descfmt +google.golang.org/protobuf/internal/descopts +google.golang.org/protobuf/internal/encoding/defval +google.golang.org/protobuf/internal/version +golang.org/x/net/context/ctxhttp +golang.org/x/net/context +github.com/cloudflare/cfssl/api +github.com/cloudflare/cfssl/api/client +github.com/zmap/zcrypto/x509 +github.com/gobuffalo/here +github.com/markbates/pkger/here +github.com/markbates/pkger/pkging +google.golang.org/protobuf/encoding/prototext +google.golang.org/protobuf/internal/filedesc +github.com/markbates/pkger/internal/maps +github.com/markbates/pkger/pkging/stdos +github.com/markbates/pkger +github.com/gogo/protobuf/proto +google.golang.org/protobuf/internal/encoding/tag +google.golang.org/protobuf/internal/impl +github.com/zmap/zlint/v2/util +github.com/zmap/zlint/v2/lint +github.com/zmap/zlint/v2/lints/apple +github.com/zmap/zlint/v2/lints/cabf_br +github.com/zmap/zlint/v2/lints/cabf_ev +github.com/zmap/zlint/v2/lints/community +github.com/zmap/zlint/v2/lints/etsi +github.com/zmap/zlint/v2/lints/mozilla +github.com/zmap/zlint/v2/lints/rfc +github.com/zmap/zlint/v2 +github.com/gogo/protobuf/sortkeys +github.com/google/gofuzz +gopkg.in/inf.v0 +k8s.io/apimachinery/third_party/forked/golang/reflect +k8s.io/apimachinery/pkg/conversion +k8s.io/apimachinery/pkg/selection +k8s.io/apimachinery/pkg/fields +k8s.io/apimachinery/pkg/util/sets +k8s.io/apimachinery/pkg/util/errors +k8s.io/apimachinery/pkg/util/validation/field +k8s.io/apimachinery/pkg/util/validation +github.com/go-logr/logr +k8s.io/klog/v2 +k8s.io/apimachinery/pkg/api/resource +k8s.io/apimachinery/pkg/labels +k8s.io/apimachinery/pkg/conversion/queryparams 
+k8s.io/apimachinery/pkg/runtime/schema +k8s.io/apimachinery/pkg/util/json +k8s.io/apimachinery/pkg/util/naming +k8s.io/apimachinery/pkg/util/runtime +github.com/modern-go/concurrent +gopkg.in/yaml.v2 +github.com/modern-go/reflect2 +google.golang.org/protobuf/internal/filetype +google.golang.org/protobuf/runtime/protoimpl +github.com/golang/protobuf/proto +google.golang.org/protobuf/types/known/anypb +github.com/golang/protobuf/ptypes/any +google.golang.org/protobuf/types/known/durationpb +google.golang.org/protobuf/types/known/timestamppb +github.com/golang/protobuf/ptypes/duration +k8s.io/apimachinery/pkg/types +github.com/golang/protobuf/ptypes/timestamp +github.com/json-iterator/go +k8s.io/apimachinery/pkg/util/intstr +github.com/golang/protobuf/ptypes +github.com/google/certificate-transparency-go/client/configpb +golang.org/x/net/http/httpguts +golang.org/x/net/http2/hpack +github.com/gobuffalo/flect +golang.org/x/net/http2 +k8s.io/utils/pointer +golang.org/x/tools/go/internal/gcimporter +golang.org/x/tools/go/gcexportdata +golang.org/x/mod/semver +golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/event/keys +golang.org/x/tools/internal/event/core +golang.org/x/tools/internal/event +golang.org/x/tools/internal/gocommand +golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/internal/packagesinternal +golang.org/x/tools/internal/typesinternal +golang.org/x/xerrors/internal +golang.org/x/xerrors +golang.org/x/tools/go/packages +sigs.k8s.io/structured-merge-diff/v4/value +k8s.io/apimachinery/pkg/util/net +sigs.k8s.io/yaml +sigs.k8s.io/controller-tools/pkg/loader +k8s.io/apimachinery/pkg/runtime +sigs.k8s.io/controller-tools/pkg/version +sigs.k8s.io/controller-tools/pkg/markers +github.com/markbates/pkger/pkging/embed +github.com/markbates/pkger/pkging/mem +sigs.k8s.io/kubebuilder/v3 +github.com/go-errors/errors +sigs.k8s.io/kustomize/kyaml/errors +github.com/monochromegane/go-gitignore +github.com/davecgh/go-spew/spew 
+sigs.k8s.io/controller-tools/pkg/genall +github.com/pmezard/go-difflib/difflib +gopkg.in/yaml.v3 +github.com/xlab/treeprint +sigs.k8s.io/kustomize/kyaml/ext +github.com/mailru/easyjson/jlexer +github.com/mailru/easyjson/buffer +github.com/mailru/easyjson/jwriter +github.com/go-openapi/swag +github.com/go-openapi/jsonpointer +k8s.io/apimachinery/pkg/watch +github.com/PuerkitoBio/urlesc +golang.org/x/text/width +k8s.io/apimachinery/pkg/apis/meta/v1 +github.com/PuerkitoBio/purell +github.com/go-openapi/jsonreference +github.com/go-openapi/spec +github.com/stretchr/testify/assert +sigs.k8s.io/kustomize/kyaml/sets +sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection +sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets +sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors +sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field +sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation +sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels +sigs.k8s.io/kustomize/kyaml/yaml +sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1184 +sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1186 +sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1188 +sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1190 +sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1191 +sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi +sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi +sigs.k8s.io/kubebuilder/v3/pkg/internal/validation +sigs.k8s.io/kustomize/kyaml/kio/kioutil +sigs.k8s.io/kustomize/kyaml/openapi +sigs.k8s.io/kustomize/kyaml/kio +sigs.k8s.io/kustomize/kyaml/fieldmeta +sigs.k8s.io/kustomize/kyaml/yaml/schema +sigs.k8s.io/kustomize/kyaml/yaml/walk +sigs.k8s.io/kustomize/kyaml/yaml/merge2 +sigs.k8s.io/kustomize/kyaml/yaml/merge3 +sigs.k8s.io/kubebuilder/v3/pkg/model/resource +sigs.k8s.io/kustomize/kyaml/kio/filters +sigs.k8s.io/kubebuilder/v3/pkg/model/stage +sigs.k8s.io/kubebuilder/v3/pkg/config 
+github.com/google/certificate-transparency-go +sigs.k8s.io/kubebuilder/v3/pkg/config/store +golang.org/x/mod/module +github.com/cloudflare/cfssl/helpers +github.com/google/certificate-transparency-go/jsonclient +sigs.k8s.io/kustomize/kyaml/fn/framework +github.com/google/certificate-transparency-go/client +github.com/cloudflare/cfssl/config +github.com/cloudflare/cfssl/csr +golang.org/x/tools/go/ast/astutil +github.com/cloudflare/cfssl/signer +golang.org/x/tools/internal/fastwalk +golang.org/x/tools/internal/gopathwalk +sigs.k8s.io/kubebuilder/v3/pkg/config/v2 +github.com/cloudflare/cfssl/signer/local +github.com/cloudflare/cfssl/signer/remote +github.com/cloudflare/cfssl/selfsign +github.com/cloudflare/cfssl/signer/universal +github.com/cloudflare/cfssl/initca +github.com/cloudflare/cfssl/cli +sigs.k8s.io/kubebuilder/v3/pkg/config/v3 +golang.org/x/tools/internal/imports +github.com/cloudflare/cfssl/cli/genkey +sigs.k8s.io/kubebuilder/v3/pkg/plugin/util +k8s.io/apimachinery/pkg/api/equality +k8s.io/api/rbac/v1 +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions +k8s.io/api/admissionregistration/v1beta1 +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 +sigs.k8s.io/controller-tools/pkg/rbac +sigs.k8s.io/controller-tools/pkg/webhook +golang.org/x/tools/imports +sigs.k8s.io/kubebuilder/v3/pkg/machinery +sigs.k8s.io/kubebuilder/v3/pkg/config/store/yaml +sigs.k8s.io/kubebuilder/v3/pkg/plugin +sigs.k8s.io/kubebuilder/v3/pkg/plugins +sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/certmanager +sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/crd +sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/crd/patches +sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/kdefault 
+sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/manager +sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/prometheus +sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/rbac +sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/samples +sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/webhook +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang +sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/declarative/v1/internal/templates +sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1 +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/api +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/certmanager +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/crd/patches +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/crd +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/kdefault +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/manager +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/prometheus +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/rbac +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/samples +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/webhook +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/controllers +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/hack 
+sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v3/scaffolds/internal/templates +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v3/scaffolds/internal/templates/api +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v3/scaffolds/internal/templates/controllers +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2 +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v3/scaffolds/internal/templates/hack +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v3/scaffolds +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/declarative/v1 +sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v3 +sigs.k8s.io/controller-tools/pkg/crd/markers +sigs.k8s.io/controller-tools/pkg/crd +sigs.k8s.io/kubebuilder/v3/pkg/cli/alpha/config-gen +sigs.k8s.io/kubebuilder/v3/pkg/cli +sigs.k8s.io/kubebuilder/v3/cmd +Building subPackage ./pkg/... +sigs.k8s.io/kubebuilder/v3/pkg/cli/alpha/config-gen/examples/advancedextension +sigs.k8s.io/kubebuilder/v3/pkg/cli/alpha/config-gen/examples/basicextension +running tests +? sigs.k8s.io/kubebuilder/v3/cmd [no test files] +=== RUN TestCLI +Running Suite: CLI Suite +======================== +Random Seed: 1654011702 +Will run 87 of 87 specs + +••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••• +Ran 87 of 87 Specs in 0.006 seconds +SUCCESS! -- 87 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestCLI (0.03s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/cli 1.457s +=== RUN TestNewCommand +=== RUN TestNewCommand/testdata/componentconfig +../project/api/v1alpha1/groupversion_info.go:24:2: cannot find package "." in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime/pkg/scheme +../project/controllers/bar_controller.go:20:2: cannot find package "." 
in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime +error running controller-gen +=== RUN TestNewCommand/testdata/default +../project/api/v1alpha1/groupversion_info.go:24:2: cannot find package "." in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime/pkg/scheme +../project/controllers/bar_controller.go:20:2: cannot find package "." in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime +error running controller-gen +=== RUN TestNewCommand/testdata/disableauthproxy +../project/api/v1alpha1/groupversion_info.go:24:2: cannot find package "." in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime/pkg/scheme +../project/controllers/bar_controller.go:20:2: cannot find package "." in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime +error running controller-gen +=== RUN TestNewCommand/testdata/enablecertmanager +../project/api/v1alpha1/groupversion_info.go:24:2: cannot find package "." in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime/pkg/scheme +../project/controllers/bar_controller.go:20:2: cannot find package "." in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime +error running controller-gen +=== RUN TestNewCommand/testdata/enableconversionwebhooks +../project/api/v1alpha1/groupversion_info.go:24:2: cannot find package "." in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime/pkg/scheme +../project/controllers/bar_controller.go:20:2: cannot find package "." in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime +error running controller-gen +=== RUN TestNewCommand/testdata/enableprometheus +../project/api/v1alpha1/groupversion_info.go:24:2: cannot find package "." 
in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime/pkg/scheme +../project/controllers/bar_controller.go:20:2: cannot find package "." in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime +error running controller-gen +=== RUN TestNewCommand/testdata/enablewebhooks +../project/api/v1alpha1/groupversion_info.go:24:2: cannot find package "." in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime/pkg/scheme +../project/controllers/bar_controller.go:20:2: cannot find package "." in: + /private/tmp/nix-build-kubebuilder-3.1.0.drv-0/source/vendor/sigs.k8s.io/controller-runtime +error running controller-gen +--- PASS: TestNewCommand (5.16s) + --- PASS: TestNewCommand/testdata/componentconfig (2.74s) + --- PASS: TestNewCommand/testdata/default (0.65s) + --- PASS: TestNewCommand/testdata/disableauthproxy (0.35s) + --- PASS: TestNewCommand/testdata/enablecertmanager (0.35s) + --- PASS: TestNewCommand/testdata/enableconversionwebhooks (0.34s) + --- PASS: TestNewCommand/testdata/enableprometheus (0.34s) + --- PASS: TestNewCommand/testdata/enablewebhooks (0.38s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/cli/alpha/config-gen 6.208s +? sigs.k8s.io/kubebuilder/v3/pkg/cli/alpha/config-gen/examples/advancedextension [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/cli/alpha/config-gen/examples/basicextension [no test files] +=== RUN TestConfig +Running Suite: Config Suite +=========================== +Random Seed: 1654011698 +Will run 65 of 65 specs + +••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••• +Ran 65 of 65 Specs in 0.001 seconds +SUCCESS! 
-- 65 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestConfig (0.01s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/config 0.394s +=== RUN TestConfigStore +Running Suite: Config Store Suite +================================= +Random Seed: 1654011698 +Will run 4 of 4 specs + +•••• +Ran 4 of 4 Specs in 0.000 seconds +SUCCESS! -- 4 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestConfigStore (0.00s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/config/store 0.316s +=== RUN TestConfigStoreYaml +Running Suite: Config Store YAML Suite +====================================== +Random Seed: 1654011699 +Will run 21 of 21 specs + +••••••••••••••••••••• +Ran 21 of 21 Specs in 0.003 seconds +SUCCESS! -- 21 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestConfigStoreYaml (0.01s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/config/store/yaml 0.266s +=== RUN TestConfigV2 +Running Suite: Config V2 Suite +============================== +Random Seed: 1654011700 +Will run 41 of 41 specs + +••••••••••••••••••••••••••••••••••••••••• +Ran 41 of 41 Specs in 0.002 seconds +SUCCESS! -- 41 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestConfigV2 (0.01s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/config/v2 0.429s +=== RUN TestConfigV3 +Running Suite: Config V3 Suite +============================== +Random Seed: 1654011701 +Will run 49 of 49 specs + +••••••••••••••••••••••••••••••••••••••••••••••••• +Ran 49 of 49 Specs in 0.003 seconds +SUCCESS! -- 49 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestConfigV3 (0.02s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/config/v3 0.256s +=== RUN TestValidation +Running Suite: Validation Suite +=============================== +Random Seed: 1654011701 +Will run 7 of 7 specs + +••••••• +Ran 7 of 7 Specs in 0.001 seconds +SUCCESS! 
-- 7 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestValidation (0.00s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/internal/validation 0.637s +=== RUN TestMachinery +Running Suite: Machinery suite +============================== +Random Seed: 1654011703 +Will run 83 of 83 specs + +••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••• +Ran 83 of 83 Specs in 0.003 seconds +SUCCESS! -- 83 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestMachinery (0.02s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/machinery 0.305s +=== RUN TestResource +Running Suite: Resource Suite +============================= +Random Seed: 1654011703 +Will run 131 of 131 specs + +••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••• +Ran 131 of 131 Specs in 0.002 seconds +SUCCESS! -- 131 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestResource (0.03s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/model/resource 0.494s +=== RUN TestStage +Running Suite: Stage Suite +========================== +Random Seed: 1654011703 +Will run 24 of 24 specs + +•••••••••••••••••••••••• +Ran 24 of 24 Specs in 0.001 seconds +SUCCESS! -- 24 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestStage (0.01s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/model/stage 0.438s +=== RUN TestPlugin +Running Suite: Plugin Suite +=========================== +Random Seed: 1654011704 +Will run 101 of 101 specs + +••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••• +Ran 101 of 101 Specs in 0.002 seconds +SUCCESS! -- 101 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestPlugin (0.08s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/plugin 0.438s +=== RUN TestPlugin +Running Suite: Plugin Util Suite +================================ +Random Seed: 1654011704 +Will run 5 of 5 specs + +••••• +Ran 5 of 5 Specs in 0.000 seconds +SUCCESS! 
-- 5 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestPlugin (0.00s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/plugin/util 0.289s +? sigs.k8s.io/kubebuilder/v3/pkg/plugins [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1 [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/certmanager [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/crd [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/crd/patches [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/kdefault [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/manager [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/prometheus [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/rbac [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/samples [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/common/kustomize/v1/scaffolds/internal/templates/config/webhook [no test files] +=== RUN TestGoPlugin +Running Suite: Go Plugin Suite +============================== +Random Seed: 1654011705 +Will run 77 of 77 specs + +••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••• +Ran 77 of 77 Specs in 0.002 seconds +SUCCESS! -- 77 Passed | 0 Failed | 0 Pending | 0 Skipped +--- PASS: TestGoPlugin (0.02s) +PASS +ok sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang 0.271s +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/declarative/v1 [no test files] +? 
sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/declarative/v1/internal/templates [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2 [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/api [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/certmanager [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/crd [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/crd/patches [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/kdefault [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/manager [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/prometheus [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/rbac [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/samples [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/config/webhook [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/controllers [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v2/scaffolds/internal/templates/hack [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v3 [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v3/scaffolds [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v3/scaffolds/internal/templates [no test files] +? 
sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v3/scaffolds/internal/templates/api [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v3/scaffolds/internal/templates/controllers [no test files] +? sigs.k8s.io/kubebuilder/v3/pkg/plugins/golang/v3/scaffolds/internal/templates/hack [no test files] +installing +post-installation fixup +strip is /nix/store/5zwjd1qglpnipr0f2j5lf8lngp5244b0-Toolchains/XcodeDefault.xctoolchain/bin/strip +stripping (with command strip and flags -S) in /nix/store/b6a7n0ppv6yj5lgl6dd3pvk5x330qvmb-kubebuilder-3.1.0/bin +patching script interpreter paths in /nix/store/b6a7n0ppv6yj5lgl6dd3pvk5x330qvmb-kubebuilder-3.1.0 + +[nix-shell:~/github/source-watcher]$ go version +go version go1.16.6 darwin/amd64 + +[nix-shell:~/github/source-watcher]$ ls .. +dataModels kitt4sme.code-workspace ngsi-timeseries-api.wiki osmops.demo storywine-api +ekz-sc.odds-n-ends kitt4sme.dazzler nixie osmops.pkgs storywine-app +ekz-sc.platform kitt4sme.fipy ome-odd-n-ends peml storywine-playground +gitops-playground kitt4sme.flaw-sleuth opa-envoy-plugin platos-cave themes +haskell-project-template kitt4sme.live orchestracities.charts profies trixie-dotses +hasnix kitt4sme.roughnator orchestracities.keycloak-scripts resto +kitt4sme kitt4sme.wiki orchestracities.platform sdk +kitt4sme.anomaly ngsi-timeseries-api osmops.code-workspace source-watcher + +[nix-shell:~/github/source-watcher]$ code ../osmops.code-workspace + +[nix-shell:~/github/source-watcher]$ go test ./... +? github.com/fluxcd/source-watcher [no test files] +? 
github.com/fluxcd/source-watcher/controllers [no test files] +ok github.com/fluxcd/source-watcher/osmops/cfg 0.346s +ok github.com/fluxcd/source-watcher/osmops/engine 0.258s +ok github.com/fluxcd/source-watcher/osmops/nbic 0.296s +ok github.com/fluxcd/source-watcher/osmops/util 0.195s +ok github.com/fluxcd/source-watcher/osmops/util/http 0.312s +ok github.com/fluxcd/source-watcher/osmops/util/http/sec 0.183s + +[nix-shell:~/github/source-watcher]$ make test +/Users/andrea/go/bin//controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..." +go fmt ./... +go vet ./... +/Users/andrea/go/bin//controller-gen crd:crdVersions=v1 rbac:roleName=source-reader webhook paths="./..." output:crd:artifacts:config=config/crd/bases +go test ./... -coverprofile cover.out +? github.com/fluxcd/source-watcher [no test files] +? github.com/fluxcd/source-watcher/controllers [no test files] +ok github.com/fluxcd/source-watcher/osmops/cfg 0.239s coverage: 99.0% of statements +ok github.com/fluxcd/source-watcher/osmops/engine 0.277s coverage: 96.8% of statements +ok github.com/fluxcd/source-watcher/osmops/nbic 0.452s coverage: 99.1% of statements +ok github.com/fluxcd/source-watcher/osmops/util 0.413s coverage: 98.7% of statements +ok github.com/fluxcd/source-watcher/osmops/util/http 0.415s coverage: 100.0% of statements +ok github.com/fluxcd/source-watcher/osmops/util/http/sec 0.315s coverage: 100.0% of statements + +[nix-shell:~/github/source-watcher]$ go mod --help +go mod --help: unknown command +Run 'go help mod' for usage. + +[nix-shell:~/github/source-watcher]$ go help mod +Go mod provides access to operations on modules. + +Note that support for modules is built into all the go commands, +not just 'go mod'. For example, day-to-day adding, removing, upgrading, +and downgrading of dependencies should be done using 'go get'. +See 'go help modules' for an overview of module functionality. 
+ +Usage: + + go mod [arguments] + +The commands are: + + download download modules to local cache + edit edit go.mod from tools or scripts + graph print module requirement graph + init initialize new module in current directory + tidy add missing and remove unused modules + vendor make vendored copy of dependencies + verify verify dependencies have expected content + why explain why packages or modules are needed + +Use "go help mod " for more information about a command. + + +[nix-shell:~/github/source-watcher]$ go help get +usage: go get [-d] [-t] [-u] [-v] [-insecure] [build flags] [packages] + +Get resolves its command-line arguments to packages at specific module versions, +updates go.mod to require those versions, downloads source code into the +module cache, then builds and installs the named packages. + +To add a dependency for a package or upgrade it to its latest version: + + go get example.com/pkg + +To upgrade or downgrade a package to a specific version: + + go get example.com/pkg@v1.2.3 + +To remove a dependency on a module and downgrade modules that require it: + + go get example.com/mod@none + +See https://golang.org/ref/mod#go-get for details. + +The 'go install' command may be used to build and install packages. When a +version is specified, 'go install' runs in module-aware mode and ignores +the go.mod file in the current directory. For example: + + go install example.com/pkg@v1.2.3 + go install example.com/pkg@latest + +See 'go help install' or https://golang.org/ref/mod#go-install for details. + +In addition to build flags (listed in 'go help build') 'go get' accepts the +following flags. + +The -t flag instructs get to consider modules needed to build tests of +packages specified on the command line. + +The -u flag instructs get to update modules providing dependencies +of packages named on the command line to use newer minor or patch +releases when available. 
+ +The -u=patch flag (not -u patch) also instructs get to update dependencies, +but changes the default to select patch releases. + +When the -t and -u flags are used together, get will update +test dependencies as well. + +The -insecure flag permits fetching from repositories and resolving +custom domains using insecure schemes such as HTTP, and also bypassess +module sum validation using the checksum database. Use with caution. +This flag is deprecated and will be removed in a future version of go. +To permit the use of insecure schemes, use the GOINSECURE environment +variable instead. To bypass module sum validation, use GOPRIVATE or +GONOSUMDB. See 'go help environment' for details. + +The -d flag instructs get not to build or install packages. get will only +update go.mod and download source code needed to build packages. + +Building and installing packages with get is deprecated. In a future release, +the -d flag will be enabled by default, and 'go get' will be only be used to +adjust dependencies of the current module. To install a package using +dependencies from the current module, use 'go install'. To install a package +ignoring the current module, use 'go install' with an @version suffix like +"@latest" after each argument. + +For more about modules, see https://golang.org/ref/mod. + +For more about specifying packages, see 'go help packages'. + +This text describes the behavior of get using modules to manage source +code and dependencies. If instead the go command is running in GOPATH +mode, the details of get's flags and effects change, as does 'go help get'. +See 'go help gopath-get'. + +See also: go build, go install, go clean, go mod. 
+ +[nix-shell:~/github/source-watcher]$ go get github.com/walle/targz +go: downloading github.com/walle/targz v0.0.0-20140417120357-57fe4206da5a +go get: added github.com/walle/targz v0.0.0-20140417120357-57fe4206da5a + +[nix-shell:~/github/source-watcher]$ make run +/Users/andrea/go/bin//controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..." +go fmt ./... +go vet ./... +/Users/andrea/go/bin//controller-gen crd:crdVersions=v1 rbac:roleName=source-reader webhook paths="./..." output:crd:artifacts:config=config/crd/bases +go run ./main.go + + +[nix-shell:~/github/source-watcher]$ ls +CODE_OF_CONDUCT.md Dockerfile PROJECT _tmp config docs go.sum nbi-connection.yaml shell.nix +CONTRIBUTING.md LICENSE README.md bin controllers federico.md hack osm_ops_config.yaml tmp.yaml +DCO Makefile _deployment_ build cover.out go.mod main.go osmops xxx.tar.gz + +[nix-shell:~/github/source-watcher]$ tar -tzf xxx.tar.gz +tar: Removing leading `/' from member names +/osmops/.DS_Store +/osmops/cfg/fsvisitor.go +/osmops/cfg/fsvisitor_test.go +/osmops/cfg/store.go +/osmops/cfg/store_test.go +/osmops/cfg/store_test_dir/test_1/deploy.me/secret.yaml +/osmops/cfg/store_test_dir/test_1/osm_ops_config.yaml +/osmops/cfg/store_test_dir/test_2/config.yaml +/osmops/cfg/store_test_dir/test_3/osm_ops_config.yaml +/osmops/cfg/store_test_dir/test_4/osm_ops_config.yaml +/osmops/cfg/store_test_dir/test_5/osm_ops_config.yaml +/osmops/cfg/store_test_dir/test_5/secret.yaml +/osmops/cfg/store_test_dir/test_6/deploy.me/ignore1.yaml +/osmops/cfg/store_test_dir/test_6/deploy.me/k1.ops.yaml +/osmops/cfg/store_test_dir/test_6/deploy.me/recurse/ignore2.yaml +/osmops/cfg/store_test_dir/test_6/deploy.me/recurse/k2.ops.yaml +/osmops/cfg/store_test_dir/test_6/deploy.me/recurse/k3.ops.yaml +/osmops/cfg/store_test_dir/test_6/deploy.me/secret.yaml +/osmops/cfg/store_test_dir/test_6/osm_ops_config.yaml +/osmops/cfg/yamlreader.go +/osmops/cfg/yamlreader_test.go +/osmops/cfg/yamltypes.go 
+/osmops/cfg/yamltypes_test.go +/osmops/engine/mocks_test.go +/osmops/engine/reconcile.go +/osmops/engine/reconcile_test.go +/osmops/engine/reconcile_test_dir/test_1/osm_ops_config.yaml +/osmops/engine/reconcile_test_dir/test_2/deploy.me/secret.yaml +/osmops/engine/reconcile_test_dir/test_2/osm_ops_config.yaml +/osmops/engine/reconcile_test_dir/test_3/deploy.me/k1.ops.yaml +/osmops/engine/reconcile_test_dir/test_3/deploy.me/k2.ops.yaml +/osmops/engine/reconcile_test_dir/test_3/deploy.me/k3.ops.yaml +/osmops/engine/reconcile_test_dir/test_3/deploy.me/secret.yaml +/osmops/engine/reconcile_test_dir/test_3/osm_ops_config.yaml +/osmops/engine/tmp_test.go +/osmops/nbic/auth.go +/osmops/nbic/auth_test.go +/osmops/nbic/client.go +/osmops/nbic/client_test.go +/osmops/nbic/enpoints.go +/osmops/nbic/nbi_data_test.go +/osmops/nbic/nbi_test.go +/osmops/nbic/nsdescriptors.go +/osmops/nbic/nsdescriptors_test.go +/osmops/nbic/nsinstances.go +/osmops/nbic/nsinstances_test.go +/osmops/nbic/vimaccounts.go +/osmops/nbic/vimaccounts_test.go +/osmops/util/http/builders.go +/osmops/util/http/builders_test.go +/osmops/util/http/client.go +/osmops/util/http/client_test.go +/osmops/util/http/reshandlers.go +/osmops/util/http/reshandlers_test.go +/osmops/util/http/sec/session.go +/osmops/util/http/sec/session_test.go +/osmops/util/http/sec/token.go +/osmops/util/http/sec/token_test.go +/osmops/util/types.go +/osmops/util/types_test.go + +[nix-shell:~/github/source-watcher]$ tar -tzf _tmp/ +.DS_Store osm-install/ osm-install-issues/ osm-mitm/ osm-pkgs/ + +[nix-shell:~/github/source-watcher]$ tar -tzf _tmp/osm-pkgs/ +README.md openldap_knf/ openldap_knf.tar.gz openldap_ns/ openldap_ns.tar.gz + +[nix-shell:~/github/source-watcher]$ tar -tzf _tmp/osm-pkgs/openldap_ns.tar.gz +openldap_ns/ +openldap_ns/openldap_nsd.yaml +openldap_ns/README.md +openldap_ns/checksums.txt + +[nix-shell:~/github/source-watcher]$ tar -tzf ~/Downloads/openldap_ns.tar.gz +openldap_ns/ +openldap_ns/openldap_nsd.yaml 
+openldap_ns/README.md +openldap_ns/checksums.txt + +[nix-shell:~/github/source-watcher]$ rm ~/Downloads/openldap_ns.tar.gz + +[nix-shell:~/github/source-watcher]$ rm xxx.tar.gz + +[nix-shell:~/github/source-watcher]$ make run +/Users/andrea/go/bin//controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..." +go fmt ./... +go vet ./... +/Users/andrea/go/bin//controller-gen crd:crdVersions=v1 rbac:roleName=source-reader webhook paths="./..." output:crd:artifacts:config=config/crd/bases +go run ./main.go + + +[nix-shell:~/github/source-watcher]$ tar -tzf xxx.tar.gz | less + +[nix-shell:~/github/source-watcher]$ diff --git a/_tmp/osmops-packaging/http-calls.md b/_tmp/osmops-packaging/http-calls.md new file mode 100644 index 0000000..0f2d45b --- /dev/null +++ b/_tmp/osmops-packaging/http-calls.md @@ -0,0 +1,109 @@ +Get a token. + +```console +curl -v 192.168.64.22/osm/admin/v1/tokens \ + -H 'Accept: application/json' -H 'Content-Type: application/yaml' \ + -d '{"username": "admin", "password": "admin", "project_id": "admin"}' + +export OSM_TOKEN=wa7FDNWma96ODtC0PofsQoi1GBAi7Ah6 +``` + +Create OpenLDAP KNF package using original OSM package. + +```console +curl -v 192.168.64.22/osm/vnfpkgm/v1/vnf_packages_content \ + -H "Authorization: Bearer ${OSM_TOKEN}" \ + -H 'Accept: application/json' -H 'Content-Type: application/gzip' \ + -H 'Content-Filename: openldap_knf.tar.gz' \ + -H 'Content-File-MD5: 2a7d74587151e9fd0c1fd727003b8a1b' \ + --data-binary @../osm-pkgs/openldap_knf.tar.gz +``` + +List all KNF packages in YAML format. + +```console +curl -v 192.168.64.22/osm/vnfpkgm/v1/vnf_packages_content \ + -H "Authorization: Bearer ${OSM_TOKEN}" +``` + +Delete OpenLDAP KNF package. +NOTE: can't use ID declared in the package (`openldap_knf`); you've +got to use OSM's own ID (`_id` field). 
+ +```console +curl -v 192.168.64.22/osm/vnfpkgm/v1/vnf_packages_content/cc10f9ff-64d2-44c1-a096-95ce17b32b70 \ + -X DELETE \ + -H "Authorization: Bearer ${OSM_TOKEN}" +``` + + +Create OpenLDAP KNF package using OSMOps-generated package. + +```console +curl -v 192.168.64.22/osm/vnfpkgm/v1/vnf_packages_content \ + -H "Authorization: Bearer ${OSM_TOKEN}" \ + -H 'Accept: application/json' -H 'Content-Type: application/gzip' \ + -H 'Content-Filename: openldap_knf.tar.gz' \ + -H 'Content-File-MD5: 92821dce2b09c67cc17c780037f3ff03' \ + --data-binary @osmops-generated/openldap_knf.tar.gz +``` + +Update OpenLDAP KNF package using OSMOps-generated package. + +```console +curl -v 192.168.64.22/osm/vnfpkgm/v1/vnf_packages_content/openldap_knf \ + -X PUT \ + -H "Authorization: Bearer ${OSM_TOKEN}" \ + -H 'Accept: application/json' -H 'Content-Type: application/gzip' \ + -H 'Content-Filename: openldap_knf.tar.gz' \ + -H 'Content-File-MD5: 92821dce2b09c67cc17c780037f3ff03' \ + --data-binary @osmops-generated/openldap_knf.tar.gz +``` + +Apparently you can't PUT the tgz. 
Notice you get the same error if +you use the OSM KNF package ID: + +- /osm/vnfpkgm/v1/vnf_packages_content/943a86dc-a90e-4add-be34-571f3e90f41b + +```log +2022-06-17T09:09:58 INFO nbi.server _cplogging.py:213 [17/Jun/2022:09:09:58] CRITICAL: Exception 'RequestBody' object has no attribute 'get' +Traceback (most recent call last): + File "/usr/lib/python3/dist-packages/osm_nbi/nbi.py", line 1585, in default + op_id = self.engine.edit_item( + File "/usr/lib/python3/dist-packages/osm_nbi/engine.py", line 372, in edit_item + return self.map_topic[topic].edit(session, _id, indata, kwargs) + File "/usr/lib/python3/dist-packages/osm_nbi/base_topic.py", line 630, in edit + indata = self._remove_envelop(indata) + File "/usr/lib/python3/dist-packages/osm_nbi/descriptor_topics.py", line 628, in _remove_envelop + if clean_indata.get("etsi-nfv-vnfd:vnfd"): +AttributeError: 'RequestBody' object has no attribute 'get' +2022-06-17T09:09:58 INFO nbi.access _cplogging.py:283 10.244.0.1 - admin/admin;session=Q9RyqcFgHNCP [17/Jun/2022:09:09:58] "PUT /osm/vnfpkgm/v1/vnf_packages_content/openldap_knf HTTP/1.0" 400 110 "" "curl/7.64.1" +``` + +Update OpenLDAP KNF descriptor. + +```console +curl -v 192.168.64.22/osm/vnfpkgm/v1/vnf_packages_content/4ffdeb67-92e7-46fa-9fa2-331a4d674137 \ + -X PUT \ + -H "Authorization: Bearer ${OSM_TOKEN}" \ + -H 'Accept: application/json' -H 'Content-Type: application/yaml' \ + --data-binary @../osm-pkgs/openldap_knf/openldap_vnfd.yaml +``` + + +List all NS packages in YAML format. + +```console +curl -v 192.168.64.22/osm/nsd/v1/ns_descriptors_content \ + -H "Authorization: Bearer ${OSM_TOKEN}" +``` + +Update OpenLDAP NS descriptor. 
+ +```console +curl -v 192.168.64.22/osm/nsd/v1/ns_descriptors_content/6cb736be-8a59-4c60-a979-22328b8094d4 \ + -X PUT \ + -H "Authorization: Bearer ${OSM_TOKEN}" \ + -H 'Accept: application/json' -H 'Content-Type: application/yaml' \ + --data-binary @../osm-pkgs/openldap_ns/openldap_nsd.yaml +``` diff --git a/_tmp/osmops-packaging/knf-pkg-list.md b/_tmp/osmops-packaging/knf-pkg-list.md new file mode 100644 index 0000000..98a24a3 --- /dev/null +++ b/_tmp/osmops-packaging/knf-pkg-list.md @@ -0,0 +1,53 @@ +KNF package list example +------------------------ + +Example output of a `GET` on `/osm/vnfpkgm/v1/vnf_packages_content`. + +```yaml +- _admin: + created: 1655475517.840946 + modified: 1655478654.0081894 + onboardingState: ONBOARDED + operationalState: ENABLED + projects_read: + - c9e9cf6f-98a4-45f8-b18d-b70d93422d88 + projects_write: + - c9e9cf6f-98a4-45f8-b18d-b70d93422d88 + storage: + descriptor: openldap_knf/openldap_vnfd.yaml + folder: 4ffdeb67-92e7-46fa-9fa2-331a4d674137 + fs: mongo + path: /app/storage/ + pkg-dir: openldap_knf + zipfile: openldap_knf.tar.gz + usageState: NOT_IN_USE + userDefinedData: {} + _id: 4ffdeb67-92e7-46fa-9fa2-331a4d674137 + _links: + packageContent: + href: /vnfpkgm/v1/vnf_packages/4ffdeb67-92e7-46fa-9fa2-331a4d674137/package_content + self: + href: /vnfpkgm/v1/vnf_packages/4ffdeb67-92e7-46fa-9fa2-331a4d674137 + vnfd: + href: /vnfpkgm/v1/vnf_packages/4ffdeb67-92e7-46fa-9fa2-331a4d674137/vnfd + description: KNF with single KDU using a helm-chart for openldap version 1.2.7 + df: + - id: default-df + ext-cpd: + - id: mgmt-ext + k8s-cluster-net: mgmtnet + id: openldap_knf + k8s-cluster: + nets: + - id: mgmtnet + kdu: + - helm-chart: stable/openldap:1.2.7 + name: ldap + mgmt-cp: mgmt-ext + onboardingState: ONBOARDED + operationalState: ENABLED + product-name: openldap_knf + provider: Telefonica + usageState: NOT_IN_USE + version: '1.0' +``` \ No newline at end of file diff --git a/_tmp/osmops-packaging/nbi-pkg-handler.md 
b/_tmp/osmops-packaging/nbi-pkg-handler.md new file mode 100644 index 0000000..095dac2 --- /dev/null +++ b/_tmp/osmops-packaging/nbi-pkg-handler.md @@ -0,0 +1,54 @@ +Uploaded corrupt tgz stream to `vnf_packages_content`. This way I was +able to get a clue of what NBI does by looking at the Python exception +stack trace in the logs. + +```console +curl -v 192.168.64.22/osm/vnfpkgm/v1/vnf_packages_content \ + -H 'Authorization: Bearer PxtGMSVAy1LJ2COGAFiYm7ctXI22CG7i' \ + -H 'Accept: application/json' -H 'Content-Type: application/gzip' \ + -H 'Content-Filename: openldap_knf.tar.gz' \ + -H 'Content-File-MD5: 2a7d74587151e9fd0c1fd727003b8a1b' \ + -d @../osm-pkgs/openldap_knf.tar.gz +``` + +Notice `curl` switch should've been `--data-binary`, not `-d` which +probably treats the file as text. + +```console +$ multipass shell osm2 +$ kubectl -n osm logs nbi-6f5fd9ff89-8xpkw +``` + +```log +2022-06-16T16:12:22 INFO nbi.server _cplogging.py:213 [16/Jun/2022:16:12:22] CRITICAL: Exception Compressed file ended before the end-of-stream marker was reached +Traceback (most recent call last): + File "/usr/lib/python3/dist-packages/osm_nbi/nbi.py", line 1417, in default + completed = self.engine.upload_content( + File "/usr/lib/python3/dist-packages/osm_nbi/engine.py", line 277, in upload_content + return self.map_topic[topic].upload_content( + File "/usr/lib/python3/dist-packages/osm_nbi/descriptor_topics.py", line 324, in upload_content + tar = tarfile.open(mode="r", fileobj=file_pkg) + File "/usr/lib/python3.8/tarfile.py", line 1603, in open + return func(name, "r", fileobj, **kwargs) + File "/usr/lib/python3.8/tarfile.py", line 1674, in gzopen + t = cls.taropen(name, mode, fileobj, **kwargs) + File "/usr/lib/python3.8/tarfile.py", line 1651, in taropen + return cls(name, mode, fileobj, **kwargs) + File "/usr/lib/python3.8/tarfile.py", line 1514, in __init__ + self.firstmember = self.next() + File "/usr/lib/python3.8/tarfile.py", line 2318, in next + tarinfo = 
self.tarinfo.fromtarfile(self) + File "/usr/lib/python3.8/tarfile.py", line 1104, in fromtarfile + buf = tarfile.fileobj.read(BLOCKSIZE) + File "/usr/lib/python3.8/gzip.py", line 292, in read + return self._buffer.read(size) + File "/usr/lib/python3.8/_compression.py", line 68, in readinto + data = self.read(len(byte_view)) + File "/usr/lib/python3.8/gzip.py", line 479, in read + if not self._read_gzip_header(): + File "/usr/lib/python3.8/gzip.py", line 437, in _read_gzip_header + self._read_exact(extra_len) + File "/usr/lib/python3.8/gzip.py", line 416, in _read_exact + raise EOFError("Compressed file ended before the " +EOFError: Compressed file ended before the end-of-stream marker was reached +``` diff --git a/_tmp/osmops-packaging/ns-pkg-list.md b/_tmp/osmops-packaging/ns-pkg-list.md new file mode 100644 index 0000000..b36fb86 --- /dev/null +++ b/_tmp/osmops-packaging/ns-pkg-list.md @@ -0,0 +1,56 @@ +NS package list example +----------------------- + +Example output of a `GET` on `/osm/nsd/v1/ns_descriptors_content`. +Notice this is the same as a `GET` on `/osm/nsd/v1/ns_descriptors`---I +think this is the SOL005 endpoint? 
+ +```yaml +- _admin: + created: 1655475749.560676 + modified: 1655478812.9101527 + onboardingState: ONBOARDED + operationalState: ENABLED + projects_read: + - c9e9cf6f-98a4-45f8-b18d-b70d93422d88 + projects_write: + - c9e9cf6f-98a4-45f8-b18d-b70d93422d88 + storage: + descriptor: openldap_ns/openldap_nsd.yaml + folder: 6cb736be-8a59-4c60-a979-22328b8094d4 + fs: mongo + path: /app/storage/ + pkg-dir: openldap_ns + zipfile: openldap_ns.tar.gz + usageState: NOT_IN_USE + userDefinedData: {} + _id: 6cb736be-8a59-4c60-a979-22328b8094d4 + _links: + nsd_content: + href: /nsd/v1/ns_descriptors/6cb736be-8a59-4c60-a979-22328b8094d4/nsd_content + self: + href: /nsd/v1/ns_descriptors/6cb736be-8a59-4c60-a979-22328b8094d4 + description: NS consisting of a single KNF openldap_knf connected to mgmt network + designer: OSM + df: + - id: default-df + vnf-profile: + - id: openldap + virtual-link-connectivity: + - constituent-cpd-id: + - constituent-base-element-id: openldap + constituent-cpd-id: mgmt-ext + virtual-link-profile-id: mgmtnet + vnfd-id: openldap_knf + id: openldap_ns + name: openldap_ns + nsdOnboardingState: ONBOARDED + nsdOperationalState: ENABLED + nsdUsageState: NOT_IN_USE + version: '1.0' + virtual-link-desc: + - id: mgmtnet + mgmt-network: true + vnfd-id: + - openldap_knf +``` \ No newline at end of file diff --git a/_tmp/osmops-packaging/osmops-generated/openldap_knf.tar.gz b/_tmp/osmops-packaging/osmops-generated/openldap_knf.tar.gz new file mode 100755 index 0000000..10bbc46 Binary files /dev/null and b/_tmp/osmops-packaging/osmops-generated/openldap_knf.tar.gz differ diff --git a/build/config/project.nix b/build/config/project.nix new file mode 100644 index 0000000..2891bd3 --- /dev/null +++ b/build/config/project.nix @@ -0,0 +1,20 @@ +# +# Project build info. +# +rec +{ + # Absolute path to the repo's root dir. + root = ../../.; + + # The name of this project. 
Taken to be the name of the repo dir; sort of + # customary for online repos, but change it if you don't like it :-) + # Project derivations like local Haskell packages get added to the + # Nix packages in a set having this name so you can reference them + # easily e.g. `pkgs.my-project.haskell.my-pkg-2`. + # Have a look at `pkgset.nix` to see what winds up in `pkgs.my-project`. + name = baseNameOf (toString root); + + # Absolute path to the directory containing the local source packages + # implementing the project's components. + componentsDir = root + "/components"; +} diff --git a/build/config/version.nix b/build/config/version.nix new file mode 100644 index 0000000..e7c1ac9 --- /dev/null +++ b/build/config/version.nix @@ -0,0 +1,40 @@ +# +# Version of the Nix infrastructure our project uses. +# We pin absolutely everything to make sure no matter what everybody +# gets the exact same build/dev environment, Docker images, etc. +# Reproducibility is king. (TODO need Flakes?) +# +{ + # Arguments to `fetchFromGitHub`, detailing the Nixpkgs source we + # want to use. + nixpkgsGitHub = { + owner = "NixOS"; + repo = "nixpkgs"; + rev = "f6ccdfcd2ac4f2e259d20e378737dcbd0ca7debe"; # (1) + sha256 = "1d2lk7a0l166pvgy0xfdlhxgja986hgn39szn9d1fqamyhxzvbaz"; # (2) + }; + +} +# NOTE +# 1. Nixpkgs commit. +# Git hash of the Nixpkgs commit to fetch. We'll pin our infrastructure +# to the Nix definitions as they were at that commit. Normally it should +# be the latest commit known to work with our project. To get the ID of +# the latest commit on the `nixpkgs-unstable` branch, run +# +# $ git ls-remote https://github.com/nixos/nixpkgs nixpkgs-unstable +# +# A quick way to get a description of a commit is to use the GitHub API +# to GET a JSON object describing the commit associated to the commit +# hash in the URL---short hashes work too. E.g. +# +# https://api.github.com/repos/nixos/nixpkgs/commits/5dbf5f9 +# +# 2. Nixpkgs commit SHA256. 
+# To figure out the SHA256 of the commit in (1), you could initially +# set it to a made-up one and just let the Nix build bomb out, it'll +# tell you what's the actual SHA256 to use. Or you could run e.g. +# +# $ nix run -f '' nix-prefetch-github -c nix-prefetch-github \ +# --rev 5dbf5f90d97c0af9efd36ecfdb8648e74ce39532 NixOS nixpkgs +# diff --git a/build/default.nix b/build/default.nix new file mode 100644 index 0000000..6f3c131 --- /dev/null +++ b/build/default.nix @@ -0,0 +1,48 @@ +# +# Adapted from https://github.com/c0c0n3/hasnix +# +# TODO: rather go with one of the project templates below? +# - https://github.com/nix-dot-dev/getting-started-nix-template +# - https://github.com/vlktomas/nix-examples +# + +{ + pkgs ? import {} +}: + +let + inherit (pkgs) fetchFromGitHub; + version = import ./config/version.nix; + project = import ./config/project.nix; + +in rec { + + nixpin = fetchFromGitHub version.nixpkgsGitHub; + + fixBrokenPkgsOverlay = self: super: { + kubebuilder = super.callPackage ./pkgs/kubebuilder.nix { }; + }; + + pinnedPkgs = import nixpin { + overlays = [ fixBrokenPkgsOverlay ]; + }; + + devTools = with pinnedPkgs; { + # stuff listed in the source-watcher tute + inherit go kubebuilder kind kubectl kustomize fluxcd; + # TODO ideally we should include docker too... + + # VS code go extension deps + inherit gopls delve gopkgs go-outline gomodifytags impl gotests; + inherit go-tools; # = staticcheck + # missing from nixpkgs: goplay; leaving this out + + # Only needed to connect to the Malaga demo cluster. + inherit openvpn; + }; + + devShell = pinnedPkgs.mkShell { + buildInputs = builtins.attrValues devTools; + }; + +} diff --git a/build/pkgs/kubebuilder.nix b/build/pkgs/kubebuilder.nix new file mode 100644 index 0000000..7a2917c --- /dev/null +++ b/build/pkgs/kubebuilder.nix @@ -0,0 +1,84 @@ +# +# Fix kubebuilder package. It's broken in the nixkpkgs version we're using. 
+# The problem is just a silly source hash mismatch: +# +# trying https://github.com/kubernetes-sigs/kubebuilder/archive/v3.1.0.tar.gz +# % Total % Received % Xferd Average Speed Time Time Time Current +# Dload Upload Total Spent Left Speed +# 100 135 100 135 0 0 602 0 --:--:-- --:--:-- --:--:-- 602 +# 100 1816k 0 1816k 0 0 2959k 0 --:--:-- --:--:-- --:--:-- 2959k +# unpacking source archive /private/tmp/nix-build-source.drv-0/v3.1.0.tar.gz +# hash mismatch in fixed-output derivation '/nix/store/l2mvac03b398x7jnhbqdf9051k4rsini-source': +# wanted: sha256:1726j2b5jyvllvnk60g6px3g2jyyphd9pc4vgid45mis9b60sh8a +# got: sha256:0bl5ff2cplal6hg75800crhyviamk1ws85sq60h4zg21hzf21y68 +# cannot build derivation '/nix/store/if764s9fl71ihg60sifgr2a9ffp8qb24-kubebuilder-3.1.0.drv': 1 dependencies couldn't be built +# error: build of '/nix/store/if764s9fl71ihg60sifgr2a9ffp8qb24-kubebuilder-3.1.0.drv' failed +# +# I tried fixing it with an overlay, but that didn't work: +# +# fixBrokenPkgsOverlay = self: super: { +# kubebuilder = super.kubebuilder.overrideAttrs (oldAttrs: rec { +# version = "3.1.0"; +# src = super.fetchFromGitHub { +# owner = "kubernetes-sigs"; +# repo = "kubebuilder"; +# rev = "v${version}"; +# sha256 = "0bl5ff2cplal6hg75800crhyviamk1ws85sq60h4zg21hzf21y68"; +# }; +# }); +# }; +# +# I think the problem could be that `overrideAttrs` works with `mkDerivation`, +# but the kubebuilder package uses `buildGoModule`? In fact, with the above +# overlay I get the exact same error as if the `src` attribute hasn't been +# overridden. 
+# +{ lib +, buildGoModule +, fetchFromGitHub +, installShellFiles +, makeWrapper +, git +, go +}: + +buildGoModule rec { + pname = "kubebuilder"; + version = "3.1.0"; + + src = fetchFromGitHub { + owner = "kubernetes-sigs"; + repo = "kubebuilder"; + rev = "v${version}"; + sha256 = "0bl5ff2cplal6hg75800crhyviamk1ws85sq60h4zg21hzf21y68"; + }; + vendorSha256 = "0zxyd950ksjswja64rfri5v2yaalfg6qmq8215ildgrcavl9974n"; + + subPackages = ["cmd" "pkg/..."]; + + preBuild = '' + export buildFlagsArray+=("-ldflags=-X main.kubeBuilderVersion=v${version} \ + -X main.goos=$GOOS \ + -X main.goarch=$GOARCH \ + -X main.gitCommit=v${version} \ + -X main.buildDate=v${version}") + ''; + + doCheck = true; + + postInstall = '' + mv $out/bin/cmd $out/bin/kubebuilder + wrapProgram $out/bin/kubebuilder \ + --prefix PATH : ${lib.makeBinPath [ go ]} + ''; + + allowGoReference = true; + nativeBuildInputs = [ makeWrapper git ]; + + meta = with lib; { + homepage = "https://github.com/kubernetes-sigs/kubebuilder"; + description = "SDK for building Kubernetes APIs using CRDs"; + license = licenses.asl20; + maintainers = with maintainers; [ cmars ]; + }; +} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml new file mode 100644 index 0000000..e69de29 diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml new file mode 100644 index 0000000..eeb5bd7 --- /dev/null +++ b/config/default/kustomization.yaml @@ -0,0 +1,9 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: source-system +resources: + - ../rbac + - ../manager + - github.com/fluxcd/source-controller/config//crd?ref=v0.2.0 + - github.com/fluxcd/source-controller/config//manager?ref=v0.2.0 + - namespace.yaml diff --git a/config/default/namespace.yaml b/config/default/namespace.yaml new file mode 100644 index 0000000..6743d7f --- /dev/null +++ b/config/default/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: 
controller + name: source-system diff --git a/config/manager/deployment.yaml b/config/manager/deployment.yaml new file mode 100644 index 0000000..a7eb627 --- /dev/null +++ b/config/manager/deployment.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: source-watcher + labels: + control-plane: controller +spec: + selector: + matchLabels: + app: source-watcher + replicas: 1 + template: + metadata: + labels: + app: source-watcher + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: manager + image: source-watcher + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + ports: + - containerPort: 8080 + name: http-prom + env: + - name: RUNTIME_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - --log-level=info + - --log-json + - --enable-leader-election + livenessProbe: + httpGet: + port: http + path: / + readinessProbe: + httpGet: + port: http + path: / + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 50m + memory: 64Mi + volumeMounts: + - name: tmp + mountPath: /tmp + volumes: + - name: tmp + emptyDir: {} + diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml new file mode 100644 index 0000000..883979b --- /dev/null +++ b/config/manager/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- deployment.yaml +images: +- name: source-watcher + newName: controller + newTag: latest diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml new file mode 100644 index 0000000..eaa9257 --- /dev/null +++ b/config/rbac/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml diff --git 
a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000..eaa7915 --- /dev/null +++ b/config/rbac/leader_election_role.yaml @@ -0,0 +1,32 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000..eed1690 --- /dev/null +++ b/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml new file mode 100644 index 0000000..c8c9924 --- /dev/null +++ b/config/rbac/role.yaml @@ -0,0 +1,22 @@ + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: source-reader +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - gitrepositories + verbs: + - get + - list + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - gitrepositories/status + verbs: + - get diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml new file mode 100644 index 0000000..60cf644 --- /dev/null +++ b/config/rbac/role_binding.yaml @@ -0,0 +1,25 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: source-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + 
name: source-reader +subjects: +- kind: ServiceAccount + name: default + namespace: source-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: source-writter +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: default + namespace: source-system diff --git a/controllers/gitrepository_predicate.go b/controllers/gitrepository_predicate.go new file mode 100644 index 0000000..ac89349 --- /dev/null +++ b/controllers/gitrepository_predicate.go @@ -0,0 +1,67 @@ +/* +Copyright 2020, 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" +) + +// GitRepositoryRevisionChangePredicate triggers an update event +// when a GitRepository revision changes. 
+type GitRepositoryRevisionChangePredicate struct { + predicate.Funcs +} + +func (GitRepositoryRevisionChangePredicate) Create(e event.CreateEvent) bool { + src, ok := e.Object.(sourcev1.Source) + + if !ok || src.GetArtifact() == nil { + return false + } + + return true +} + +func (GitRepositoryRevisionChangePredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil || e.ObjectNew == nil { + return false + } + + oldSource, ok := e.ObjectOld.(sourcev1.Source) + if !ok { + return false + } + + newSource, ok := e.ObjectNew.(sourcev1.Source) + if !ok { + return false + } + + if oldSource.GetArtifact() == nil && newSource.GetArtifact() != nil { + return true + } + + if oldSource.GetArtifact() != nil && newSource.GetArtifact() != nil && + oldSource.GetArtifact().Revision != newSource.GetArtifact().Revision { + return true + } + + return false +} diff --git a/controllers/gitrepository_watcher.go b/controllers/gitrepository_watcher.go new file mode 100644 index 0000000..18a6852 --- /dev/null +++ b/controllers/gitrepository_watcher.go @@ -0,0 +1,139 @@ +/* +Copyright 2020, 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/fluxcd/pkg/untar" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + "github.com/martel-innovate/osmops/osmops/engine" +) + +// GitRepositoryWatcher watches GitRepository objects for revision changes +type GitRepositoryWatcher struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/status,verbs=get + +func (r *GitRepositoryWatcher) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := logr.FromContext(ctx) + + // get source object + var repository sourcev1.GitRepository + if err := r.Get(ctx, req.NamespacedName, &repository); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + log.Info("New revision detected", "revision", repository.Status.Artifact.Revision) + + // create tmp dir + tmpDir, err := ioutil.TempDir("", repository.Name) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create temp dir, error: %w", err) + } + defer os.RemoveAll(tmpDir) + + // download and extract artifact + summary, err := r.fetchArtifact(ctx, repository, tmpDir) + if err != nil { + log.Error(err, "unable to fetch artifact") + return ctrl.Result{}, err + } + log.Info(summary) + + if engine, err := engine.New(ctx, tmpDir); err != nil { + // no need to log engine init error, engine.New already does that. + return ctrl.Result{}, err + } else { + engine.Reconcile() + // TODO figure out if we should actually return some kind of (partial) + // error if some of the reconciliation ops fail. 
+ } + + // // list artifact content + // files, err := ioutil.ReadDir(tmpDir) + // if err != nil { + // return ctrl.Result{}, fmt.Errorf("failed to list files, error: %w", err) + // } + + // // do something with the artifact content + // for _, f := range files { + // log.Info("Processing " + f.Name()) + // } + + return ctrl.Result{}, nil +} + +func (r *GitRepositoryWatcher) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&sourcev1.GitRepository{}, builder.WithPredicates(GitRepositoryRevisionChangePredicate{})). + Complete(r) +} + +func (r *GitRepositoryWatcher) fetchArtifact(ctx context.Context, repository sourcev1.GitRepository, dir string) (string, error) { + if repository.Status.Artifact == nil { + return "", fmt.Errorf("respository %s does not containt an artifact", repository.Name) + } + + url := repository.Status.Artifact.URL + + // for local run: + // kubectl -n flux-system port-forward svc/source-controller 8080:80 + // export SOURCE_HOST=localhost:8080 + if hostname := os.Getenv("SOURCE_HOST"); hostname != "" { + url = fmt.Sprintf("http://%s/gitrepository/%s/%s/latest.tar.gz", hostname, repository.Namespace, repository.Name) + } + + // download the tarball + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", fmt.Errorf("failed to create HTTP request, error: %w", err) + } + + resp, err := http.DefaultClient.Do(req.WithContext(ctx)) + if err != nil { + return "", fmt.Errorf("failed to download artifact from %s, error: %w", url, err) + } + defer resp.Body.Close() + + // check response + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("failed to download artifact, status: %s", resp.Status) + } + + // extract + summary, err := untar.Untar(resp.Body, dir) + if err != nil { + return "", fmt.Errorf("faild to untar artifact, error: %w", err) + } + + return summary, nil +} diff --git a/docs/arch/README.md b/docs/arch/README.md new file mode 100644 index 0000000..bc8147f --- /dev/null 
+++ b/docs/arch/README.md @@ -0,0 +1,54 @@ +OSM Ops Software Architecture +----------------------------- +> A technical map of the software. + +This document describes (only) the **technical** aspects of the OSM +Ops architecture through a set of interlocked architectural viewpoints. +The document is mainly aimed at developers who need to understand +the big picture before modifying the architecture or extending the +code with new functionality. + + +### Document status + +**Work in progress**. Even though this document is a first draft and +many sections need to be written, the included material should be +enough to gain a basic understanding of the OSM Ops architecture. + + +### Prerequisites + +We assume the reader is well versed in distributed systems and cloud +computing. Moreover, the reader should be familiar with the following +technologies: HTTP/REST, Docker, Kubernetes (in particular the Operator +architecture), Go, Kubebuilder, IaC/DevOps/GitOps, FluxCD, Open Source +MANO. + + +### Table of contents + +1. [Introduction][intro]. The basic ideas are summarised here and then + further developed in later sections. +2. [System requirements][requirements]. An account of functional + requirements and system quality attributes. +3. [Information model][info-model]. What information the system handles + and how it is represented and processed. +4. [System decomposition][components]. Components, interfaces and + modularity. +5. [Interaction mechanics][interaction]. Distributed communication + protocols and synchronisation, caching. +6. [Implementation][implementation]. Codebase essentials. +7. [Deployment and scalability][deployment]. +8. [Quality assurance][qa]. 
+ + + + +[components]: ./components.md +[deployment]: ./deployment.md +[implementation]: ./implementation.md +[info-model]: ./info-model.md +[interaction]: ./interaction.md +[intro]: ./intro.md +[qa]: ./qa.md +[requirements]: ./requirements.md diff --git a/docs/arch/arch.context.png b/docs/arch/arch.context.png new file mode 100644 index 0000000..514a133 Binary files /dev/null and b/docs/arch/arch.context.png differ diff --git a/docs/arch/arch.impl-overview.png b/docs/arch/arch.impl-overview.png new file mode 100644 index 0000000..f7703ab Binary files /dev/null and b/docs/arch/arch.impl-overview.png differ diff --git a/docs/arch/components.md b/docs/arch/components.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/arch/deployment.md b/docs/arch/deployment.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/arch/implementation.md b/docs/arch/implementation.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/arch/info-model.md b/docs/arch/info-model.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/arch/interaction.md b/docs/arch/interaction.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/arch/intro.md b/docs/arch/intro.md new file mode 100644 index 0000000..c24a64c --- /dev/null +++ b/docs/arch/intro.md @@ -0,0 +1,207 @@ +Introduction +------------ +> The why, the what and the how. + +This introductory section first touches on project motivation and goals, +then goes on to sketching the architecture conceptual model and how +it has been implemented through a Kubernetes operator plugged into +the FluxCD framework. + + +### Motivation and goals + +The [Affordable5G][a5g] project adopts Open Source MANO (OSM) to +virtualise and orchestrate network functions, simplify infrastructure +operation, and achieve faster service deployment. 
One of the Affordable5G +objectives is to explore the continuous delivery of services through +GitOps workflows whereby the state of an OSM Kubernetes deployment +is described by version-controlled text files which a tool then interprets +to achieve the desired deployment state in the live OSM cluster. +Although OSM features a sophisticated toolset for the packaging, +deployment and operation of services, GitOps workflows for Kubernetes +network functions (KNFs) are not fully supported yet. Hence the need, +within Affordable5G, of a software to complement OSM’s capabilities +with GitOps. + +Automated, version-controlled service delivery has several benefits. +Automation shortens deployment time and ensures reproducibility of +deployment states. In turn, reproducibility dramatically reduces the +time needed to recover from severe production incidents caused by +faulty deployments as the OSM cluster can swiftly be reverted to a +previous, known-to-be-working deployment state stored in the Git +repository. Thus, overall cluster stability and service availability +are enhanced. Moreover, the Git repository stores information about +who modified the OSM cluster state when, thus furnishing an audit +trail that may help to detect security breaches and failure to comply +with regulations such as GDPR. + + +### Conceptual model + +OSM Ops is a cloud-native micro-service to implement GitOps workflows +within OSM. The basic idea is to describe the state of an OSM deployment +through version-controlled text files hosted in an online Git repository. +Each file declares a desired instantiation and runtime configuration +for some of the services in a specified OSM cluster. Collectively, +the files at a given Git revision describe the deployment state of +these services at a certain point in time. OSM Ops monitors the Git +repository in order to automatically reconcile the desired deployment +state with the actual live state of the OSM cluster. 
OSM Ops is implemented +as a [Kubernetes][k8s] operator that plugs into the [FluxCD][flux] +framework in order to leverage the rich Kubernetes/FluxCD GitOps +ecosystem. The following visual illustrates the context in which +OSM Ops operates and exemplifies the GitOps workflow resulting in +the creation and update of KNFs from version-controlled deployment +declarations. + +![Architecture context diagram.][dia.ctx] + +From the system administrator's perspective, the GitOps workflow is +as follows. She initially installs, through OSM packages, the deployment +descriptors (typically NSD, VNFD and KDU) for each KNF that she would +like to operate. Each KDU references a Helm chart describing the Kubernetes +resources (service, deployment, etc.) which constitute a KNF. To instantiate +a KNF, OSM has to be able to fetch the corresponding Helm chart. Usually, +Helm charts are maintained in an online repository that the system +administrator takes care of connecting to OSM—e.g. by adding the repository +to the OSM database with the `osm` command line tool. Likewise, since +Helm charts, in turn, reference container images, the system administrator +has to make sure that all the images required for her KNFs can be +downloaded from within the OSM cluster. The usual arrangement here +is that a container registry service provides the needed images to +OSM. Again, the system administrator takes care of this initial setup +step. The diagram exemplifies these initial installation and setup +steps with two sets of OSM deployment descriptors, one for OpenLDAP +and the other for TensorFlow, each referencing their respective Helm +charts in an online Git repository and, in turn, the charts reference +container images in a public Docker registry. + +After provisioning KNF descriptors, the system administrator can then +edit text files to declare the desired deployment state of her KNFs. 
+Soon after she commits these files to the OSM Ops descriptor repository, +a background reconciliation process is set in motion that ultimately +results in NS instances running in the OSM cluster with the desired +deployment configuration. The diagram depicts a scenario where the +system administrator commits a new revision, `v6`, to the OSM Ops +descriptor repository. The `v6` files collectively declare that the +OSM cluster should run an OpenLDAP KNF with three replicas and a +TensorFlow KNF with one replica. As hinted by the diagram, the last +time the reconciliation process ran, it realised the deployment configuration +declared in revision `v5` which demanded an OpenLDAP KNF with two +replicas. Therefore to realise the `v6` configuration, the outcome +of the reconciliation process should be that another replica is added +to the existing OpenLDAP KNF and a brand new TensorFlow KNF is created +with one replica. + +We now turn our attention to the reconciliation process that runs +behind the scenes. FluxCD detects any changes to the OSM Ops descriptor +repository and forwards new revisions to OSM Ops for processing. On +receiving a new revision, OSM Ops determines which KNFs to create +and which to update. It then calls the OSM cluster manager to actually +create or update the KNFs declared in that revision. In turn, the OSM +cluster manager orchestrates calls to Helm and Kubernetes to fulfill +the requested create and update operations which usually also involve +fetching Helm charts from a repository and pulling container images. +The diagram illustrates the reconciliation process for revision `v6`. +(Bear in mind, the diagram shows a conceptual, high-level message +flow, the next section provides a more accurate description.) + + +### Implementation overview + +Having defined the abstract ideas, we are now ready to explain how +they have been realised. 
In a nutshell, OSM Ops is a Kubernetes operator +that gets notified of any changes to an online Git repository monitored +by FluxCD and then uses OSM’s north-bound interface (NBI) to realise +the KNF deployment configurations found in that repository. + +These deployment configurations are declared through OSM Ops YAML +files. Each file specifies a desired instantiation and runtime configuration +(e.g. number of replicas) of a KNF previously defined within OSM by +installing suitable OSM descriptor packages, Helm charts, etc. For +example, the following YAML file demands that the live OSM cluster +run a 2-replica NS instance called `ldap` within the VIM identified +by the given VIM account and that the service be configured according +to the definitions found in the named OSM descriptors—the referenced +NSD, VNFD and KDU are actually defined in the OpenLDAP OSM packages +published by Telefonica. + +```yaml +kind: NsInstance +name: ldap +description: Demo LDAP NS instance +nsdName: openldap_ns +vnfName: openldap +vimAccountName: mylocation1 +kdu: + name: ldap + params: + replicaCount: "2" +``` + +Source Controller is a FluxCD service that, among other things, manages +interactions with online Git repositories—e.g. repositories hosted +on GitHub or GitLab. OSM Ops depends on it both for monitoring repositories +and for fetching the repository content at a given revision. Source +Controller arranges a Kubernetes custom resource for each repository +that it monitors and then polls each repository to detect new revisions. +As soon as a new revision becomes available, Source Controller updates +the corresponding Git repository custom resource in Kubernetes. + +OSM Ops implements the Kubernetes Operator interface to get notified +of any changes to Git repository custom resources. Thus, soon after +Source Controller updates a Git repository custom resource, Kubernetes +dispatches an update event to OSM Ops. 
This arrangement is akin to +the publish-subscribe pattern often found in messaging systems: Source +Controller, the publisher, sends a message to Kubernetes, the broker, +which results in the broker notifying OSM Ops, the subscriber. The +publisher and the subscriber have no knowledge of each other (no space +coupling) and communication is asynchronous (no time coupling). + +At this point, OSM Ops enters the reconcile phase in which it tries +to align the deployment state declared in the OSM Ops YAML files with +that of the live OSM cluster. It fetches the content of the notified +Git revision from Source Controller as a tarball and then uses OSM's +NBI to transition the OSM cluster to the deployment state declared +in the OSM Ops YAML files found in the tarball. For each file, OSM +Ops determines whether to create or update the KNF specified in the +file and then configures it according to the KNF parameters given +in that file. + +The UML communication diagram below summarises the typical workflow +through which OSM Ops turns the deployment state declared in a Git +repository into actual NS instances. The workflow begins with a system +administrator pushing a new revision, `v6`, to the online Git repository. +It then continues as just explained, with Source Controller updating +the Git custom resource, Kubernetes notifying OSM Ops and OSM Ops +calling the NBI to achieve the deployment state declared in `v6`. + +![Implementation overview.][dia.impl] + + +### Rationale + +What is the rationale behind our design decisions? A few explanatory +words are in order. 
+ +**TODO** +- evaluated two leading GitOps solutions: ArgoCD & FluxCD +- similar capabilities but ArgoCD comes with powerful UI +- convergence: the two projects will likely be merged in the + future—ref merger plans +- FluxCD has better docs about extending it with custom functionality + which is what in the end tipped the balance in its favour +- Go was a natural PL choice b/c FluxCD and K8s libs are both + written in Go + + + + +[a5g]: https://www.affordable5g.eu/ + "Affordable5G" +[dia.ctx]: ./arch.context.png +[dia.impl]: ./arch.impl-overview.png +[flux]: https://fluxcd.io/ + "Flux - the GitOps family of projects" +[k8s]: https://en.wikipedia.org/wiki/Kubernetes + "Kubernetes" \ No newline at end of file diff --git a/docs/arch/qa.md b/docs/arch/qa.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/arch/requirements.md b/docs/arch/requirements.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/demos/README.md b/docs/demos/README.md new file mode 100644 index 0000000..d97b48f --- /dev/null +++ b/docs/demos/README.md @@ -0,0 +1,38 @@ +Demos +----- +> Hands-on journeys through core features and deployment scenarios. + +OSM Ops is a proof-of-concept tool to implement IaC workflows with +OSM. The basic idea behind the tool is to describe the state of an +OSM deployment through version-controlled text files which the tool +interprets to achieve the desired deployment state in a given OSM +cluster. If this is Greek to you, go read the architecture [intro][arch.intro] +before venturing in the madness below :-) + +We've got a few demos you can run yourself to try out OSM Ops. The +overall goal of each demo is the same: show how an admin could just +edit YAML files in GitHub to create, update and configure KNFs in her +OSM cluster along the lines of the examples in the [intro][arch.intro]—you've +read that, haven't you :-) But each demo happens in a slightly different +deployment setting. 
So here are the demos: + +* [Local clusters][demo.local]. Build a local OSM release 10 cluster + and a separate Kind cluster to host OSM Ops. Then simulate commits + to this repository to watch OSM Ops create and update an OpenLDAP + KNF. All that in the comfort of your laptop. +* [Dev mode][demo.dev]. Same as above, but OSM Ops now runs directly on + your box, outside the cluster. Comes in handy if you want to easily + debug OSM Ops to see what it's actually up to. +* [Malaga][demo.malaga]. Demo given at the Affordable5G official review + in November 2021 with a fully-fledged deployment in Malaga. +* [Packaging][demo.pack]. Build on the local clusters demo to showcase + how OSM Ops can also create and update OSM packages from sources. + + + + +[arch.intro]: ../arch/intro.md +[demo.dev]: ./dev-mode.md +[demo.local]: ./local-clusters.md +[demo.malaga]: ./malaga.md +[demo.pack]: ./pack.md \ No newline at end of file diff --git a/docs/demos/demo.local-clusters.png b/docs/demos/demo.local-clusters.png new file mode 100644 index 0000000..ab6883b Binary files /dev/null and b/docs/demos/demo.local-clusters.png differ diff --git a/docs/demos/demo.malaga.png b/docs/demos/demo.malaga.png new file mode 100644 index 0000000..aada8b2 Binary files /dev/null and b/docs/demos/demo.malaga.png differ diff --git a/docs/demos/dev-mode.md b/docs/demos/dev-mode.md new file mode 100644 index 0000000..159afc7 --- /dev/null +++ b/docs/demos/dev-mode.md @@ -0,0 +1,192 @@ +Dev mode +-------- +> Run OSM GitOps on your box, building everything from scratch. + +Build a local OSM release 10 cluster and a separate Kind cluster to +host FluxCD. Build and run OSM Ops directly on your box. Then simulate +commits to this repository to watch OSM Ops create and update an +OpenLDAP KNF. + +Notice this demo is almost the same as the [Local clusters][demo.local] +demo, except OSM Ops now runs directly on your box, outside the Kind +cluster. 
This setup comes in handy if you want to easily debug OSM +Ops to see what it's actually up to. + + + +## Build & deploy + +As in the [Local clusters][demo.local] demo, we'll build one Multipass +VM hosting an OSM release 10 cluster configured with a Kubernetes VIM. +We'll also build a Kind cluster, but unlike the [Local clusters][demo.local] +demo, the cluster will only host Source Controller. We'll run OSM +Ops directly on localhost and connect it to Source Controller through +a proxy. + + +### Before you start... + +Same requirements as in the [Local clusters][demo.local] demo. + + +### OSM cluster + +Build and run it just like in the [Local clusters][demo.local] demo. + + +### Kind cluster + +Open a terminal in this repo's root dir, then create a Kind cluster +and deploy Source Controller in it: + +```bash +# nix-shell starts a Nix shell with all the tools we're going to need. +$ nix-shell + +$ kind create cluster --name dev +$ flux check --pre +$ flux install \ + --namespace=flux-system \ + --network-policy=false \ + --components=source-controller +``` + +Then have `kubeclt` proxy calls to port `8181` on localhost to the +Source Controller service inside the cluster: + +```bash +$ kubectl -n flux-system port-forward svc/source-controller 8181:80 +``` + +Keep this terminal open since the proxy process will have to be up +for the entire duration of the demo. In fact, OSM Ops will run on +localhost and connect to port `8181` to talk to Source Controller. + + +### OSM Ops + +We'll run OSM Ops outside the cluster. Open a new terminal in the +repo root dir and run + +```bash +$ nix-shell +$ export SOURCE_HOST=localhost:8181 +$ make run +``` + +The `SOURCE_HOST` environment variable tells OSM Ops to connect to +Source Controller on localhost at port `8181`. Keep this terminal +open since the OSM Ops process will have to be up for the entire +duration of the demo. 
+ +Similar to the [Local clusters][demo.local] demo, we're going to have +OSM Ops create and update an NS instance (OpenLDAP KNF) by looking +at the OSM GitOps files in this repo's `_deployment_` dir, but at +tags `test.0.0.3` and `test.0.0.4` instead of `test.0.0.5` and `test.0.0.6`. +The `osm_ops_config.yaml` file is the same for both `test.0.0.3` and +`test.0.0.4` and points to an NBI connection file sitting on the same +box where OSM Ops runs: `/tmp/osm_ops_secret.yaml`. You need to +create this file on your box's `/tmp` directory with the following +content: + +```yaml +hostname: 192.168.64.19:80 +project: admin +user: admin +password: admin +``` + +but replace `192.168.64.19` with the OSM IP address you noted down +earlier. + + + +## Doing GitOps with OSM + +The workflow here is basically the same as in the [Local clusters][demo.local] +demo. In fact, we're going to create and then update the same OpenLDAP +KNF through OSM Ops YAML files, except we'll use the files at tags +`test.0.0.3` and `test.0.0.4` instead of `test.0.0.5` and `test.0.0.6`. + + +### Setting up the OSM Ops pipeline + +Open yet another terminal in the repo root dir, then create a test +GitHub source within Flux + +```bash +$ nix-shell +$ flux create source git test \ + --url=https://github.com/c0c0n3/source-watcher \ + --tag=test.0.0.3 +``` + +This command creates a Kubernetes GitRepository custom resource. As +soon as Source Controller gets notified of this new custom resource, +it'll fetch the content of `test.0.0.3` and make it available to OSM +Ops which will then realise the deployment config declared in the +YAML file in our local OSM cluster running on Multipass. + +How does OSM Ops find Source Controller? Like we saw earlier OSM Ops +got configured to connect to Source Controller on localhost at port +`8181`. What about OSM? 
As explained earlier, both tags `test.0.0.3` +and `test.0.0.4` come with an `osm_ops_config.yaml` file that says +the NBI connection file sits on localhost: `/tmp/osm_ops_secret.yaml`. + + +### Watching reconciliation as it happens + +Now if you switch back to the terminal running OSM Ops, you should +be able to see it processing the files in the `_deployment_` dir as +it was at tag `test.0.0.3`. It should call OSM NBI to create an NS +instance using the OSM OpenLDAP package we uploaded earlier with two +replicas as specified in the `ldap.ops.yaml` in `_deployment_/kdu`. +It's going to take a while for the deployment state to reflect in +the OSM Web UI, but you can check what's going on under the bonnet +by shelling into the OSM VM + +```bash +$ multipass shell osm +% kubectl get ns +# ^ pick the one that looks like an UUID +% kubectl -n fada443a-905c-4241-8a33-4dcdbdac55e7 get pods +# ... you should see two pods being created for the OpenLDAP service +``` + +**Important**. Wait until the two Kubernetes pods are up and running +and the deployment state got updated in the OSM Web UI before moving +on to the next step. + + +### Updating the deployment configuration + +Just like in the [Local clusters][demo.local] demo, we're going to +simulate a commit to change the number replicas in `ldap.ops.yaml` +to `1` and watch OSM Ops trigger a change in the OSM cluster that'll +make the pods scale down to one. To simulate that commit, just make +Flux switch to tag `test.0.0.4` + +```bash +$ flux create source git test \ + --url=https://github.com/c0c0n3/source-watcher \ + --tag=test.0.0.4 +``` + +The content of `ldap.ops.yaml` at tag `test.0.0.4` is the same as that +of tag `test.0.0.3` except for the replica count which is `1`. So you +should see that eventually your NS instance for the OpenLDAP service +gets scaled down to one Kubernetes pod. Be patient, unless you've got +a beefy box, this too will take a while. 
+ + + +### Clean up + +Kill all the processes running in your terminals, then zap the two +clusters as explained in the clean up of the [Local clusters][demo.local] +demo. + + + + +[demo.local]: ./local-clusters.md diff --git a/docs/demos/local-clusters.md b/docs/demos/local-clusters.md new file mode 100644 index 0000000..c2d4a47 --- /dev/null +++ b/docs/demos/local-clusters.md @@ -0,0 +1,404 @@ +Local clusters +-------------- +> Run OSM GitOps on your box, building everything from scratch. + +Build a local OSM release 10 cluster and a separate Kind cluster to +host OSM Ops. Then simulate commits to this repository to watch OSM +Ops create and update an OpenLDAP KNF. All that in the comfort of +your laptop. + + + +## Build & deploy + +So we're going to start from scratch and work our way up to run a +fully-fledged OSM GitOps pipeline on your box. Roll up your sleeves, +we'll build two clusters in this section! Here's what our testbed will +look like when we're done: + +![Deployment diagram.][dia.depl] + +The one cluster is a Kubernetes Kind cluster that hosts FluxCD's own +Source Controller and our OSM Ops service, both in the `flux-system` +namespace. The other cluster is a release 10 OSM cluster configured +with a Kubernetes VIM, all inside a Multipass (QEMU) VM. Both beasts +run on your box and connect through a local bridge network. Source +Controller monitors this very OSM Ops repository you're looking at +on GitHub. The OSM Ops service connects both to Source Controller, +within the Kind cluster, and to the OSM north-bound interface (NBI) +running inside the Multipass VM. + + +### Before you start... + +Clone this repo locally, then install + +* Nix - https://nixos.org/guides/install-nix.html +* Docker >= 19.03 +* Multipass >= 1.6.2 + +Keep in mind you're going to need a beefy box to run this demo smoothly. 
+With lots of effort and patience, I've managed to run it on my 4 core, 8 +GB RAM laptop but my guess is that you'd need a box with at least double +that horse power. + + +### OSM cluster + +Spin up an Ubuntu 18.04 VM with Multipass and install OSM release 10 +in it: + +```bash +$ multipass launch --name osm --cpus 2 --mem 6G --disk 40G 18.04 +$ multipass shell osm +% wget https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh +% chmod +x install_osm.sh +% ./install_osm.sh 2>&1 | tee install.log +% exit +``` + +Notice I couldn't actually install OSM release 10 because of a few +issues with the installer. By the time you try this, hopefully the +OSM guys will have fixed those bugs and you'll have a smooth ride. +But if it gets bumpy, you can try my patched OSM install scripts by +following the steps in [multipass.install.sh][osm-install]. + +Once you've got a base OSM 10 cluster up and running, you've got to +configure [KNF infra for an isolated Kubernetes cluster][osm.knf-setup]: + +```bash +$ multipass shell osm +% wget https://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh +% osm vim-create --name mylocation1 --user u --password p --tenant p \ + --account_type dummy --auth_url http://localhost/dummy +% osm k8scluster-add cluster --creds .kube/config --vim mylocation1 \ + --k8s-nets '{k8s_net1: null}' --version "v1.15.12" \ + --description="Isolated K8s cluster at mylocation1" +% exit +``` + +Also, you've got to add some repos where OSM can fetch Helm charts +from: + +```bash +$ multipass shell osm +% osm repo-add --type helm-chart --description "Bitnami repo" bitnami https://charts.bitnami.com/bitnami +% osm repo-add --type helm-chart --description "Cetic repo" cetic https://cetic.github.io/helm-charts +% osm repo-add --type helm-chart --description "Elastic repo" elastic https://helm.elastic.co +% exit +``` + +When done, upload the OSM OpenLDAP packages we're going to use to create +NS instances. 
To do that, open a terminal in this repo's root dir, then: + +```bash +$ cd _tmp/osm-pkgs +$ multipass mount ./ osm:/mnt/osm-pkgs +$ multipass shell osm +% cd /mnt/osm-pkgs +% osm nfpkg-create openldap_knf.tar.gz +% osm nspkg-create openldap_ns.tar.gz +% exit +``` + +Note down the VM's IPv4 address where the OSM NBI can be accessed: + +```bash +$ multipass info osm +``` + +It should be the first one on the list, the `192.168.*` one. + + +### Kind cluster + +Open a terminal in this repo's root dir, then create a Kind cluster +and deploy Source Controller in it: + +```bash +# nix-shell starts a Nix shell with all the tools we're going to need. +$ nix-shell + +$ kind create cluster --name dev +$ flux check --pre +$ flux install \ + --namespace=flux-system \ + --network-policy=false \ + --components=source-controller +``` + +Next, build and deploy OSM Ops. First off, build the Go code, create +a Docker image for the service, and upload it to Kind's own local +Docker registry: + +```bash +$ make docker-build +$ kind load docker-image ghcr.io/c0c0n3/osmops:latest --name dev +``` + +We need to tell OSM Ops how to connect to the OSM NBI. Create an +`nbi-connection.yaml` file with the content below + +```yaml +hostname: 192.168.64.19:80 +project: admin +user: admin +password: admin +``` + +but replace `192.168.64.19` with the OSM IP address you noted down +earlier. (The username and password are those of the default OSM +admin user that gets created automatically for you during the OSM +installation.) 
Since we've got a password there, we'll stash this +config away in a Kubernetes secret: + +```bash +$ kubectl -n flux-system create secret generic nbi-connection \ + --from-file nbi-connection.yaml +``` + +Finally, deploy OSM Ops to the Kind cluster: + +```bash +$ kubectl apply -f _deployment_/osmops.deploy.yaml +``` + +If you open up `osmops.deploy.yaml`, you'll see the OSM Ops service +gets deployed to the same namespace of Source Controller, namely +`flux-system`, and runs under the same account. Also, notice our +secret above becomes available to OSM Ops at + + /etc/osmops/nbi-connection.yaml + +More about it later. + + + +## Doing GitOps with OSM + +After toiling away at prep steps, we're finally ready for some GitOps +action. In fact, we're going to create and then update an OpenLDAP +KNF through OSM Ops YAML files. Specifically, we'll start off with +an initial Git revision of this file + +```yaml +kind: NsInstance +name: ldap +description: Demo LDAP NS instance +nsdName: openldap_ns +vnfName: openldap +vimAccountName: mylocation1 +kdu: + name: ldap + params: + replicaCount: "2" +``` + +which says we want the OSM cluster to run a two-replica OpenLDAP KNF +named `ldap` that OSM should instantiate using the NSD, VNFD and KDU +descriptors found in the OSM packages we installed earlier. Also, +notice the VIM account name is that of the VIM we set up earlier for +the KNF infra. We'll watch OSM Ops process the file and the OSM cluster +end up with a brand new NS instance: an OpenLDAP Kubernetes service +with two pods. Then we'll change the number of replicas in the YAML +to `1`, commit the change to the Git repo and watch OSM Ops trigger +a change in the OSM cluster that'll make the pods scale down to one. +How fun! + + +### Setting up the OSM Ops pipeline + +The OSM Ops show starts as soon as you connect a Git repo through +FluxCD. Out of convenience, we're going to use this very repo on +GitHub starting at tag `test.0.0.5`. 
In fact, that tagged revision +contains a `_deployment_/kdu/ldap.ops.yaml` file with the YAML above. +Open a terminal in your local repo root dir and create a test Git +source within Flux like so: + +```bash +$ nix-shell +$ flux create source git test \ + --url=https://github.com/c0c0n3/source-watcher \ + --tag=test.0.0.5 +``` + +This command creates a Kubernetes GitRepository custom resource. As +soon as Source Controller gets notified of this new custom resource, +it'll fetch the content of `test.0.0.5` and make it available to OSM +Ops which will then realise the deployment config declared in the +YAML file in our local OSM cluster running on Multipass. But how can +OSM Ops know what OSM cluster to connect to? Well, OSM Ops looks for +a YAML config file, `osm_ops_config.yaml`, in the root of the repo +directory tree it gets from Source Controller. At `test.0.0.5`, our +repo has an `osm_ops_config.yaml` in the root dir with this content: + +```yaml +targetDir: _deployment_ +fileExtensions: + - .ops.yaml +connectionFile: /etc/osmops/nbi-connection.yaml +``` + +This configuration tells OSM Ops to get the OSM connection details +from `/etc/osmops/nbi-connection.yaml`. Ha! Remember that Kubernetes +secret mounted on the OSM Ops pod? Yep, that's how it happens! The +other fields tell OSM Ops to look for OSM Ops GitOps files in the +`_deployment_` directory (recursively) and only consider files with +an extension of `.ops.yaml`. + + +### Watching reconciliation as it happens + +Now browse to the OSM Web UI at the Multipass IP address you noted +down earlier (e.g. http://192.168.64.19/) and log in with the OSM +admin user—username: `admin`, password: `admin`. You should be able +to see that OSM is busy creating a new NS instance called `ldap`, +similar to what you see on this screenshot: + +![OSM busy creating the OpenLDAP instance.][osm-ui.busy] + +Depending on how much horse power your box has, this could take a +while—think minutes. 
Instead of twiddling your thumbs as you wait, +why not have a look at what's happening under the bonnet? Start a +terminal in the repo root dir and have a look at the OSM Ops service +logs as in the example below: + +```bash +$ nix-shell + +# figure out the name of the OSM Ops pod, it's the one starting with +# 'source-watcher'. +$ kubectl -n flux-system get pods +NAME READY STATUS RESTARTS AGE +source-controller-d58957ccd-pj7p8 1/1 Running 0 7m39s +source-watcher-df9cbc8bf-cjxpq 1/1 Running 0 12s + +# then get the logs. +$ kubectl -n flux-system logs source-watcher-df9cbc8bf-cjxpq +``` + +What you see in the logs should be similar to + +```bash +2021-10-06T16:39:25.179Z INFO controller.gitrepository New revision detected {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system", "revision": "test.0.0.5/59cc9586c318642d9fd2399fa638adb24649d53c"} +2021-10-06T16:39:25.742Z INFO controller.gitrepository Extracted tarball into /tmp/test120670538: 132 files, 38 dirs (362.0482ms) {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system"} +2021-10-06T16:39:26.140Z INFO controller.gitrepository processing {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system", "file": "/tmp/test120670538/_deployment_/kdu/ldap.ops.yaml"} +``` + +In plain English: you should be able to see OSM Ops detect a new Git +revision of `test.0.0.5`, download its content and then process the +`ldap.ops.yaml` file. 
To see what's happening in OSM land, shell into +the OSM VM and you should see two pods being created for the OpenLDAP +service: + +```bash +$ multipass shell osm +% kubectl get ns +# ^ pick the one that looks like an UUID +% kubectl -n fada443a-905c-4241-8a33-4dcdbdac55e7 get pods +NAME READY STATUS RESTARTS AGE +stable-openldap-1-2-7-0046589243-6f9f8b8f6d-n9bz2 0/1 ContainerCreating 0 30s +stable-openldap-1-2-7-0046589243-6f9f8b8f6d-x2mmd 0/1 ContainerCreating 0 30s +``` + +Then some time later the two pods should be fully operational + +```bash +% kubectl -n fada443a-905c-4241-8a33-4dcdbdac55e7 get pods +NAME READY STATUS RESTARTS AGE +stable-openldap-1-2-7-0046589243-6f9f8b8f6d-n9bz2 1/1 Running 0 109s +stable-openldap-1-2-7-0046589243-6f9f8b8f6d-x2mmd 1/1 Running 0 109s +``` + +and eventually that should be reflected in the OSM Web UI too, as in +the screenshot below. + +![OSM done creating the OpenLDAP instance.][osm-ui.done] + +**Important**. Wait until the two Kubernetes pods are up and running +and the deployment state got updated in the OSM Web UI before moving +on to the next step. + + +### Updating the deployment configuration + +As promised, we should change the number of replicas in `ldap.ops.yaml` +to `1`, commit the change to the Git repo and watch OSM Ops trigger +a change in the OSM cluster that'll make the pods scale down to one. +But there's a snag: you can't actually commit to this repo. Stop +jeering, we've got a workaround :-) We can manually force FluxCD to +fetch the revision tagged `test.0.0.6` which has the same content +of `test.0.0.5` except for `ldap.ops.yaml` where the number of replicas +is `1` instead of two—[`test.0.0.6` v `test.0.0.5` diff here][repo.tags-diff]. 
+Open a terminal in your local repo root dir and run: + +```bash +$ nix-shell +$ flux create source git test \ + --url=https://github.com/c0c0n3/source-watcher \ + --tag=test.0.0.6 +``` + +If you then look at the OSM Ops service logs + +```bash +$ kubectl -n flux-system logs source-watcher-df9cbc8bf-cjxpq +``` + +you should be able to spot OSM Ops process the contents of `test.0.0.6` + +```bash +2021-10-06T17:03:45.006Z INFO controller.gitrepository New revision detected {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system", "revision": "test.0.0.6/37ec18d984e7b0e4e0de98ec0061b955c413e4ef"} +2021-10-06T17:03:45.293Z INFO controller.gitrepository Extracted tarball into /tmp/test535104545: 132 files, 38 dirs (126.3411ms) {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system"} +2021-10-06T17:03:45.326Z INFO controller.gitrepository processing {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system", "file": "/tmp/test535104545/_deployment_/kdu/ldap.ops.yaml"} +``` + +Meanwhile in OSM land... + +```bash +$ multipass shell osm +% kubectl get ns +# ^ pick the one that looks like an UUID +% kubectl -n fada443a-905c-4241-8a33-4dcdbdac55e7 get pods +NAME READY STATUS RESTARTS AGE +stable-openldap-1-2-7-0046589243-6f9f8b8f6d-n9bz2 1/1 Terminating 0 15m +stable-openldap-1-2-7-0046589243-6f9f8b8f6d-x2mmd 1/1 Running 0 15m +``` + +one of the OpenLDAP pods should get shut down. Eventually the OSM UI +should reflect your NS instance for the OpenLDAP service got scaled +down to one Kubernetes pod. Be patient, unless you've got a beefy +box, this too will take a while. 
If you then take a look at the NS +operation history for `ldap`, you should see two entries there, one +for the create and the other for the update like in this screenshot: + +![OpenLDAP operations history in OSM.][osm-ui.history] + + + +## Clean up + +Get rid of the Kind cluster with OSM Ops and Source Controller + +```bash +$ kind delete cluster --name dev +``` + +Zap the Multipass OSM VM + +```bash +$ multipass stop osm +$ multipass delete osm +$ multipass purge +``` + + + + +[dia.depl]: ./demo.local-clusters.png +[osm.knf-setup]: https://osm.etsi.org/docs/user-guide/05-osm-usage.html#adding-kubernetes-cluster-to-osm +[osm-install]: ../../_tmp/osm-install/multipass.install.sh +[osm-ui.busy]: ./osm-ui.1.png +[osm-ui.done]: ./osm-ui.2.png +[osm-ui.history]: ./osm-ui.3.png +[repo.tags-diff]: https://github.com/c0c0n3/source-watcher/compare/test.0.0.5...test.0.0.6 diff --git a/docs/demos/malaga.md b/docs/demos/malaga.md new file mode 100644 index 0000000..3d75171 --- /dev/null +++ b/docs/demos/malaga.md @@ -0,0 +1,371 @@ +Affordable5G Malaga Demo +------------------------ +> Showcasing OSM Ops at the Affordable5G official review. + +Use OSM Ops to set up a GitOps pipeline in the Affordable5G Malaga +environment. Then connect a GitHub repo and watch OSM Ops create a +Nemergent KNF in the Malaga cluster. + + +## Setup + +We'll run our demo in the Affordable5G virtual environment in Malaga. +The virtual servers there are already set up with most of the bits +and bobs we're going to need but we still have to roll out our own +OSM Ops stuff before we can demo anything. Here's what the Malaga +environment will look like when we're done: + +![Deployment diagram.][dia.depl] + +As you can see the lay of the land is conceptually similar to that +of the [Local clusters demo][demo.local] we know and love. 
OSM sits +in its own box aptly called `osm` with an IP of `10.11.23.249` and +is configured with a VIM pointing to a Kubernetes cluster (MicroK8s +flavour) made up of two nodes, creatively called `node1` and `node2`, +with IPs of `10.11.23.96` and `10.11.23.97`, respectively. The Kubernetes +cluster hosts FluxCD's own Source Controller and our OSM Ops service, +both in the `flux-system` namespace. Source Controller monitors an +OSM demo repository on GitHub. The OSM Ops service connects both to +Source Controller, within the same cluster, and to the OSM north-bound +interface (NBI) running on `10.11.23.249`, outside the Kubernetes cluster. + + +### Before you start... + +We're not going to build or install anything on your box (phew!), +all the action will take place in the Malaga environment. To be able +to do stuff with the Malaga boxes from your machine, you've got to +set up a VPN. We use OpenVPN with this config file + +* pfSense-TCP4-9443-MARTEL2_Affordable5G-config.ovpn + +but your set up could be different. Also, if you've cloned this repo +locally and have Nix, there's no need to install OpenVPN. Just `cd` +into your local repo root directory and run + +```bash +$ nix-shell +$ sudo openvpn pfSense-TCP4-9443-MARTEL2_Affordable5G-config.ovpn +# ^ replace w/ your own OpenVPN config file +``` + +You'll need the VPN tunnel to be on to be able to run the commands +in the rest of this document. So keep this terminal open and leave +OpenVPN run in the foreground until we're done. + + +### Kubernetes cluster + +So here's the good news: the Malaga environment comes with a two-node +Kubernetes cluster pre-configured for the Affordable5G demo. Specifically, +there's a MicroK8s (version `1.21.5`) cluster made up of the two boxes +we mentioned earlier—`node1` (`10.11.23.96`) and `node2` (`10.11.23.97`). 
+ +But we still need to take care of our own stuff: + +* install and configure the FluxCD CLI on `node2`; +* deploy FluxCD and OSM Ops services to the Kubernetes cluster; +* configure OSM Ops. + +So here goes! SSH into `node2` + +```bash +$ ssh node2@10.11.23.97 +``` + +and install Nix + +```bash +$ curl -L https://nixos.org/nix/install | sh +$ . /home/node2/.nix-profile/etc/profile.d/nix.sh +``` + +Then download the OSM Ops demo bundle and use it to start a Nix shell +with the tools we'll need for the show + +```bash +$ wget https://github.com/c0c0n3/osmops.demo/archive/refs/tags/a5g-0.1.0.tar.gz +$ tar xzf a5g-0.1.0.tar.gz +$ cd osmops.demo-a5g-0.1.0 +$ nix-shell +``` + +Now there's a snag. The FluxCD command (`flux`) won't work with the +`kubectl` version installed on `node2` and it knows zilch about MicroK8s, +so it can't obviously run `microk8s kubectl` instead of plain `kubectl`. +(See [this blog post][flux-mk8s] about it.) But the Nix shell packs +a `kubectl` version compatible with `flux`, so all we need to do is +make plain `kubectl` use the same config as `microk8s kubectl`. + +```bash +$ mkdir -p ~/.kube +$ ln -s /var/snap/microk8s/current/credentials/client.config ~/.kube/config +``` + +With this little hack in place, we can deploy Source Controller + +```bash +$ flux check --pre +$ flux install \ + --namespace=flux-system \ + --network-policy=false \ + --components=source-controller +``` + +Next up, our very own OSM Ops. First off, we need to tell OSM Ops how +to connect to the OSM NBI running on the `osm` box (`10.11.23.249`). 
+Create an `nbi-connection.yaml` file + +```bash +$ nano nbi-connection.yaml +``` + +with the content below + +```yaml +hostname: 10.11.23.249:80 +project: admin +user: admin +password: admin +``` + +Since we've got a password there, we'll stash this config away in a +Kubernetes secret: + +```bash +$ kubectl -n flux-system create secret generic nbi-connection \ + --from-file nbi-connection.yaml +``` + +Finally, deploy the OSM Ops service to the Kubernetes cluster + +```bash +$ kubectl apply -f deployment/osmops.deploy.yaml +``` + +If you open up `osmops.deploy.yaml`, you'll see the OSM Ops service +gets deployed to the same namespace of Source Controller, namely +`flux-system`, and runs under the same account. Also, notice our +secret above becomes available to OSM Ops at + + /etc/osmops/nbi-connection.yaml + +More about it later. + + + +### OSM cluster + +The OSM cluster has already been set up for us, yay! In fact, the `osm` +node (`10.11.23.249`) hosts a fully-fledged OSM Release 10 instance +configured with a VIM account called `dummyvim` that's tied to the +Kubernetes (MicroK8s) cluster. Also, the OSM config includes the Helm +chart repos below: + +- https://charts.bitnami.com/bitnami +- https://cetic.github.io/helm-charts +- https://helm.elastic.co +- http://osm-download.etsi.org/ftp/Packages/vnf-onboarding-tf/helm/ +- https://pencinarsanz-atos.github.io/nemergent-chart/ + +The last one is actually the only one we care about for this demo +since it hosts the Helm chart for the Nemergent services we're going +to deploy through our GitOps pipeline. To create NS instances from +that chart there have to be an NSD and VNFD in OSM. That's been done +for us too. In fact, there's an NSD called `affordable_nsd` pointing +to a VNFD called `affordable_vnfd`. The VNFD declares a KDU (name: +`nemergent`) referencing the above Helm repo. + + + +## Doing GitOps with OSM + +After toiling away at prep steps, we're finally ready for some GitOps +action. 
In fact, we're going to create a Nemergent KNF through OSM Ops +YAML files. Specifically, we'll fetch this YAML from a Git repo + +```yaml +kind: NsInstance +name: nemergent +description: Demo Nemergent NS instance +nsdName: affordable_nsd +vnfName: affordable_vnfd +vimAccountName: dummyvim +kdu: + name: nemergent +``` + +This OSM Ops deployment descriptor says we want the OSM cluster to +run a Nemergent KNF called `nemergent` that OSM should instantiate +using the Nemergent NSD, VNFD and KDU descriptors mentioned earlier. +Also, notice the VIM account name is that of the VIM connected to the +Kubernetes cluster. We'll watch OSM Ops process the file and the OSM +cluster end up with a brand new NS instance: a Nemergent Kubernetes +stateful set with 14 services each running a single pod. + +Ideally we'd demo an update too. That is, show how updating the file +in the Git repo eventually results in a corresponding update to the +cluster state. Unfortunately at the moment the Nemergent Helm chart +doesn't have any KDU params we can tweak, so we can't do the update. +But you can still have a look at the [Local clusters demo][demo.local] +to see how updates work. + + +### Setting up the OSM Ops pipeline + +The OSM Ops show starts as soon as you connect a Git repo through +FluxCD. We published a [repo on GitHub][osmops.demo] that we'll use +for this demo. In fact, we'll start off with the content of the repo +at tag [a5g-0.1.0][a5g-0.1.0]. This Git version contains a `nemergent.ops.yaml` +file in the `deployment/kdu` directory with the YAML above. So go +back to your SSH terminal on `node2` and create a `osmops.demo` Git +source within Flux like so: + +```bash +$ flux create source git osmops.demo \ + --url=https://github.com/c0c0n3/osmops.demo \ + --tag=a5g-0.1.0 +``` + +This command creates a Kubernetes GitRepository custom resource. 
As +soon as Source Controller gets notified of this new custom resource, +it'll fetch the content of `a5g-0.1.0` and make it available to OSM +Ops which will then realise the deployment config declared in the +YAML file in the OSM-managed Kubernetes cluster. But how can OSM Ops +know what OSM cluster to connect to? Well, OSM Ops looks for a YAML +config file, `osm_ops_config.yaml`, in the root of the repo directory +tree it gets from Source Controller. At `a5g-0.1.0`, our demo repo +has an `osm_ops_config.yaml` in the root dir with this content: + +```yaml +targetDir: deployment +fileExtensions: + - .ops.yaml +connectionFile: /etc/osmops/nbi-connection.yaml +``` + +This configuration tells OSM Ops to get the OSM connection details +from `/etc/osmops/nbi-connection.yaml`. Ha! Remember that Kubernetes +secret mounted on the OSM Ops pod? Yep, that's how it happens! The +other fields tell OSM Ops to look for OSM Ops GitOps files in the +`deployment` directory (recursively) and only consider files with +an extension of `.ops.yaml`. + + +### Watching reconciliation as it happens + +Now browse to the OSM Web UI at http://10.11.23.249/ and log in with +the OSM admin user—username: `admin`, password: `admin`. You should +be able to see that OSM is busy creating a new NS instance called +`nemergent`, similar to what you see on this screenshot: + +![OSM busy creating the Nemergent instance.][osm-ui.busy] + +This could take a little while. Instead of twiddling your thumbs as +you wait, why not have a look at what's happening under the bonnet? +Go back to your SSH terminal on `node2` and have a look at the OSM +Ops service logs as in the example below: + +```bash +# figure out the name of the OSM Ops pod, it's the one starting with +# 'source-watcher'. +$ kubectl -n flux-system get pods +NAME READY STATUS RESTARTS AGE +source-controller-d58957ccd-p5994 1/1 Running 0 2d23h +source-watcher-5494d664d5-v66rf 1/1 Running 0 5h27m + +# then get the logs. 
+$ kubectl -n flux-system logs source-watcher-5494d664d5-v66rf +``` + +What you see in the logs should be similar to + +```bash +2021-11-01T14:58:25.770Z INFO controller.gitrepository New revision detected {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "osmops.demo", "namespace": "flux-system", "revision": "a5g-0.1.0/019aefa83f185700ad5c8e11bfd5d91599a5b39a"} +2021-11-01T14:58:25.772Z INFO controller.gitrepository Extracted tarball into /tmp/osmops.demo875847309: 5 files, 3 dirs (546.733µs) {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "osmops.demo", "namespace": "flux-system"} +2021-11-01T14:58:25.773Z INFO controller.gitrepository processing {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "osmops.demo", "namespace": "flux-system", "file": "/tmp/osmops.demo875847309/deployment/kdu/nemergent.ops.yaml"} +``` + +In plain English: you should be able to see OSM Ops detect a new Git +revision of `a5g-0.1.0`, download its content and then process the +`nemergent.ops.yaml` file. 
If you look at the Kubernetes pods in the +OSM namespace, you should be able to see 14 pods being created for +the Nemergent NS instance: + +```bash +$ kubectl get ns +# ^ pick the one that looks like an UUID +$ kubectl -n 2b091f50-0555-4296-afe8-d825cc2b19f6 get pods +NAME READY STATUS RESTARTS AGE +scscf-0 0/1 ContainerCreating 0 11s +http-proxy-0 0/1 ContainerCreating 0 11s +pcscf-0 0/1 ContainerCreating 0 11s +rtp-engine-0 0/1 ContainerCreating 0 11s +pas-0 0/1 ContainerCreating 0 12s +idms-0 0/1 ContainerCreating 0 11s +hss-0 0/1 ContainerCreating 0 10s +icscf-0 0/1 ContainerCreating 0 10s +db-0 0/1 ContainerCreating 0 10s +cas-0 0/1 ContainerCreating 0 11s +cms-0 0/1 ContainerCreating 0 10s +kms-0 0/1 ContainerCreating 0 10s +redis-0 0/1 ContainerCreating 0 10s +enabler-ws-0 0/1 ContainerCreating 0 11s +``` + +Then some time later the 14 pods should be fully operational + +```bash +kubectl -n 2b091f50-0555-4296-afe8-d825cc2b19f6 get pods +NAME READY STATUS RESTARTS AGE +scscf-0 1/1 Running 0 38s +http-proxy-0 1/1 Running 0 38s +pcscf-0 1/1 Running 0 38s +rtp-engine-0 1/1 Running 0 38s +pas-0 1/1 Running 0 38s +idms-0 1/1 Running 0 38s +hss-0 1/1 Running 0 37s +icscf-0 1/1 Running 0 38s +db-0 1/1 Running 0 37s +cas-0 1/1 Running 0 38s +cms-0 1/1 Running 0 37s +kms-0 1/1 Running 0 38s +redis-0 1/1 Running 0 37s +enabler-ws-0 1/1 Running 0 38s +``` + +and eventually that should be reflected in the OSM Web UI too, as in +the screenshot below. + +![OSM done creating the Nemergent instance.][osm-ui.done] + + + +## Clean up + +Use the OSM UI (http://10.11.23.249/) to zap the Nemergent NS instance. 
+Then go back to your SSH terminal on `node2` and run + +```bash +$ kubectl delete -f deployment/osmops.deploy.yaml +$ kubectl -n flux-system delete secret nbi-connection +$ flux uninstall --namespace=flux-system +$ cd ~ +$ rm -rf osmops.demo-a5g-0.1.0 +$ rm .kube/config +``` + +Finally, don't forget to kill the OpenVPN process we started at the +beginning of the demo, otherwise your box will stay connected to the +Malaga environment through a VPN tunnel. + + + + +[a5g-0.1.0]: https://github.com/c0c0n3/osmops.demo/tree/a5g-0.1.0 +[dia.depl]: ./demo.malaga.png +[demo.local]: ./local-clusters.md +[flux-mk8s]: https://boxofcables.dev/using-flux2-with-microk8s/ +[osmops.demo]: https://github.com/c0c0n3/osmops.demo +[osm-ui.busy]: ./malaga.osm-ui.1.png +[osm-ui.done]: ./malaga.osm-ui.2.png diff --git a/docs/demos/malaga.osm-ui.1.png b/docs/demos/malaga.osm-ui.1.png new file mode 100644 index 0000000..3e2ce0d Binary files /dev/null and b/docs/demos/malaga.osm-ui.1.png differ diff --git a/docs/demos/malaga.osm-ui.2.png b/docs/demos/malaga.osm-ui.2.png new file mode 100644 index 0000000..13ab232 Binary files /dev/null and b/docs/demos/malaga.osm-ui.2.png differ diff --git a/docs/demos/osm-ui.1.png b/docs/demos/osm-ui.1.png new file mode 100644 index 0000000..a4b801f Binary files /dev/null and b/docs/demos/osm-ui.1.png differ diff --git a/docs/demos/osm-ui.2.png b/docs/demos/osm-ui.2.png new file mode 100644 index 0000000..9aed606 Binary files /dev/null and b/docs/demos/osm-ui.2.png differ diff --git a/docs/demos/osm-ui.3.png b/docs/demos/osm-ui.3.png new file mode 100644 index 0000000..fe34eaf Binary files /dev/null and b/docs/demos/osm-ui.3.png differ diff --git a/docs/demos/pack.md b/docs/demos/pack.md new file mode 100644 index 0000000..508e6a5 --- /dev/null +++ b/docs/demos/pack.md @@ -0,0 +1,209 @@ +Packaging +--------- +> Have OSM Ops manage your OSM packages too! + +Build a local OSM release 10 cluster and a separate Kind cluster to +host OSM Ops. 
Then simulate commits to this repository to watch OSM +Ops create OpenLDAP KNF & NS packages, instantiate an OpenLDAP KNF, +and finally update the OpenLDAP KNF & NS packages. It all happens +on your laptop! + +Notice this demo builds on the [Local clusters][demo.local] demo to +showcase how OSM Ops can also create and update OSM packages from +sources in a git repo---which, for this demo, is the one you're +reading this page from :-) + + + +## Build & deploy + +As in the [Local clusters][demo.local] demo, we'll build one Multipass +VM hosting an OSM release 10 cluster configured with a Kubernetes VIM +and a Kind cluster to host OSM Ops and FluxCD. Have a look at the diagram +and explanation there to get a handle on the lay of the land. + + +### Before you start... + +Same requirements as in the [Local clusters][demo.local] demo. + + +### OSM cluster + +Follow the steps in the [Local clusters][demo.local] demo up to where +it says to run OSM client to upload the two OSM packages---i.e. +`openldap_knf.tar.gz` and `openldap_ns.tar.gz`. **Skip that part** +where it asks you to upload the packages. In fact, we'll make OSM +Ops create and upload those two packages for us. + + +### Kind cluster + +Build and run it just like in the [Local clusters][demo.local] demo. + + + +## Doing GitOps with OSM + +After toiling away at prep steps, we're finally ready for some GitOps +action. In fact, we're going make OSM Ops create OpenLDAP KNF & NS +packages, instantiate an OpenLDAP KNF, and finally update the OpenLDAP +KNF & NS packages. + +Specifically, we'll start off with the deployment configuration in +this repo at tag [test.0.0.7][test.0.0.7]. At this tag, the repo +contains a [deployment directory][test.0.0.7.deploy] with + +* An [OpenLDAP KNF package source][test.0.0.7.knf]. +* An [OpenLDAP NS package source][test.0.0.7.ns]. 
+* An [OSM Ops YAML file][test.0.0.7.kdu] requesting the OSM cluster + to run an OpenLDAP KNF instantiated from NSD, VNFD and KDU descriptors + found in the above packages. + +Read the [Local clusters][demo.local] demo's section about GitOps +for an explanation of the OSM Ops YAML file. The way OSM Ops handles +packages is a bit more involved, you can [read here about it][docs.pkgs], +but for the purpose of this demo all you need to know is that OSM +Ops can create or update OSM packages from source directories you +keep in your GitOps repo. Each source directory contains the files +you'd normally use to make an OSM package tarball, except for the +`checksums.txt` file which OSM Ops generates for you when making +the tarball. + +On processing the repo at tag `test.0.0.7`, OSM Ops will create the +OpenLDAP KNF and NS packages in OSM, then make OSM instantiate the +OpenLDAP KNF using the data in the packages just created in OSM. + +After that, we'll simulate a commit to the repo by switching over +to tag `test.0.0.8`. The only changes between tag `test.0.0.7` and +`test.0.0.8` are updated version numbers for the source packages, +from `1.0` to `1.1`, as shown in [this diff][tag-diff]. OSM Ops will +pick up the changes and update both packages in OSM. + + +### Setting up the OSM Ops pipeline + +The OSM Ops show starts as soon as you connect a Git repo through +FluxCD. As mentioned earlier, we're going to use this very repo on +GitHub at tag `test.0.0.7`. Open a terminal in your local repo root +dir and create a test Git source within Flux like so: + +```bash +$ nix-shell +$ flux create source git test \ + --url=https://github.com/c0c0n3/source-watcher \ + --tag=test.0.0.7 +``` + +This command creates a Kubernetes GitRepository custom resource. 
As +soon as Source Controller gets notified of this new custom resource, +it'll fetch the content of `test.0.0.7` and make it available to OSM +Ops which will then realise the deployment config in our local OSM +cluster running on Multipass. As explained in the [Local clusters][demo.local] +demo, OSM Ops figures out which OSM cluster to connect to by reading +the `osm_ops_config.yaml` file in the root of the repo directory tree +it gets from Source Controller. At `test.0.0.7`, the content of that +file is + +```yaml +targetDir: _deployment_ +fileExtensions: + - .ops.yaml +connectionFile: /etc/osmops/nbi-connection.yaml +``` + +This configuration tells OSM Ops to get the OSM connection details +from `/etc/osmops/nbi-connection.yaml`. Ha! Remember that Kubernetes +secret mounted on the OSM Ops pod? Yep, that's how it happens! The +other fields tell OSM Ops to look for OSM Ops GitOps files in the +`_deployment_` directory (recursively) and only consider files with +an extension of `.ops.yaml`. As for OSM package sources, OSM Ops +looks for them in the `osm-pkgs` dir beneath the target dir, which +in our case is: `_deployment_/osm-pkgs`. + + +### Watching reconciliation as it happens + +Now browse to the OSM Web UI at the Multipass IP address you noted +down earlier (e.g. http://192.168.64.19/) and log in with the OSM +admin user—username: `admin`, password: `admin`. You should be able +to see that OSM now has both an OpenLDAP KNF and NS package and is +busy creating a new NS instance called `ldap`, similar to what you +see on the screenshot in the [Local clusters][demo.local] demo. And +as in the [Local clusters][demo.local] demo, if you grab the OSM Ops +logs, you should see what OSM Ops did. 
The log file should contain +entries similar to the ones below + +```log +2022-06-21T18:34:45.368Z INFO controller.gitrepository New revision detected {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system", "revision": "test.0.0.7/d3a8cbf812447c05cf44814db40f6c6da86ab49f"} +2022-06-21T18:34:45.951Z INFO controller.gitrepository Extracted tarball into /tmp/test988276587: 233 files, 81 dirs (399.195386ms) {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system"} +2022-06-21T18:34:45.997Z INFO controller.gitrepository processing {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system", "osm package": "/tmp/test988276587/_deployment_/osm-pkgs/openldap_knf"} +2022-06-21T18:35:05.808Z INFO controller.gitrepository processing {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system", "osm package": "/tmp/test988276587/_deployment_/osm-pkgs/openldap_ns"} +2022-06-21T18:35:13.872Z INFO controller.gitrepository processing {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system", "file": "/tmp/test988276587/_deployment_/kdu/ldap.ops.yaml"} +``` + + +### Updating the deployment configuration + +Now we should make some changes to the source packages in the repo +to see OSM Ops update the packages in OSM. As in the Local clusters +demo, we'll use a shortcut: manually force FluxCD to fetch tag `test.0.0.8` +which has the same content as `test.0.0.7` except for the package +versions which are `1.1`. Again, have a look at the [diff between +these two tags][tag-diff]. 
Open a terminal in your local repo root +dir and run: + +```bash +$ nix-shell +$ flux create source git test \ + --url=https://github.com/c0c0n3/source-watcher \ + --tag=test.0.0.8 +``` + +If you then look at the OSM Ops service logs + +```log +2022-06-21T18:50:42.261Z INFO controller.gitrepository New revision detected {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system", "revision": "test.0.0.8/2a8ee4439b06d4ac94c64ec187e88f619d6a97d1"} +2022-06-21T18:50:42.415Z INFO controller.gitrepository Extracted tarball into /tmp/test805307342: 233 files, 81 dirs (115.131087ms) {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system"} +2022-06-21T18:50:42.420Z INFO controller.gitrepository processing {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system", "osm package": "/tmp/test805307342/_deployment_/osm-pkgs/openldap_knf"} +2022-06-21T18:50:42.927Z INFO controller.gitrepository processing {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system", "osm package": "/tmp/test805307342/_deployment_/osm-pkgs/openldap_ns"} +2022-06-21T18:50:43.921Z INFO controller.gitrepository processing {"reconciler group": "source.toolkit.fluxcd.io", "reconciler kind": "GitRepository", "name": "test", "namespace": "flux-system", "file": "/tmp/test805307342/_deployment_/kdu/ldap.ops.yaml"} +``` + +you should be able to see OSM Ops having processed tag `test.0.0.8`. +In particular the two source packages OpenLDAP KNF and OpenLDAP NS. +Now if you go back to the OSM Web UI and navigate to the NS Packages +page, you should see the OpenLDAP NS package has now version `1.1`, +i.e. exactly what was in the YAML source file in our repo. 
Likewise,
if you navigate to the VNF Packages page, you should be able to see
the OpenLDAP KNF package's version is now `1.1` too.
+ + +### TL;DR + +To make OSM Ops create or update OSM packages in your repo: + +1. Put the files that make up a package in a directory right under + `/osm-pkgs` where `target-dir` is the deployment + target directory specified in `osm_ops_config.yaml`. +2. Name the package directory with a `_knf` suffix if it's a KNF + package or `_ns` if it's an NS. +3. Use the directory name (including suffix) as a package ID in + the YAML definitions. + +If package `p2` depends on `p1`, name their directories in such a +way that `p2`'s directory name comes before `p1`'s in alphabetical +order. + + +### How it works + +#### OSM package tree +OSM Ops expects package source files to be in a directory tree rooted +at `/osm-pkgs`. `target-dir` is the deployment target directory, +within your OSM Ops-managed repo, you specify in the `osm_ops_config.yaml` +config file whereas the `osm-pkgs` bit isn't configurable at the moment. +The source files that make up a package have to be in a directory right +under `osm-pkgs`. How you structure the package directory is up to you +(you could have sub-dirs if you wanted) but the way you name it tells +OSM Ops how to handle the package---more about it later. + +Here's an example repo layout with an OSM package tree. + +``` +my-gitops-repo + | -- osm_ops_config.yaml + + -- deployment-target-dir + + -- kdu + | -- ldap.ops.yaml + + -- osm-pkgs + + -- openldap_knf + | -- openldap_vnfd.yaml + + -- openldap_ns + | -- openldap_nsd.yaml + | -- README.md +``` + +`my-gitops-repo` is your repo root dir, e.g. on GitHub it could be +hosted at https://github.com/c0c0n3/my-gitops-repo. `osm_ops_config.yaml` +is the usual OSM Ops config file. In this case it specifies a target +directory of `deployment-target-dir` as in the example below: + +```yaml +targetDir: deployment-target-dir +fileExtensions: + - .ops.yaml +connectionFile: /etc/osmops/nbi-connection.yaml +``` + +`osm-pkgs` contains the source files of two OSM packages, each in +its own directory. 
One package `openldap_knf` contains the YAML to +define a KNF for an OpenLDAP service. The other, `openldap_ns`, +contains the YAML to define an NS for the OpenLDAP KNF defined by +`openldap_knf` plus a standard README. Notice you don't need to +add an OSM `checksums.txt` file to each package source directory +since OSM Ops does that for you when uploading the package to OSM +as we'll see later. Finally, there's an `ldap.ops.yaml` file with +some instructions for OSM Ops to manage the deployment of the +OpenLDAP service defined through the above KNF and NS packages. + +#### OSM package directory names +At the moment OSM Ops blindly assumes that any sub-directory of +`osm-pkgs` contains either a KNF or NS package. If the directory +name ends with `_knf`, OSM Ops treats the whole directory as a KNF +package. Likewise, if the directory name ends with `_ns`, OSM Ops +treats it as an NS package. (OSM Ops will report an error if the +directory name doesn't have an `_ns` or `_knf` suffix.) + +OSM Ops also relies on another naming convention to figure out the +package ID. In fact, it assumes the directory name is also the package +ID declared in the KNF or NS YAML stanza. + +So to make OSM Ops manage your package source, you have to: + +* name the package directory with a `_knf` suffix if it's a KNF + package or `_ns` if it's an NS; +* use the directory name (including suffix) as a package ID in + the YAML definitions. + +In our example layout above, `openldap_vnfd.yaml` uses the enclosing +directory name as an ID in the VNFD declaration + +```yaml +vnfd: + id: openldap_knf +# ... rest of the file +``` + +And as you've guessed already, `openldap_nsd.yaml` does pretty much +the same + +```yaml +nsd: + nsd: + - id: openldap_ns + name: openldap_ns + vnfd-id: + - openldap_knf +# ... rest of the file +``` + +#### OSM package dependencies +At the moment OSM Ops does **not explicitly** handle dependencies +among OSM packages. 
But it does process package directories in the
OSM package tree in alphabetical order. This way, the operator can
name package directories in such a way that if package `p2` depends
on `p1`, `p1`'s name comes before `p2`'s in alphabetical order.
It tries finding a YAML file in the package dir, blindly assumes +it's a VNFD or NSD and PUTs it in OSM. What if there are other files +in the package? Well, I've got no idea why OSM client does that, but +I've changed our update implementation to be in line with OSM client's. +Have a look at OSM client's [VNFD][osm-client.vnfd] and [NSD][osm-client.nsd] +update implementation. + + +### How it could work + +Surely this is a stopgap solution. Eventually we'll implement proper +(semantic) handling of packages and their dependencies. One obvious +approach would be to: + +* parse OSM package definitions; +* interpret the parsed AST to build a dependency graph; +* extract a DAG `d[k]` for each graph component `g[k]`; +* topologically sort `d[k]` to get a sequence of nodes `s[k]`; +* process `s[k]` sequences in parallel. + + + + +[osm-client.nsd]: https://osm.etsi.org/gitlab/osm/osmclient/-/blob/master/osmclient/sol005/nsd.py +[osm-client.vnfd]: https://osm.etsi.org/gitlab/osm/osmclient/-/blob/master/osmclient/sol005/vnfd.py +[pr.1]: https://github.com/c0c0n3/source-watcher/pull/1 diff --git a/federico.md b/federico.md new file mode 100644 index 0000000..b9f9607 --- /dev/null +++ b/federico.md @@ -0,0 +1,48 @@ +### Federico's notes about what he'd like to do. + +``` +helm-repositories/ + helm-repo1.yaml +vnfd-catalog + fb_magma_knf + | + --- fb_magma_knfd.yaml +ns-catalog + fb_magma_ns + | + --- fb_magma_nsd.yaml +ns + ns.yaml +``` + +`helm-repo1.yaml` + +```yaml +apiVersion: flux.weave.works/v1beta1 +kind: HelmRepository +metadata: + name: magma +spec: + id: magma + url: https://felipevicens.github.io/fb-magma-helm-chart/ +``` + +^ "check that this may be already covered!!!" 
(Federico, 9 Mar 2021) + +`ns.yaml` + +```yaml +apiVersion: flux.weave.works/v1beta1 +kind: NSResource +Metadata: + name: magma_orc8r +spec: + name: magma_orc8r + nsd: fb_magma_ns + vim: + config: + - + parameters: + - fb_magma_knf: + kdu_model: "stable/openldap:1.2.2" +``` diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..de48623 --- /dev/null +++ b/go.mod @@ -0,0 +1,17 @@ +module github.com/martel-innovate/osmops + +go 1.16 + +require ( + github.com/fluxcd/pkg/runtime v0.12.0 + github.com/fluxcd/pkg/untar v0.0.5 + github.com/fluxcd/source-controller/api v0.15.0 + github.com/go-logr/logr v0.4.0 + github.com/go-ozzo/ozzo-validation v3.6.0+incompatible + github.com/json-iterator/go v1.1.11 + github.com/spf13/pflag v1.0.5 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/apimachinery v0.21.1 + k8s.io/client-go v0.21.1 + sigs.k8s.io/controller-runtime v0.9.0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..c159eae --- /dev/null +++ b/go.sum @@ -0,0 +1,749 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= 
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= 
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod 
h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod 
h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= 
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fluxcd/pkg/apis/meta v0.10.0 h1:N7wVGHC1cyPdT87hrDC7UwCwRwnZdQM46PBSLjG2rlE= +github.com/fluxcd/pkg/apis/meta v0.10.0/go.mod h1:CW9X9ijMTpNe7BwnokiUOrLl/h13miwVr/3abEQLbKE= +github.com/fluxcd/pkg/runtime v0.12.0 h1:BPZZ8bBkimpqGAPXqOf3LTaw+tcw6HgbWyCuzbbsJGs= +github.com/fluxcd/pkg/runtime v0.12.0/go.mod h1:EyaTR2TOYcjL5U//C4yH3bt2tvTgIOSXpVRbWxUn/C4= +github.com/fluxcd/pkg/untar v0.0.5 h1:UGI3Ch1UIEIaqQvMicmImL1s9npQa64DJ/ozqHKB7gk= +github.com/fluxcd/pkg/untar v0.0.5/go.mod h1:O6V9+rtl8c1mHBafgqFlJN6zkF1HS5SSYn7RpQJ/nfw= +github.com/fluxcd/source-controller/api v0.15.0 h1:EhuBZb+gLFbOWxX+UQzXqnAO0wUSViJEDcuVscmRoHc= +github.com/fluxcd/source-controller/api v0.15.0/go.mod h1:P1pIkaoIsiCJ/NLC7IBXPb9XEime9NvA1WN4hZu2Of4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= 
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= +github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec 
v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-ozzo/ozzo-validation v3.6.0+incompatible h1:msy24VGS42fKO9K1vLz82/GeYW1cILu7Nuuj1N3BBkE= +github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap 
v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod 
h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck 
v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega 
v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 
h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= 
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/jwalterweatherman 
v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/urfave/cli v1.20.0/go.mod 
h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= 
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp 
v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod 
h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod 
h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time 
v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api 
v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto 
v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc 
v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.21.1 h1:94bbZ5NTjdINJEdzOkpS4vdPhkb1VFpTYC9zh43f75c= +k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= +k8s.io/apiextensions-apiserver v0.21.1 h1:AA+cnsb6w7SZ1vD32Z+zdgfXdXY8X9uGX5bN6EoPEIo= +k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= +k8s.io/apimachinery v0.21.1 h1:Q6XuHGlj2xc+hlMCvqyYfbv3H7SRGn2c8NycxJquDVs= +k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= +k8s.io/client-go v0.21.1 h1:bhblWYLZKUu+pm50plvQF8WpY6TXdRRtcS/K9WauOj4= +k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= +k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/component-base v0.21.1 
h1:iLpj2btXbR326s/xNQWmPNGu0gaYSjzn7IN/5i28nQw= +k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/controller-runtime v0.9.0 h1:ZIZ/dtpboPSbZYY7uUz2OzrkaBTOThx2yekLtpGB+zY= +sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod 
h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 0000000..68b495e --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2020, 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/main.go b/main.go new file mode 100644 index 0000000..d5a00af --- /dev/null +++ b/main.go @@ -0,0 +1,94 @@ +/* +Copyright 2020, 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "os" + + flag "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/runtime/logger" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + "github.com/martel-innovate/osmops/controllers" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(sourcev1.AddToScheme(scheme)) + + // +kubebuilder:scaffold:scheme +} + +func main() { + var ( + metricsAddr string + enableLeaderElection bool + logOptions logger.Options + ) + + flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, + "Enable leader election for controller manager. 
"+ + "Enabling this will ensure there is only one active controller manager.") + logOptions.BindFlags(flag.CommandLine) + flag.Parse() + + ctrl.SetLogger(logger.NewLogger(logOptions)) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + Port: 9443, + LeaderElection: enableLeaderElection, + LeaderElectionID: "0cf1c86c.fluxcd.io", + Logger: ctrl.Log, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + if err = (&controllers.GitRepositoryWatcher{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "GitRepositoryWatcher") + os.Exit(1) + } + + // +kubebuilder:scaffold:builder + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } + + // TODO. Add health and readiness endpoints? See: + // https://github.com/kubernetes-sigs/kubebuilder/blob/master/docs/book/src/multiversion-tutorial/testdata/project/main.go#L125 +} diff --git a/osm_ops_config.yaml b/osm_ops_config.yaml new file mode 100644 index 0000000..de6983c --- /dev/null +++ b/osm_ops_config.yaml @@ -0,0 +1,4 @@ +targetDir: _deployment_ +fileExtensions: + - .ops.yaml +connectionFile: /etc/osmops/nbi-connection.yaml \ No newline at end of file diff --git a/osmops/cfg/fsvisitor.go b/osmops/cfg/fsvisitor.go new file mode 100644 index 0000000..5183206 --- /dev/null +++ b/osmops/cfg/fsvisitor.go @@ -0,0 +1,94 @@ +// Traversal of the repo target directory tree to process the content of any +// OSM GitOps files found in there. +// +package cfg + +import ( + "io/fs" + "io/ioutil" + "strings" + + u "github.com/martel-innovate/osmops/osmops/util" + "github.com/martel-innovate/osmops/osmops/util/file" +) + +// KduNsActionFile is the data passed to the OSM GitOps file visitor. 
+type KduNsActionFile struct { + FilePath file.AbsPath + Content *KduNsAction +} + +// KduNsActionProcessor is a file visitor that is given, in turn, the content +// of each OSM GitOps file found in the target directory. +type KduNsActionProcessor interface { + // Do something with the current OSM GitOps file, possibly returning an + // error if something goes wrong. + Process(file *KduNsActionFile) error +} + +// KduNsActionRepoScanner has methods to let visitors process OSM GitOps files +// found while traversing the target directory. +type KduNsActionRepoScanner struct { + targetDir file.AbsPath + fileExt []u.NonEmptyStr + readFile func(string) ([]byte, error) // (*) + + // (*) added for testability, so we can sort of mock stuff +} + +// NewKduNsActionRepoScanner instantiates a KduNsActionRepoScanner to +// traverse the target directory configured in the given Store. +func NewKduNsActionRepoScanner(store *Store) *KduNsActionRepoScanner { + return &KduNsActionRepoScanner{ + targetDir: store.RepoTargetDirectory(), + fileExt: store.OpsFileExtensions(), + readFile: ioutil.ReadFile, + } +} + +// Visit scans the repo's OSM Ops target directory recursively, calling the +// specified visitor with the content of each OSM Git Ops file found. +// For now the only kind of Git Ops file OSM Ops can process is a file +// containing KduNsAction YAML. Any I/O errors that happen while traversing +// the target directory tree get collected in the returned error buffer as +// VisitErrors. Ditto for I/O errors that happen when reading or validating +// a Git Ops file as well as any error returned by the visitor. 
+func (k *KduNsActionRepoScanner) Visit(visitor KduNsActionProcessor) []error { + scanner := file.NewTreeScanner(k.targetDir) + return scanner.Visit(func(node file.TreeNode) error { + if !k.isGitOpsFile(node.FsMeta) { + return nil + } + return k.visitFile(node.NodePath, visitor) + }) +} + +func (k *KduNsActionRepoScanner) isGitOpsFile(info fs.FileInfo) bool { + if !info.IsDir() { + for _, ext := range k.fileExt { + name := strings.ToLower(info.Name()) + if strings.HasSuffix(name, ext.Value()) { + return true + } + } + } + return false +} + +func (k *KduNsActionRepoScanner) visitFile(absPath file.AbsPath, + visitor KduNsActionProcessor) error { + var err error + file := &KduNsActionFile{FilePath: absPath} + + yaml, err := k.readFile(absPath.Value()) + if err != nil { + return err + } + content, err := readKduNsAction(yaml) + if err != nil { + return err + } + file.Content = content + + return visitor.Process(file) +} diff --git a/osmops/cfg/fsvisitor_test.go b/osmops/cfg/fsvisitor_test.go new file mode 100644 index 0000000..9f62726 --- /dev/null +++ b/osmops/cfg/fsvisitor_test.go @@ -0,0 +1,81 @@ +package cfg + +import ( + "fmt" + "path/filepath" + "reflect" + "sort" + "testing" + + "github.com/martel-innovate/osmops/osmops/util/file" +) + +type processor struct { + received []*KduNsActionFile +} + +func (p *processor) Process(file *KduNsActionFile) error { + p.received = append(p.received, file) + if file.Content.Kdu.Name == "k3" { + return fmt.Errorf("k3") + } + return nil +} + +func buildScanner(t *testing.T) *KduNsActionRepoScanner { + var err error + repoRootDir := findTestDataDir(6) + + store, err := NewStore(repoRootDir) + if err != nil { + t.Fatalf("want: new store; got: %v", err) + } + + return NewKduNsActionRepoScanner(store) +} + +func TestVisit(t *testing.T) { + scanner := buildScanner(t) + visitor := &processor{} + errors := scanner.Visit(visitor) + + errorFileNames := []string{} + for _, e := range errors { + if ve, ok := e.(*file.VisitError); ok { + 
name := filepath.Base(ve.AbsPath) + errorFileNames = append(errorFileNames, name) + } + } + sort.Strings(errorFileNames) + wantErrorFileNames := []string{"k1.ops.yaml", "k3.ops.yaml"} + if !reflect.DeepEqual(wantErrorFileNames, errorFileNames) { + t.Errorf("want error files: %s; got: %s", + wantErrorFileNames, errorFileNames) + } + + visited := []string{} + for _, r := range visitor.received { + visited = append(visited, r.Content.Kdu.Name) + } + sort.Strings(visited) + wantVisited := []string{"k2", "k3"} + if !reflect.DeepEqual(wantVisited, visited) { + t.Errorf("want visited: %s; got: %s", wantVisited, visited) + } +} + +func TestVisitFileIOReadError(t *testing.T) { + scanner := buildScanner(t) + scanner.readFile = func(path string) ([]byte, error) { + return nil, fmt.Errorf("can't read file: %v", path) + } + visitor := &processor{} + + errors := scanner.Visit(visitor) + if len(errors) != 3 { + t.Errorf("want: IO err on k1, k2 and k3 paths; got: %v", errors) + } + if len(visitor.received) != 0 { + t.Errorf("want: no ops files visited; got: %v", visitor.received) + } +} diff --git a/osmops/cfg/store.go b/osmops/cfg/store.go new file mode 100644 index 0000000..ec01c34 --- /dev/null +++ b/osmops/cfg/store.go @@ -0,0 +1,168 @@ +// Access to the OSM Ops program configuration. +// +package cfg + +import ( + "io/ioutil" + "path/filepath" + "strings" + + u "github.com/martel-innovate/osmops/osmops/util" + "github.com/martel-innovate/osmops/osmops/util/file" +) + +// Store holds the OSM Ops program configuration read from the OSM Ops +// config and credentials files. +type Store struct { + rootDir file.AbsPath + targetDir file.AbsPath + fileExt []u.NonEmptyStr + osmCreds *OsmConnection +} + +// NewStore reads the program configuration and credentials files, validates +// their content, and packs the content in a Store. If an I/O error happens +// when reading the files or some of the YAML content isn't valid, NewStore +// returns a error. 
Each YAML type documents what a valid instance is. +func NewStore(repoRootDir file.AbsPath) (*Store, error) { + var err error + var cfg *OpsConfig + s := Store{rootDir: repoRootDir} + + if err = s.rootDir.IsDir(); err != nil { + return nil, err + } + + if cfg, err = readConfig(s.rootDir); err != nil { + return nil, err + } + + s.fileExt = getFileExtensions(cfg) + + if s.targetDir, err = buildTargetDirPath(s.rootDir, cfg); err != nil { + return nil, err + } + if s.osmCreds, err = readCreds(s.rootDir, cfg); err != nil { + return nil, err + } + + return &s, nil +} + +// The name of the YAML file containing the OpsConfig. For now this name is +// hardcoded to "osm_ops_config.yaml" and the file is expected to be in the +// repo root directory. +const OpsConfigFileName = "osm_ops_config.yaml" + +// The name of the sub-directory of RepoTargetDirectory where to look for +// OSM source package directories. For now this is not configurable, if +// there are any OSM package source directories they should be in "t/osm-pkgs" +// where t is the absolute path returned by RepoTargetDirectory. 
+const OsmPackagesDirName = "osm-pkgs" + +func readConfig(rootDir file.AbsPath) (*OpsConfig, error) { + file := rootDir.Join(OpsConfigFileName) + if fileData, err := ioutil.ReadFile(file.Value()); err != nil { + return nil, err + } else { + return readOpsConfig(fileData) + } +} + +func buildTargetDirPath(rootDir file.AbsPath, cfg *OpsConfig) (file.AbsPath, error) { + target := rootDir.Join(cfg.TargetDir) + if err := target.IsDir(); err != nil { + return target, err + } + return target, nil +} + +func buildCredsDirPath(rootDir file.AbsPath, cfg *OpsConfig) (file.AbsPath, error) { + if filepath.IsAbs(cfg.ConnectionFile) { + return file.ParseAbsPath(cfg.ConnectionFile) + } + return rootDir.Join(cfg.ConnectionFile), nil +} + +func readCreds(rootDir file.AbsPath, cfg *OpsConfig) (*OsmConnection, error) { + var fileData []byte + if credsFile, err := buildCredsDirPath(rootDir, cfg); err != nil { + return nil, err + } else { + if fileData, err = ioutil.ReadFile(credsFile.Value()); err != nil { + return nil, err + } + return readOsmConnection(fileData) + } +} + +// DefaultOpsFileExtensions returns the default file extensions used to filter +// OSM GitOps files: ".osmops.yaml" and ".osmops.yml". +func DefaultOpsFileExtensions() []u.NonEmptyStr { + y1, _ := u.NewNonEmptyStr(".osmops.yaml") + y2, _ := u.NewNonEmptyStr(".osmops.yml") + return []u.NonEmptyStr{y1, y2} +} + +func getFileExtensions(cfg *OpsConfig) []u.NonEmptyStr { + nonEmpty := []u.NonEmptyStr{} + for _, x := range cfg.FileExtensions { + if y, err := u.NewNonEmptyStr(strings.TrimSpace(x)); err == nil { + nonEmpty = append(nonEmpty, y) + } + } + + if len(nonEmpty) > 0 { + return nonEmpty + } + return DefaultOpsFileExtensions() +} + +// RepoRootDirectory returns the absolute path to the repo root directory. +func (s *Store) RepoRootDirectory() file.AbsPath { + return s.rootDir +} + +// RepoTargetDirectory returns the absolute path to the directory within +// the repo where to find OSM Git Ops files. 
+func (s *Store) RepoTargetDirectory() file.AbsPath { + return s.targetDir +} + +// RepoPkgDirectories lists, in alphabetical order, the sub-directories +// of the OSM package root directory. If there's no OSM package directory, +// RepoPkgDirectories returns an empty list. +// See also: OsmPackagesDirName. +func (s *Store) RepoPkgDirectories() ([]file.AbsPath, error) { + dirs := []file.AbsPath{} + + pkgsDir := s.targetDir.Join(OsmPackagesDirName) + if err := pkgsDir.IsDir(); err != nil { + return dirs, nil + } + + sortedDirNames, err := file.ListSubDirectoryNames(pkgsDir.Value()) + for _, name := range sortedDirNames { // (*) + dirPath := pkgsDir.Join(name) + dirs = append(dirs, dirPath) + } + return dirs, err + + // (*) sortedDirNames is empty if err but never nil. +} + +// OpsFileExtensions returns the file extensions used to filter OSM GitOps +// files within the RepoTargetDirectory. +// If the OpsConfig YAML file contains no extensions field, then the extensions +// will be the DefaultOpsFileExtensions. +// OSM Ops will only look for OSM Git Ops files in the RepoTargetDirectory +// that have one of these extensions. +func (s *Store) OpsFileExtensions() []u.NonEmptyStr { + return s.fileExt +} + +// OsmCredentials returns the OSM connection and credentials details to +// connect to the OSM north-bound interface. 
+func (s *Store) OsmConnection() *OsmConnection { + return s.osmCreds +} diff --git a/osmops/cfg/store_test.go b/osmops/cfg/store_test.go new file mode 100644 index 0000000..006a219 --- /dev/null +++ b/osmops/cfg/store_test.go @@ -0,0 +1,139 @@ +package cfg + +import ( + "fmt" + "path/filepath" + "reflect" + "runtime" + "testing" + + u "github.com/martel-innovate/osmops/osmops/util" + "github.com/martel-innovate/osmops/osmops/util/file" +) + +func findTestDataDir(dirIndex int) file.AbsPath { + _, thisFileName, _, _ := runtime.Caller(1) + enclosingDir := filepath.Dir(thisFileName) + testDataDirName := fmt.Sprintf("test_%d", dirIndex) + testDataDir := filepath.Join(enclosingDir, "store_test_dir", + testDataDirName) + p, _ := file.ParseAbsPath(testDataDir) + + return p +} + +func TestInstantiateStoreWithFullConfig(t *testing.T) { + var err error + repoRootDir := findTestDataDir(1) + s, err := NewStore(repoRootDir) + + if err != nil { + t.Fatalf("want: new store; got: %v", err) + } + + if !reflect.DeepEqual(repoRootDir, s.RepoRootDirectory()) { + t.Errorf("want: %v; got: %v", repoRootDir, s.RepoRootDirectory()) + } + + wantTargetDir := repoRootDir.Join("deploy.me") + if !reflect.DeepEqual(wantTargetDir, s.RepoTargetDirectory()) { + t.Errorf("want: %v; got: %v", wantTargetDir, s.RepoTargetDirectory()) + } + + ext, _ := u.NewNonEmptyStr(".ops.yaml") + wantExts := []u.NonEmptyStr{ext} + if !reflect.DeepEqual(wantExts, s.OpsFileExtensions()) { + t.Errorf("want: %v; got: %v", wantExts, s.OpsFileExtensions()) + } + + wantCreds := &OsmConnection{ + Hostname: "host.ie:8008", Project: "boetie", User: "vans", Password: "*", + } + if !reflect.DeepEqual(wantCreds, s.OsmConnection()) { + t.Errorf("want: %v; got: %v", wantCreds, s.OsmConnection()) + } + +} + +func TestInvalidRepoRootDir(t *testing.T) { + repoRootDir := findTestDataDir(0) + if s, err := NewStore(repoRootDir); err == nil { + t.Errorf("want: no store on invalid root dir; got: %v", s) + } +} + +func TestNoConfigFile(t 
*testing.T) { + repoRootDir := findTestDataDir(2) + if s, err := NewStore(repoRootDir); err == nil { + t.Errorf("want: no store if no config file found; got: %v", s) + } +} + +func TestNoTargetDirOnFS(t *testing.T) { + repoRootDir := findTestDataDir(3) + if s, err := NewStore(repoRootDir); err == nil { + t.Errorf("want: no store if no target dir exists; got: %v", s) + } +} + +func TestNoConnectionFileOnFS(t *testing.T) { + repoRootDir := findTestDataDir(4) + if s, err := NewStore(repoRootDir); err == nil { + t.Errorf("want: no store if no connection file exists; got: %v", s) + } +} + +func TestDefaultFileExtensions(t *testing.T) { + repoRootDir := findTestDataDir(5) + if s, err := NewStore(repoRootDir); err != nil { + t.Fatalf("want: new store; got: %v", err) + } else { + wantExts := DefaultOpsFileExtensions() + if !reflect.DeepEqual(wantExts, s.OpsFileExtensions()) { + t.Errorf("want: %v; got: %v", wantExts, s.OpsFileExtensions()) + } + } +} + +func TestNoRepoPkgRootDir(t *testing.T) { + repoRootDir := findTestDataDir(1) + store, _ := NewStore(repoRootDir) + + got, err := store.RepoPkgDirectories() + if err != nil { + t.Fatalf("want: empty list; got error: %v", err) + } + if got == nil || len(got) != 0 { + t.Errorf("want: empty list; got: %v", got) + } +} + +func TestRepoPkgRootDirWithNoSubdirs(t *testing.T) { + repoRootDir := findTestDataDir(5) + store, _ := NewStore(repoRootDir) + + got, err := store.RepoPkgDirectories() + if err != nil { + t.Fatalf("want: empty list; got error: %v", err) + } + if got == nil || len(got) != 0 { + t.Errorf("want: empty list; got: %v", got) + } +} + +func TestRepoPkgRootDirWithSubdirs(t *testing.T) { + repoRootDir := findTestDataDir(6) + store, _ := NewStore(repoRootDir) + want := []file.AbsPath{ + repoRootDir.Join("deploy.me/osm-pkgs/p1"), + repoRootDir.Join("deploy.me/osm-pkgs/p2"), + } + + got, err := store.RepoPkgDirectories() + if err != nil { + t.Fatalf("want: %v; got error: %v", want, err) + } + if !reflect.DeepEqual(want, 
got) { + t.Errorf("want: %v; got: %v", want, got) + } +} diff --git a/osmops/cfg/store_test_dir/test_1/deploy.me/secret.yaml b/osmops/cfg/store_test_dir/test_1/deploy.me/secret.yaml new file mode 100644 index 0000000..46f94b3 --- /dev/null +++ b/osmops/cfg/store_test_dir/test_1/deploy.me/secret.yaml @@ -0,0 +1,4 @@ +hostname: host.ie:8008 +project: boetie +user: vans +password: '*' diff --git a/osmops/cfg/store_test_dir/test_1/osm_ops_config.yaml b/osmops/cfg/store_test_dir/test_1/osm_ops_config.yaml new file mode 100644 index 0000000..fa3d076 --- /dev/null +++ b/osmops/cfg/store_test_dir/test_1/osm_ops_config.yaml @@ -0,0 +1,4 @@ +targetDir: deploy.me +fileExtensions: + - .ops.yaml +connectionFile: deploy.me/secret.yaml diff --git a/osmops/cfg/store_test_dir/test_2/config.yaml b/osmops/cfg/store_test_dir/test_2/config.yaml new file mode 100644 index 0000000..52af0e8 --- /dev/null +++ b/osmops/cfg/store_test_dir/test_2/config.yaml @@ -0,0 +1,5 @@ +# valid config but in the wrong file. +targetDir: deploy.me +fileExtensions: + - .ops.yaml +connectionFile: deploy.me/secret.yaml \ No newline at end of file diff --git a/osmops/cfg/store_test_dir/test_3/osm_ops_config.yaml b/osmops/cfg/store_test_dir/test_3/osm_ops_config.yaml new file mode 100644 index 0000000..d92e13f --- /dev/null +++ b/osmops/cfg/store_test_dir/test_3/osm_ops_config.yaml @@ -0,0 +1,5 @@ +# specified target dir isn't there. +targetDir: deploy.me +fileExtensions: + - .ops.yaml +connectionFile: deploy.me/secret.yaml diff --git a/osmops/cfg/store_test_dir/test_4/osm_ops_config.yaml b/osmops/cfg/store_test_dir/test_4/osm_ops_config.yaml new file mode 100644 index 0000000..a0d2bbf --- /dev/null +++ b/osmops/cfg/store_test_dir/test_4/osm_ops_config.yaml @@ -0,0 +1,7 @@ +# specified connection file isn't there. +# notice the config is otherwise valid since the targetDir points to the +# dir enclosing this file which obviously exists. +targetDir: . 
+fileExtensions: + - .ops.yaml +connectionFile: /no/way/jose/secret.yaml diff --git a/osmops/cfg/store_test_dir/test_5/osm-pkgs/dummy.yaml b/osmops/cfg/store_test_dir/test_5/osm-pkgs/dummy.yaml new file mode 100644 index 0000000..e69de29 diff --git a/osmops/cfg/store_test_dir/test_5/osm_ops_config.yaml b/osmops/cfg/store_test_dir/test_5/osm_ops_config.yaml new file mode 100644 index 0000000..5a5b1a8 --- /dev/null +++ b/osmops/cfg/store_test_dir/test_5/osm_ops_config.yaml @@ -0,0 +1,3 @@ +# valid config but no file extensions. +targetDir: . +connectionFile: secret.yaml diff --git a/osmops/cfg/store_test_dir/test_5/secret.yaml b/osmops/cfg/store_test_dir/test_5/secret.yaml new file mode 100644 index 0000000..46f94b3 --- /dev/null +++ b/osmops/cfg/store_test_dir/test_5/secret.yaml @@ -0,0 +1,4 @@ +hostname: host.ie:8008 +project: boetie +user: vans +password: '*' diff --git a/osmops/cfg/store_test_dir/test_6/deploy.me/ignore1.yaml b/osmops/cfg/store_test_dir/test_6/deploy.me/ignore1.yaml new file mode 100644 index 0000000..a6e57f6 --- /dev/null +++ b/osmops/cfg/store_test_dir/test_6/deploy.me/ignore1.yaml @@ -0,0 +1,3 @@ +some: + - yaml + - "to ignore" diff --git a/osmops/cfg/store_test_dir/test_6/deploy.me/k1.ops.yaml b/osmops/cfg/store_test_dir/test_6/deploy.me/k1.ops.yaml new file mode 100644 index 0000000..3a331df --- /dev/null +++ b/osmops/cfg/store_test_dir/test_6/deploy.me/k1.ops.yaml @@ -0,0 +1,8 @@ +kind: invalid +name: t1 +description: look ma! 
+nsdName: d1 +vnfName: f1 +vimAccountName: v1 +kdu: + name: k1 \ No newline at end of file diff --git a/osmops/cfg/store_test_dir/test_6/deploy.me/osm-pkgs/p1/dummy.yaml b/osmops/cfg/store_test_dir/test_6/deploy.me/osm-pkgs/p1/dummy.yaml new file mode 100644 index 0000000..e69de29 diff --git a/osmops/cfg/store_test_dir/test_6/deploy.me/osm-pkgs/p2/dummy.yaml b/osmops/cfg/store_test_dir/test_6/deploy.me/osm-pkgs/p2/dummy.yaml new file mode 100644 index 0000000..e69de29 diff --git a/osmops/cfg/store_test_dir/test_6/deploy.me/recurse/ignore2.yaml b/osmops/cfg/store_test_dir/test_6/deploy.me/recurse/ignore2.yaml new file mode 100644 index 0000000..2305f1c --- /dev/null +++ b/osmops/cfg/store_test_dir/test_6/deploy.me/recurse/ignore2.yaml @@ -0,0 +1 @@ +some: "yaml to ignore" diff --git a/osmops/cfg/store_test_dir/test_6/deploy.me/recurse/k2.ops.yaml b/osmops/cfg/store_test_dir/test_6/deploy.me/recurse/k2.ops.yaml new file mode 100644 index 0000000..521b24f --- /dev/null +++ b/osmops/cfg/store_test_dir/test_6/deploy.me/recurse/k2.ops.yaml @@ -0,0 +1,7 @@ +kind: NsInstance +name: t2 +nsdName: d2 +vnfName: f2 +vimAccountName: v2 +kdu: + name: k2 \ No newline at end of file diff --git a/osmops/cfg/store_test_dir/test_6/deploy.me/recurse/k3.ops.yaml b/osmops/cfg/store_test_dir/test_6/deploy.me/recurse/k3.ops.yaml new file mode 100644 index 0000000..91dcfb8 --- /dev/null +++ b/osmops/cfg/store_test_dir/test_6/deploy.me/recurse/k3.ops.yaml @@ -0,0 +1,7 @@ +kind: NsInstance +name: t3 +nsdName: d3 +vnfName: f3 +vimAccountName: v3 +kdu: + name: k3 \ No newline at end of file diff --git a/osmops/cfg/store_test_dir/test_6/deploy.me/secret.yaml b/osmops/cfg/store_test_dir/test_6/deploy.me/secret.yaml new file mode 100644 index 0000000..46f94b3 --- /dev/null +++ b/osmops/cfg/store_test_dir/test_6/deploy.me/secret.yaml @@ -0,0 +1,4 @@ +hostname: host.ie:8008 +project: boetie +user: vans +password: '*' diff --git a/osmops/cfg/store_test_dir/test_6/osm_ops_config.yaml 
b/osmops/cfg/store_test_dir/test_6/osm_ops_config.yaml new file mode 100644 index 0000000..b491f43 --- /dev/null +++ b/osmops/cfg/store_test_dir/test_6/osm_ops_config.yaml @@ -0,0 +1,4 @@ +targetDir: deploy.me +fileExtensions: + - .ops.yaml +connectionFile: deploy.me/secret.yaml \ No newline at end of file diff --git a/osmops/cfg/yamlreader.go b/osmops/cfg/yamlreader.go new file mode 100644 index 0000000..bcb17ce --- /dev/null +++ b/osmops/cfg/yamlreader.go @@ -0,0 +1,36 @@ +// Convenience functions to read and validate OSM Ops YAML data. +// +package cfg + +import ( + v "github.com/go-ozzo/ozzo-validation" + "gopkg.in/yaml.v2" +) + +func fromBytes(yamlData []byte, out v.Validatable) error { + if err := yaml.Unmarshal(yamlData, out); err != nil { + return err + } + if err := out.Validate(); err != nil { + return err + } + return nil +} + +func readOpsConfig(yamlData []byte) (*OpsConfig, error) { + out := &OpsConfig{} + err := fromBytes(yamlData, out) + return out, err +} + +func readOsmConnection(yamlData []byte) (*OsmConnection, error) { + out := &OsmConnection{} + err := fromBytes(yamlData, out) + return out, err +} + +func readKduNsAction(yamlData []byte) (*KduNsAction, error) { + out := &KduNsAction{} + err := fromBytes(yamlData, out) + return out, err +} diff --git a/osmops/cfg/yamlreader_test.go b/osmops/cfg/yamlreader_test.go new file mode 100644 index 0000000..15ba089 --- /dev/null +++ b/osmops/cfg/yamlreader_test.go @@ -0,0 +1,231 @@ +package cfg + +import ( + "reflect" + "testing" +) + +func TestFromBytesErrorOnInvalidYaml(t *testing.T) { + data := []byte(`x: { y`) + if _, err := readOpsConfig(data); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestReadOpsConfig(t *testing.T) { + data := ` +targetDir: deploy/ment +fileExtensions: + - .x + - .ya.ml +connectionFile: /the/secret/stash.yaml +` + want := &OpsConfig{ + TargetDir: "deploy/ment", + FileExtensions: []string{".x", ".ya.ml"}, + ConnectionFile: "/the/secret/stash.yaml", + } + + 
got, err := readOpsConfig([]byte(data)) + if err != nil { + t.Errorf("failed to read config object: %v", err) + } + if !reflect.DeepEqual(want, got) { + t.Errorf("want: %v; got: %v", want, got) + } +} + +func TestReadOpsConfigMissingTargetDir(t *testing.T) { + data := ` +fileExtensions: + - .x +connectionFile: /the/secret/stash.yaml +` + want := &OpsConfig{ + FileExtensions: []string{".x"}, + ConnectionFile: "/the/secret/stash.yaml", + } + + got, err := readOpsConfig([]byte(data)) + if err != nil { + t.Errorf("failed to read config object: %v", err) + } + if !reflect.DeepEqual(want, got) { + t.Errorf("want: %v; got: %v", want, got) + } +} + +func TestReadOpsConfigMissingFileExtensions(t *testing.T) { + data := ` +connectionFile: /the/secret/stash.yaml +` + want := &OpsConfig{ + ConnectionFile: "/the/secret/stash.yaml", + } + + got, err := readOpsConfig([]byte(data)) + if err != nil { + t.Errorf("failed to read config object: %v", err) + } + if !reflect.DeepEqual(want, got) { + t.Errorf("want: %v; got: %v", want, got) + } +} + +func TestReadOpsConfigMissingConnectionFile(t *testing.T) { + data := ` +targetDir: deploy/ment +` + got, err := readOpsConfig([]byte(data)) + if err == nil { + t.Errorf("want: validation fail; got: %v", got) + } +} + +func TestReadOsmConnection(t *testing.T) { + data := ` +hostname: osm.dev:8008 +project: pea +user: silly-billy +password: "yo! " +` + want := &OsmConnection{ + Hostname: "osm.dev:8008", + Project: "pea", + User: "silly-billy", + Password: "yo! ", + } + + got, err := readOsmConnection([]byte(data)) + if err != nil { + t.Errorf("failed to read config object: %v", err) + } + if !reflect.DeepEqual(want, got) { + t.Errorf("want: %v; got: %v", want, got) + } +} + +func TestReadInvalidOsmConnection(t *testing.T) { + data := ` +hostname: missing.port +user: silly-billy +password: "yo! 
" +` + got, err := readOsmConnection([]byte(data)) + if err == nil { + t.Errorf("want: validation fail; got: %v", got) + } +} + +func TestReadKduNsAction(t *testing.T) { + data := ` +kind: NsInstance +name: silly billy +description: look ma! +nsdName: nascar +vnfName: WTH +vimAccountName: emacs rocks +kdu: + name: kudu buck + params: + p: 1 + q: 2 +` + want := &KduNsAction{ + Kind: "NsInstance", + Name: "silly billy", + Description: "look ma!", + NsdName: "nascar", + VnfName: "WTH", + VimAccountName: "emacs rocks", + Kdu: Kdu{ + Name: "kudu buck", + Params: map[interface{}]interface{}{ + "p": 1, + "q": 2, + }, + }, + } + + got, err := readKduNsAction([]byte(data)) + if err != nil { + t.Errorf("failed to read config object: %v", err) + } + if !reflect.DeepEqual(want, got) { + t.Errorf("want: %v; got: %v", want, got) + } +} + +func TestReadInvalidKduNsAction(t *testing.T) { + data := ` +kind: invalid! +name: silly billy +description: look ma! +nsdName: nascar +vnfName: WTH +vimAccountName: emacs rocks +kdu: + name: kudu buck + params: + p: 1 +` + got, err := readKduNsAction([]byte(data)) + if err == nil { + t.Errorf("want: validation fail; got: %v", got) + } +} + +func TestReadKduNsActionWithNoParams(t *testing.T) { + data := ` +kind: NsInstance +name: silly billy +description: look ma! 
+nsdName: nascar +vnfName: WTH +vimAccountName: emacs rocks +kdu: + name: kudu buck +` + got, err := readKduNsAction([]byte(data)) + if err != nil { + t.Fatalf("want: data; got: %v", err) + } + if got.Kdu.Params != nil { + t.Errorf("want: nil; got: %+v", got.Kdu.Params) + } +} + +func TestReadKduNsActionWithSingleParam(t *testing.T) { + data := ` +kind: NsInstance +name: t3 +nsdName: d3 +vnfName: f3 +vimAccountName: v3 +kdu: + name: k3 + params: + replicaCount: "3" + +` + got, err := readKduNsAction([]byte(data)) + if err != nil { + t.Fatalf("want: data; got: %v", err) + } + + ps, ok := got.Kdu.Params.(map[interface{}]interface{}) + if !ok { + t.Fatalf("want: params map; got: %+v", got.Kdu.Params) + } + v, ok := ps["replicaCount"] + if !ok { + t.Fatalf("want: replicaCount value; got: not there") + } + sv, ok := v.(string) + if !ok { + t.Fatalf("want: string value; got: %v", v) + } + if sv != "3" { + t.Errorf(`want: "3"; got: "%s"`, sv) + } +} diff --git a/osmops/cfg/yamltypes.go b/osmops/cfg/yamltypes.go new file mode 100644 index 0000000..7bf4074 --- /dev/null +++ b/osmops/cfg/yamltypes.go @@ -0,0 +1,162 @@ +// Go structs that define the YAML data OSM Ops processes as well as +// validation functions. +// +// There are two kinds of YAML data OSM Ops deals with: +// +// * OSM GitOps files. Instructions OSM Ops has to carry out to transition +// the OSM deployment to the desired state. For now the only supported +// instructions are those related to KDU deployment, see `KduNsAction`. +// * Program configuration. Some basic data OSM Ops needs to process GitOps +// files---e.g. OSM client credentials. See `OpsConfig` and `OsmConnection`. +// +// All the stucts implement ozzo-validation's `Validatable` interface to +// validate the data read from YAML files. 
+// +package cfg + +import ( + v "github.com/go-ozzo/ozzo-validation" + + u "github.com/martel-innovate/osmops/osmops/util" + "github.com/martel-innovate/osmops/osmops/util/file" +) + +// OpsConfig holds the configuration data needed to scan a repo to find +// supported OSM Ops deployment files and run OSM commands according to +// the data found in those files. +type OpsConfig struct { + // TargetDir is a path, relative to the repo root, pointing to the + // directory containing OSM Ops YAML files. Defaults to the repo root + // if omitted. + TargetDir string `yaml:"targetDir"` + + // FileExtensions is a list of file extensions that OSM Ops considers + // when reading YAML configuration. OSM Ops looks in the TargetDir for + // OSM Ops YAML files and only reads those having the specified extensions. + // Defaults to `[".osmops.yaml", ".osmops.yml"]` if omitted. + FileExtensions []string `yaml:"fileExtensions"` + + // ConnectionFile is a path to the file containing OSM connection data. + // (See `OsmConnection` structure.) Typically this is an absolute path + // to a separate YAML config file mounted on the pod running OSM Ops + // through a K8s secret. While not recommended, it's also possible to + // keep this file in the repo. In that case, ConnectionFile should be + // a path relative to the repo root directory. + ConnectionFile string `yaml:"connectionFile"` +} + +// Validate OpsConfig data read from a YAML file. +// An instance is valid if: +// * TargetDir is not present or if present isn't empty and is a valid path. +// * ConnectionFile isn't empty and is a valid path. 
+func (d OpsConfig) Validate() error { + validTargetDir := func(value interface{}) error { // (*) + s, _ := value.(string) + if len(s) == 0 { + return nil + } + return file.IsStringPath(value) + } + return v.ValidateStruct(&d, + v.Field(&d.TargetDir, v.By(validTargetDir)), + v.Field(&d.ConnectionFile, v.By(file.IsStringPath)), + ) + + // (*) the latest ozzo-validation (GH/master) comes w/ conditional + // validation rules, so when they release it, we could replace our + // custom validTargetDir w/ e.g. + // v.When(d.TargetDir != "", v.By(u.IsStringPath)).Else(v.Nil) +} + +// OsmConnection holds the data the OSM client needs to connect to the OSM +// north-bound interface. +type OsmConnection struct { + Hostname string `yaml:"hostname"` + Project string `yaml:"project"` // OSM client default: admin + User string `yaml:"user"` // OSM client default: admin + Password string `yaml:"password"` // OSM client default: admin +} + +// OSM client defaults: +// * user ("--user" CLI option or "OSM_USER" env var): admin +// * password ("--password" CLI option or "OSM_PASSWORD" env var): admin +// * project ("--project" CLI option or "OSM_PROJECT" env var): admin + +// Validate OsmConnection data read from a YAML file. +// The hostname field must be in the form h:p where h is a DNS name or IP +// address and p is a valid port number---i.e. between 0 and 65535. IP6 +// addresses are accepted too but have to be enclosed in square brackets---e.g. +// "[::1]:80", "[::1%lo0]:80". +// The user, password and project fields must not be empty. 
+func (d OsmConnection) Validate() error { + return v.ValidateStruct(&d, + v.Field(&d.Hostname, v.By(u.IsHostAndPort)), + v.Field(&d.Project, v.Required), + v.Field(&d.User, v.Required), + v.Field(&d.Password, v.Required), + ) +} + +var KduNsActionKind = struct { + u.StrEnum + KIND u.EnumIx +}{ + StrEnum: u.NewStrEnum("NsInstance"), + KIND: 0, +} + +type Kdu struct { + Name string `yaml:"name"` + Params interface{} `yaml:"params"` +} + +func (d Kdu) Validate() error { + return v.ValidateStruct(&d, v.Field(&d.Name, v.Required)) +} + +// KduNsAction holds the data in a YAML file that instructs OSM Ops to run +// an NS action on a KDU. +type KduNsAction struct { + Kind string `yaml:"kind"` + Name string `yaml:"name"` + Description string `yaml:"description"` + NsdName string `yaml:"nsdName"` + VnfName string `yaml:"vnfName"` + VimAccountName string `yaml:"vimAccountName"` + Kdu Kdu `yaml:"kdu"` +} + +// Validate KduNsAction data read from a YAML file. +// An instance is valid if: +// * Kind has a value of KduNsActionKind. +// * Name, NsdName, VnfName, VimAccountName and Kdu.Name are not empty. +func (d KduNsAction) Validate() error { + return v.ValidateStruct(&d, + v.Field(&d.Kind, v.By(KduNsActionKind.Validate)), // (*) + v.Field(&d.Name, v.Required), + v.Field(&d.NsdName, v.Required), + v.Field(&d.VnfName, v.Required), + v.Field(&d.VimAccountName, v.Required), + v.Field(&d.Kdu), + ) + + // (*) ideally it'd be the In rule, but I couldn't get it right, if + // there's no Kind, validation passes! Ditto for the action. +} + +// TODO. Generic handling of OSM files. +// We could actually do much more than KDU create/upgrade and maybe we +// won't even need to write custom YAML wrappers and handle OSM files +// in a more generic way. In fact, we could potentially generate Go +// structs for all OSM "models": +// +// - https://osm.etsi.org/gitlab/osm/im/-/tree/master +// +// using e.g. 
+// +// - https://github.com/openconfig/ygot +// +// We could implement a code-gen pipeline similar to the one the OSM guys +// have for Python, except for Go: +// +// - https://osm.etsi.org/gitlab/osm/im/-/blob/master/Makefile diff --git a/osmops/cfg/yamltypes_test.go b/osmops/cfg/yamltypes_test.go new file mode 100644 index 0000000..8dd1d5b --- /dev/null +++ b/osmops/cfg/yamltypes_test.go @@ -0,0 +1,192 @@ +package cfg + +import ( + "testing" +) + +var opsConfigValidationFailFixtures = []OpsConfig{ + {TargetDir: "", ConnectionFile: ""}, + {TargetDir: " ", ConnectionFile: "\n"}, + {TargetDir: "valid", ConnectionFile: "\n"}, + {TargetDir: "\t", ConnectionFile: "./val/id"}, +} + +func TestOpsConfigValidationFail(t *testing.T) { + for k, d := range opsConfigValidationFailFixtures { + if got := d.Validate(); got == nil { + t.Errorf("[%d] want: error; got: valid", k) + } + } +} + +var opsConfigValidationOkFixtures = []OpsConfig{ + {ConnectionFile: "./"}, + {TargetDir: "", ConnectionFile: "./"}, + {TargetDir: ".", ConnectionFile: "./"}, + {TargetDir: " /a/", ConnectionFile: "/a/b "}, + {TargetDir: "valid", ConnectionFile: "./val/id"}, + {TargetDir: "\tval/id\n", ConnectionFile: "\n/val/id/\t"}, +} + +func TestOpsConfigValidationOk(t *testing.T) { + for k, d := range opsConfigValidationOkFixtures { + if got := d.Validate(); got != nil { + t.Errorf("[%d] want: valid; got: %s", k, got) + } + } +} + +var osmConnectionValidationFailFixtures = []OsmConnection{ + {Hostname: "", User: "u", Password: "p"}, + {}, {Hostname: "h", Password: "p"}, {Hostname: "h:80", Password: "p"}, + {Hostname: "h:20", User: "u", Password: "p"}, + {Hostname: "h:20", User: "u", Project: "p"}, +} + +func TestOsmConnectionValidationFail(t *testing.T) { + for k, d := range osmConnectionValidationFailFixtures { + if got := d.Validate(); got == nil { + t.Errorf("[%d] want: error; got: valid", k) + } + } +} + +var osmConnectionValidationOkFixtures = []OsmConnection{ + {Hostname: "h:0", Project: "p", User: 
"u", Password: "p"}, + {Hostname: "h:1", Project: "p", User: "u", Password: "*"}, +} + +func TestOsmConnectionValidationOk(t *testing.T) { + for k, d := range osmConnectionValidationOkFixtures { + if got := d.Validate(); got != nil { + t.Errorf("[%d] want: valid; got: %s", k, got) + } + } +} + +var kduNsActionValidationFailFixtures = []KduNsAction{ + {}, + { + Kind: "NsInstance", + Name: "x", + NsdName: "x", + VnfName: "x", + }, + { + Kind: "NSinsTance", + Name: "x", + VnfName: "x", + VimAccountName: "x", + Kdu: Kdu{ + Params: "x", + }, + }, + { + Name: "x", + NsdName: "x", + VnfName: "x", + VimAccountName: "x", + Kdu: Kdu{ + Name: "x", + Params: "x", + }, + }, + { + Kind: "ain't right", + Name: "x", + Description: "wada wada", + NsdName: "x", + VnfName: "x", + VimAccountName: "x", + Kdu: Kdu{ + Name: "x", + Params: "x", + }, + }, + { + Kind: "nsinstance", + VnfName: "x", + Kdu: Kdu{ + Name: "x", + Params: "x", + }, + }, + { + Kind: "NsInstance", + Name: "x", + VnfName: "x", + Kdu: Kdu{ + Name: "x", + Params: "x", + }, + }, + { + Kind: "NsInstance", + Name: "x", + VnfName: "x", + VimAccountName: "x", + Kdu: Kdu{ + Name: "x", + Params: "x", + }, + }, + { + Kind: "NsInstance", + Name: "x", + Kdu: Kdu{ + Name: "x", + Params: "x", + }, + }, +} + +func TestKduNsActionValidationFail(t *testing.T) { + for k, d := range kduNsActionValidationFailFixtures { + if got := d.Validate(); got == nil { + t.Errorf("[%d] want: error; got: valid", k) + } + } +} + +var kduNsActionValidationOkFixtures = []KduNsAction{ + { + Kind: "nsiNStance", + Name: "x", + NsdName: "x", + VnfName: "x", + VimAccountName: "x", + Kdu: Kdu{ + Name: "x", + Params: "x", + }, + }, + { + Kind: "NsInstance", + Name: "x", + Description: "wada wada", + NsdName: "x", + VnfName: "x", + VimAccountName: "x", + Kdu: Kdu{ + Name: "x", + }, + }, + { + Kind: "nsinstance", + Name: "x", + NsdName: "x", + VnfName: "x", + VimAccountName: "x", + Kdu: Kdu{ + Name: "x", + }, + }, +} + +func TestKduNsActionValidationOk(t 
*testing.T) { + for k, d := range kduNsActionValidationOkFixtures { + if got := d.Validate(); got != nil { + t.Errorf("[%d] want: valid; got: %s", k, got) + } + } +} diff --git a/osmops/engine/mocks_test.go b/osmops/engine/mocks_test.go new file mode 100644 index 0000000..517ee48 --- /dev/null +++ b/osmops/engine/mocks_test.go @@ -0,0 +1,174 @@ +package engine + +import ( + "context" + "errors" + "path" + "path/filepath" + "sort" + + "github.com/go-logr/logr" + + "github.com/martel-innovate/osmops/osmops/nbic" + "github.com/martel-innovate/osmops/osmops/util/file" +) + +// logr.Logger implementation + +type logCollector struct { + entries []logEntry +} + +type logEntry struct { + msg string + params map[interface{}]interface{} + err error +} + +func (c *logCollector) append(err error, msg string, kvs ...interface{}) { + e := logEntry{msg: msg, params: map[interface{}]interface{}{}, err: err} + for i := 0; i < len(kvs)-1; i += 2 { + k, v := kvs[i], kvs[i+1] + e.params[k] = v + } + + c.entries = append(c.entries, e) +} + +func (c *logCollector) Enabled() bool { + return true +} + +func (c *logCollector) Info(msg string, keysAndValues ...interface{}) { + c.append(nil, msg, keysAndValues...) +} + +func (c *logCollector) Error(err error, msg string, keysAndValues ...interface{}) { + c.append(err, msg, keysAndValues...) 
+} + +func (c *logCollector) V(level int) logr.Logger { + return c +} + +func (c *logCollector) WithValues(keysAndValues ...interface{}) logr.Logger { + return c +} + +func (c *logCollector) WithName(name string) logr.Logger { + return c +} + +// logr.Logger & context factory functions + +func newCtx(logger *logCollector) context.Context { + return logr.NewContext(context.TODO(), logger) +} + +func newLogCollector() *logCollector { + return &logCollector{entries: []logEntry{}} +} + +// logCollector utils + +func (c *logCollector) countEntries() int { + return len(c.entries) +} + +func (c *logCollector) msgAt(ix int) string { + return c.entries[ix].msg +} + +func (c *logCollector) errAt(ix int) error { + return c.entries[ix].err +} + +func (c *logCollector) sortProcessedFileNames() []string { + names := []string{} + for _, e := range c.entries { + if e.msg == processingMsg { + if path, ok := e.params[fileLogKey]; ok { + if p, ok := path.(string); ok { + name := filepath.Base(p) + names = append(names, name) + } + } + } + } + sort.Strings(names) + return names +} + +func (c *logCollector) sortErrorFileNames() []string { + names := []string{} + for _, e := range c.entries { + if e.msg == processingErrMsg { + if err, ok := e.err.(*file.VisitError); ok { + name := filepath.Base(err.AbsPath) + names = append(names, name) + } + } + } + sort.Strings(names) + return names +} + +// nbic.Workflow implementation + +type mockCreateOrUpdate struct { + dataMap map[string]*nbic.NsInstanceContent + processedPkgNames []string +} + +func newMockNbicWorkflow() *mockCreateOrUpdate { + return &mockCreateOrUpdate{ + dataMap: map[string]*nbic.NsInstanceContent{}, + processedPkgNames: []string{}, + } +} + +func (m *mockCreateOrUpdate) CreateOrUpdateNsInstance(data *nbic.NsInstanceContent) error { + m.dataMap[data.KduName] = data + if data.KduName == "k2" { + return errors.New("k2") + } + return nil +} + +func (m *mockCreateOrUpdate) CreateOrUpdatePackage(source file.AbsPath) error { + name 
:= path.Base(source.Value()) + if name == "p1" { + return errors.New("p1") + } + m.processedPkgNames = append(m.processedPkgNames, name) + return nil +} + +// mockCreateOrUpdate utils + +func (m *mockCreateOrUpdate) hasProcessedKdus() bool { + return len(m.dataMap) > 0 +} + +func (m *mockCreateOrUpdate) hasProcessedKdu(name string) bool { + _, ok := m.dataMap[name] + return ok +} + +func (m *mockCreateOrUpdate) dataFor(kduName string) *nbic.NsInstanceContent { + if data, ok := m.dataMap[kduName]; ok { + return data + } + return nil +} + +func (m *mockCreateOrUpdate) lookupParam(kduName string, paramName string) interface{} { + if data := m.dataFor(kduName); data != nil { + if ps, ok := data.KduParams.(map[interface{}]interface{}); ok { + if v, ok := ps[paramName]; ok { + return v + } + } + } + return nil +} diff --git a/osmops/engine/reconcile.go b/osmops/engine/reconcile.go new file mode 100644 index 0000000..6ccc7b1 --- /dev/null +++ b/osmops/engine/reconcile.go @@ -0,0 +1,174 @@ +package engine + +import ( + "context" + + "github.com/go-logr/logr" + + "github.com/martel-innovate/osmops/osmops/cfg" + "github.com/martel-innovate/osmops/osmops/nbic" + u "github.com/martel-innovate/osmops/osmops/util" + "github.com/martel-innovate/osmops/osmops/util/file" +) + +type Engine struct { + ctx context.Context + opsConfig *cfg.Store + nbic nbic.Workflow +} + +func newNbic(opsConfig *cfg.OsmConnection) (nbic.Workflow, error) { + hp, err := u.ParseHostAndPort(opsConfig.Hostname) + if err != nil { + return nil, err + } + + conn := nbic.Connection{ + Address: *hp, + Secure: false, + } + usrCreds := nbic.UserCredentials{ + Username: opsConfig.User, + Password: opsConfig.Password, + Project: opsConfig.Project, + } + + return nbic.New(conn, usrCreds) +} + +func newProcessor(ctx context.Context, repoRootDir string) (*Engine, error) { + rootDir, err := file.ParseAbsPath(repoRootDir) + if err != nil { + return nil, err + } + + store, err := cfg.NewStore(rootDir) + if err != nil { + 
return nil, err + } + + client, err := newNbic(store.OsmConnection()) + return &Engine{ + ctx: ctx, + opsConfig: store, + nbic: client, + }, err +} + +func log(ctx context.Context) logr.Logger { + return logr.FromContext(ctx) +} + +func (p *Engine) log() logr.Logger { + return log(p.ctx) +} + +func (p *Engine) repoScanner() *cfg.KduNsActionRepoScanner { + return cfg.NewKduNsActionRepoScanner(p.opsConfig) +} + +const ( + processingMsg = "processing" + packageLogKey = "osm package" + fileLogKey = "file" + engineInitErrMsg = "can't initialize reconcile engine" + processingErrMsg = "processing errors" + errorLogKey = "error" +) + +func (p *Engine) processPackages() []error { + es := []error{} + pkgs, err := p.opsConfig.RepoPkgDirectories() + if err != nil { + es = append(es, err) + return es + } + for _, pkgPath := range pkgs { + p.log().Info(processingMsg, packageLogKey, pkgPath.Value()) + + err = p.nbic.CreateOrUpdatePackage(pkgPath) + if err != nil { + es = append(es, err) + } + } + return es +} + +func (p *Engine) Process(file *cfg.KduNsActionFile) error { + p.log().Info(processingMsg, fileLogKey, file.FilePath.Value()) + + data := nbic.NsInstanceContent{ + Name: file.Content.Name, + Description: file.Content.Description, + NsdName: file.Content.NsdName, + VnfName: file.Content.VnfName, + VimAccountName: file.Content.VimAccountName, + KduName: file.Content.Kdu.Name, + KduParams: file.Content.Kdu.Params, + } + return p.nbic.CreateOrUpdateNsInstance(&data) +} + +// New instantiates an Engine to reconcile the state of the OSM deployment +// with that declared in the OSM GitOps files found in the specified repo. 
+func New(ctx context.Context, repoRootDir string) (*Engine, error) { + engine, err := newProcessor(ctx, repoRootDir) + if err != nil { + log(ctx).Error(err, engineInitErrMsg) + return nil, err + } + return engine, nil +} + +// Reconcile looks for OSM GitOps files in the repo and, for each file +// found, it calls OSM NBI to reach the deployment state declared in the +// file. +// +// Additionally, if there's an OSM package root directory (see: Store), +// Reconcile creates or updates any OSM packages found in there. Reconcile +// blindly assumes that any sub-directory p of the OSM package root directory +// contains the source files of an OSM package. It reads p's contents to +// create a gzipped tar archive in the OSM format (including creating the +// "checksums.txt" file) and then streams it to OSM NBI to create or update +// the package in OSM. (See: nbic.CreateOrUpdatePackage) +// +// Notice at the moment OsmOps does **not explicitly** handle dependencies +// among OSM packages. But it does process sub-directories of the OSM package +// root directory in alphabetical order. This way, the operator can name +// package directories in such a way that if package p2 depends on p1, +// p2's name comes before p1's in alphabetical order. For example, say +// you want to deploy a KNF using two packages: one, p1, contains the actual +// KNF definition whereas the other, p2, contains an NS definition referencing +// p1. Then you could use the following naming scheme: +// +// my-gitops-repo +// | +// + -- deployment-target-dir +// + -- osm-pkgs +// + -- my-service_knf (<- p1's contents) +// | - my-service_vnfd.yaml +// + -- my-service_ns (<- p2's contents) +// | - my-service_nsd.yaml +// | - README.md +// +// Because my-service_knf < my-service_ns (alphabetical order), Reconcile +// will first process my-service_knf and then my-service_ns. +// +// Surely this is a stopgap solution. Eventually we'll implement proper +// handling of package dependencies. 
(Solution: parse OSM package definitions, +// build dependency graph, extract DAG d[k] for each graph component g[k], +// topologically sort d[k] ~~> s[k]; process s[k] sequences in parallel.) +func (p *Engine) Reconcile() { + errors := p.processPackages() + if len(errors) == 0 { + errors = p.repoScanner().Visit(p) + } + // else stop there since KDU ops might fail b/c referenced packages + // weren't created or updated. + + if len(errors) > 0 { + for k, e := range errors { + p.log().Error(e, processingErrMsg, errorLogKey, k) + } + } +} diff --git a/osmops/engine/reconcile_test.go b/osmops/engine/reconcile_test.go new file mode 100644 index 0000000..f6be24c --- /dev/null +++ b/osmops/engine/reconcile_test.go @@ -0,0 +1,202 @@ +package engine + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/martel-innovate/osmops/osmops/cfg" + "github.com/martel-innovate/osmops/osmops/util/file" +) + +func findTestDataDir(dirIndex int) file.AbsPath { + _, thisFileName, _, _ := runtime.Caller(1) + enclosingDir := filepath.Dir(thisFileName) + testDataDirName := fmt.Sprintf("test_%d", dirIndex) + testDataDir := filepath.Join(enclosingDir, "reconcile_test_dir", + testDataDirName) + p, _ := file.ParseAbsPath(testDataDir) + + return p +} + +func TestReconcileFailOnInvalidRootDir(t *testing.T) { + logger := newLogCollector() + + if _, err := New(newCtx(logger), ""); err == nil { + t.Errorf("want: error; got: nil") + } + + if got := logger.countEntries(); got != 1 { + t.Fatalf("want: 1; got: %d", got) + } + if got := logger.msgAt(0); got != engineInitErrMsg { + t.Errorf("want: %s; got: %s", engineInitErrMsg, got) + } +} + +func TestReconcileFailOnInvalidOpsConfig(t *testing.T) { + logger := newLogCollector() + repoRootDir := findTestDataDir(1) + + if _, err := New(newCtx(logger), repoRootDir.Value()); err == nil { + t.Errorf("want: error; got: nil") + } + + if got := logger.countEntries(); got != 1 { + t.Fatalf("want: 1; got: %d", 
got) + } + if got := logger.msgAt(0); got != engineInitErrMsg { + t.Errorf("want: %s; got: %s", engineInitErrMsg, got) + } + if got, ok := logger.errAt(0).(*fs.PathError); !ok { + t.Errorf("want: path error; got: %v", got) + } +} + +func TestReconcileDoNothingIfNoOsmGitOpsFileNorOsmPkgFound(t *testing.T) { + logger := newLogCollector() + repoRootDir := findTestDataDir(2) + + engine, err := New(newCtx(logger), repoRootDir.Value()) + if err != nil { + t.Errorf("want: engine; got: %v", err) + } + + engine.Reconcile() + if got := logger.countEntries(); got != 0 { + t.Fatalf("want: 0; got: %d", got) + } +} + +func TestReconcileProcessOsmGitOpsFiles(t *testing.T) { + logger := newLogCollector() + repoRootDir := findTestDataDir(3) + mockNbic := newMockNbicWorkflow() + + engine, err := New(newCtx(logger), repoRootDir.Value()) + if err != nil { + t.Errorf("want: engine; got: %v", err) + } + + engine.nbic = mockNbic + engine.Reconcile() + + if mockNbic.hasProcessedKdu("k1") { + t.Errorf("want: skip k1 (invalid content); got: processed") + } + + if data := mockNbic.dataFor("k2"); data == nil { + t.Errorf("want: process k2; got: not processed") + } else { + if data.KduParams != nil { + t.Errorf("want: nil; got: %+v", data.KduParams) + } + } + + if !mockNbic.hasProcessedKdu("k3") { + t.Errorf("want: process k3; got: not processed") + } else { + got := mockNbic.lookupParam("k3", "replicaCount") + if got != "3" { + t.Errorf(`want: "3"; got: %s`, got) + } + } + + if got := logger.countEntries(); got != 4 { + t.Errorf("want: 4; got: %d", got) + } + want := []string{"k2.ops.yaml", "k3.ops.yaml"} + if got := logger.sortProcessedFileNames(); !reflect.DeepEqual(want, got) { + t.Errorf("want: %v; got: %v", want, got) + } + want = []string{"k1.ops.yaml", "k2.ops.yaml"} + // k2: simulated processing error, see mockCreateOrUpdate + if got := logger.sortErrorFileNames(); !reflect.DeepEqual(want, got) { + t.Errorf("want: %v; got: %v", want, got) + } +} + +func 
TestNewNbicFailOnInvalidHostAndPort(t *testing.T) { + config := &cfg.OsmConnection{ + Hostname: "missing.port", // never happens b/c of yaml validation + User: "u", + Password: "*", + Project: "p", + } + if _, err := newNbic(config); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestReconcileProcessNoOsmGitOpsFilesOnPackageErr(t *testing.T) { + logger := newLogCollector() + repoRootDir := findTestDataDir(4) + mockNbic := newMockNbicWorkflow() + engine, _ := New(newCtx(logger), repoRootDir.Value()) + engine.nbic = mockNbic + + engine.Reconcile() + + wantProcessedPkgs := []string{"p2"} + if !reflect.DeepEqual(mockNbic.processedPkgNames, wantProcessedPkgs) { + t.Errorf("want processed pkgs: %v; got: %v", wantProcessedPkgs, + mockNbic.processedPkgNames) + } + if mockNbic.hasProcessedKdus() { + t.Errorf("want: skip kdus b/c of prev pkg errors; got: some processed") + } +} + +func TestReconcileProcessStopOnRootPackageDirAccessErr(t *testing.T) { + logger := newLogCollector() + repoRootDir := findTestDataDir(5) + mockNbic := newMockNbicWorkflow() + engine, _ := New(newCtx(logger), repoRootDir.Value()) + engine.nbic = mockNbic + + pkgsDir := repoRootDir.Join("deploy.me/osm-pkgs") + os.Chmod(pkgsDir.Value(), 0200) // processPackages can't scan it + defer os.Chmod(pkgsDir.Value(), 0755) + + engine.Reconcile() + + wantProcessedPkgs := []string{} + if !reflect.DeepEqual(mockNbic.processedPkgNames, wantProcessedPkgs) { + t.Errorf("want processed pkgs: %v; got: %v", wantProcessedPkgs, + mockNbic.processedPkgNames) + } + if mockNbic.hasProcessedKdus() { + t.Errorf("want: skip kdus b/c of prev pkg errors; got: some processed") + } +} + +func TestReconcileProcessPackagesAndOsmGitOpsFiles(t *testing.T) { + logger := newLogCollector() + repoRootDir := findTestDataDir(5) + mockNbic := newMockNbicWorkflow() + engine, _ := New(newCtx(logger), repoRootDir.Value()) + engine.nbic = mockNbic + + engine.Reconcile() + + wantProcessedPkgs := []string{"p2", "p3"} + if 
!reflect.DeepEqual(mockNbic.processedPkgNames, wantProcessedPkgs) { + t.Errorf("want processed pkgs: %v; got: %v", wantProcessedPkgs, + mockNbic.processedPkgNames) + } + + if mockNbic.hasProcessedKdu("k1") { + t.Errorf("want: skip k1 (invalid content); got: processed") + } + if !mockNbic.hasProcessedKdu("k2") { + t.Errorf("want: process k2; got: not processed") + } + if !mockNbic.hasProcessedKdu("k3") { + t.Errorf("want: process k3; got: not processed") + } +} diff --git a/osmops/engine/reconcile_test_dir/test_1/osm_ops_config.yaml b/osmops/engine/reconcile_test_dir/test_1/osm_ops_config.yaml new file mode 100644 index 0000000..8a460ad --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_1/osm_ops_config.yaml @@ -0,0 +1,7 @@ +# specified connection file isn't there. +# notice the config is otherwise valid since the targetDir points to the +# dir enclosing this file which obviously exists. +targetDir: . +fileExtensions: + - .ops.yaml +connectionFile: /no/way/jose/secret.yaml \ No newline at end of file diff --git a/osmops/engine/reconcile_test_dir/test_2/deploy.me/secret.yaml b/osmops/engine/reconcile_test_dir/test_2/deploy.me/secret.yaml new file mode 100644 index 0000000..46f94b3 --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_2/deploy.me/secret.yaml @@ -0,0 +1,4 @@ +hostname: host.ie:8008 +project: boetie +user: vans +password: '*' diff --git a/osmops/engine/reconcile_test_dir/test_2/osm_ops_config.yaml b/osmops/engine/reconcile_test_dir/test_2/osm_ops_config.yaml new file mode 100644 index 0000000..fa3d076 --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_2/osm_ops_config.yaml @@ -0,0 +1,4 @@ +targetDir: deploy.me +fileExtensions: + - .ops.yaml +connectionFile: deploy.me/secret.yaml diff --git a/osmops/engine/reconcile_test_dir/test_3/deploy.me/k1.ops.yaml b/osmops/engine/reconcile_test_dir/test_3/deploy.me/k1.ops.yaml new file mode 100644 index 0000000..3a331df --- /dev/null +++ 
b/osmops/engine/reconcile_test_dir/test_3/deploy.me/k1.ops.yaml @@ -0,0 +1,8 @@ +kind: invalid +name: t1 +description: look ma! +nsdName: d1 +vnfName: f1 +vimAccountName: v1 +kdu: + name: k1 \ No newline at end of file diff --git a/osmops/engine/reconcile_test_dir/test_3/deploy.me/k2.ops.yaml b/osmops/engine/reconcile_test_dir/test_3/deploy.me/k2.ops.yaml new file mode 100644 index 0000000..521b24f --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_3/deploy.me/k2.ops.yaml @@ -0,0 +1,7 @@ +kind: NsInstance +name: t2 +nsdName: d2 +vnfName: f2 +vimAccountName: v2 +kdu: + name: k2 \ No newline at end of file diff --git a/osmops/engine/reconcile_test_dir/test_3/deploy.me/k3.ops.yaml b/osmops/engine/reconcile_test_dir/test_3/deploy.me/k3.ops.yaml new file mode 100644 index 0000000..0acaf41 --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_3/deploy.me/k3.ops.yaml @@ -0,0 +1,9 @@ +kind: NsInstance +name: t3 +nsdName: d3 +vnfName: f3 +vimAccountName: v3 +kdu: + name: k3 + params: + replicaCount: "3" diff --git a/osmops/engine/reconcile_test_dir/test_3/deploy.me/secret.yaml b/osmops/engine/reconcile_test_dir/test_3/deploy.me/secret.yaml new file mode 100644 index 0000000..46f94b3 --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_3/deploy.me/secret.yaml @@ -0,0 +1,4 @@ +hostname: host.ie:8008 +project: boetie +user: vans +password: '*' diff --git a/osmops/engine/reconcile_test_dir/test_3/osm_ops_config.yaml b/osmops/engine/reconcile_test_dir/test_3/osm_ops_config.yaml new file mode 100644 index 0000000..b491f43 --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_3/osm_ops_config.yaml @@ -0,0 +1,4 @@ +targetDir: deploy.me +fileExtensions: + - .ops.yaml +connectionFile: deploy.me/secret.yaml \ No newline at end of file diff --git a/osmops/engine/reconcile_test_dir/test_4/deploy.me/k1.ops.yaml b/osmops/engine/reconcile_test_dir/test_4/deploy.me/k1.ops.yaml new file mode 100644 index 0000000..3a331df --- /dev/null +++ 
b/osmops/engine/reconcile_test_dir/test_4/deploy.me/k1.ops.yaml @@ -0,0 +1,8 @@ +kind: invalid +name: t1 +description: look ma! +nsdName: d1 +vnfName: f1 +vimAccountName: v1 +kdu: + name: k1 \ No newline at end of file diff --git a/osmops/engine/reconcile_test_dir/test_4/deploy.me/k2.ops.yaml b/osmops/engine/reconcile_test_dir/test_4/deploy.me/k2.ops.yaml new file mode 100644 index 0000000..521b24f --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_4/deploy.me/k2.ops.yaml @@ -0,0 +1,7 @@ +kind: NsInstance +name: t2 +nsdName: d2 +vnfName: f2 +vimAccountName: v2 +kdu: + name: k2 \ No newline at end of file diff --git a/osmops/engine/reconcile_test_dir/test_4/deploy.me/k3.ops.yaml b/osmops/engine/reconcile_test_dir/test_4/deploy.me/k3.ops.yaml new file mode 100644 index 0000000..0acaf41 --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_4/deploy.me/k3.ops.yaml @@ -0,0 +1,9 @@ +kind: NsInstance +name: t3 +nsdName: d3 +vnfName: f3 +vimAccountName: v3 +kdu: + name: k3 + params: + replicaCount: "3" diff --git a/osmops/engine/reconcile_test_dir/test_4/deploy.me/osm-pkgs/p1/dummy.yaml b/osmops/engine/reconcile_test_dir/test_4/deploy.me/osm-pkgs/p1/dummy.yaml new file mode 100644 index 0000000..e69de29 diff --git a/osmops/engine/reconcile_test_dir/test_4/deploy.me/osm-pkgs/p2/dummy.yaml b/osmops/engine/reconcile_test_dir/test_4/deploy.me/osm-pkgs/p2/dummy.yaml new file mode 100644 index 0000000..e69de29 diff --git a/osmops/engine/reconcile_test_dir/test_4/deploy.me/secret.yaml b/osmops/engine/reconcile_test_dir/test_4/deploy.me/secret.yaml new file mode 100644 index 0000000..46f94b3 --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_4/deploy.me/secret.yaml @@ -0,0 +1,4 @@ +hostname: host.ie:8008 +project: boetie +user: vans +password: '*' diff --git a/osmops/engine/reconcile_test_dir/test_4/osm_ops_config.yaml b/osmops/engine/reconcile_test_dir/test_4/osm_ops_config.yaml new file mode 100644 index 0000000..b491f43 --- /dev/null +++ 
b/osmops/engine/reconcile_test_dir/test_4/osm_ops_config.yaml @@ -0,0 +1,4 @@ +targetDir: deploy.me +fileExtensions: + - .ops.yaml +connectionFile: deploy.me/secret.yaml \ No newline at end of file diff --git a/osmops/engine/reconcile_test_dir/test_5/deploy.me/k1.ops.yaml b/osmops/engine/reconcile_test_dir/test_5/deploy.me/k1.ops.yaml new file mode 100644 index 0000000..3a331df --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_5/deploy.me/k1.ops.yaml @@ -0,0 +1,8 @@ +kind: invalid +name: t1 +description: look ma! +nsdName: d1 +vnfName: f1 +vimAccountName: v1 +kdu: + name: k1 \ No newline at end of file diff --git a/osmops/engine/reconcile_test_dir/test_5/deploy.me/k2.ops.yaml b/osmops/engine/reconcile_test_dir/test_5/deploy.me/k2.ops.yaml new file mode 100644 index 0000000..521b24f --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_5/deploy.me/k2.ops.yaml @@ -0,0 +1,7 @@ +kind: NsInstance +name: t2 +nsdName: d2 +vnfName: f2 +vimAccountName: v2 +kdu: + name: k2 \ No newline at end of file diff --git a/osmops/engine/reconcile_test_dir/test_5/deploy.me/k3.ops.yaml b/osmops/engine/reconcile_test_dir/test_5/deploy.me/k3.ops.yaml new file mode 100644 index 0000000..0acaf41 --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_5/deploy.me/k3.ops.yaml @@ -0,0 +1,9 @@ +kind: NsInstance +name: t3 +nsdName: d3 +vnfName: f3 +vimAccountName: v3 +kdu: + name: k3 + params: + replicaCount: "3" diff --git a/osmops/engine/reconcile_test_dir/test_5/deploy.me/osm-pkgs/p2/dummy.yaml b/osmops/engine/reconcile_test_dir/test_5/deploy.me/osm-pkgs/p2/dummy.yaml new file mode 100644 index 0000000..e69de29 diff --git a/osmops/engine/reconcile_test_dir/test_5/deploy.me/osm-pkgs/p3/dummy.yaml b/osmops/engine/reconcile_test_dir/test_5/deploy.me/osm-pkgs/p3/dummy.yaml new file mode 100644 index 0000000..e69de29 diff --git a/osmops/engine/reconcile_test_dir/test_5/deploy.me/secret.yaml b/osmops/engine/reconcile_test_dir/test_5/deploy.me/secret.yaml new file mode 100644 index 
0000000..46f94b3 --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_5/deploy.me/secret.yaml @@ -0,0 +1,4 @@ +hostname: host.ie:8008 +project: boetie +user: vans +password: '*' diff --git a/osmops/engine/reconcile_test_dir/test_5/osm_ops_config.yaml b/osmops/engine/reconcile_test_dir/test_5/osm_ops_config.yaml new file mode 100644 index 0000000..b491f43 --- /dev/null +++ b/osmops/engine/reconcile_test_dir/test_5/osm_ops_config.yaml @@ -0,0 +1,4 @@ +targetDir: deploy.me +fileExtensions: + - .ops.yaml +connectionFile: deploy.me/secret.yaml \ No newline at end of file diff --git a/osmops/engine/tmp_test.go b/osmops/engine/tmp_test.go new file mode 100644 index 0000000..07807a8 --- /dev/null +++ b/osmops/engine/tmp_test.go @@ -0,0 +1,48 @@ +package engine + +import ( + "gopkg.in/yaml.v2" + + "github.com/martel-innovate/osmops/osmops/nbic" + "github.com/martel-innovate/osmops/osmops/util" +) + +type kduP struct { + Params interface{} `yaml:"params"` +} + +func kduParams() interface{} { + yamlData := []byte(`--- +params: + replicaCount: "2" +`) + + kdu := kduP{} + if err := yaml.Unmarshal(yamlData, &kdu); err != nil { + panic(err) + } + return kdu.Params +} + +func T() { + hp, _ := util.ParseHostAndPort("192.168.64.19:80") + conn := nbic.Connection{Address: *hp, Secure: false} + usrCreds := nbic.UserCredentials{ + Username: "admin", Password: "admin", Project: "admin", + } + client, _ := nbic.New(conn, usrCreds) + + data := nbic.NsInstanceContent{ + Name: "ldap3", + Description: "wada wada", + NsdName: "openldap_ns", + VimAccountName: "mylocation1", + VnfName: "openldap", + KduName: "ldap", + KduParams: kduParams(), + } + err := client.CreateOrUpdateNsInstance(&data) + if err != nil { + panic(err) + } +} diff --git a/osmops/nbic/auth.go b/osmops/nbic/auth.go new file mode 100644 index 0000000..0967c88 --- /dev/null +++ b/osmops/nbic/auth.go @@ -0,0 +1,61 @@ +package nbic + +import ( + "errors" + "net/url" + + //lint:ignore ST1001 HTTP EDSL is more readable w/o 
qualified import + . "github.com/martel-innovate/osmops/osmops/util/http" + "github.com/martel-innovate/osmops/osmops/util/http/sec" +) + +// UserCredentials holds the data needed to request an OSM NBI token. +type UserCredentials struct { + Username string `json:"username"` + Password string `json:"password"` + Project string `json:"project_id"` +} + +type tokenPayloadView struct { // only the response fields we care about. + Id string `json:"id"` + Expires float64 `json:"expires"` +} + +type authMan struct { + creds UserCredentials + endpoint *url.URL + agent ReqSender +} + +// NewAuthz builds a TokenManager to acquire and refresh OSM NBI access tokens. +func NewAuthz(conn Connection, creds UserCredentials, transport ReqSender) ( + *sec.TokenManager, error) { + if transport == nil { + return nil, errors.New("nil transport") + } + + theMan := &authMan{ + creds: creds, + endpoint: conn.Tokens(), + agent: transport, + } + + return sec.NewTokenManager(theMan.acquireToken, &sec.MemoryTokenStore{}) +} + +func (m *authMan) acquireToken() (*sec.Token, error) { + payload := tokenPayloadView{} + _, err := Request( + POST, At(m.endpoint), + Content(MediaType.YAML), // same as what OSM client does + Accept(MediaType.JSON), + JsonBody(m.creds), + ). + SetHandler(ExpectSuccess(), ReadJsonResponse(&payload)). 
+ RunWith(m.agent) + + if err != nil { + return nil, err + } + return sec.NewToken(payload.Id, payload.Expires), nil +} diff --git a/osmops/nbic/auth_test.go b/osmops/nbic/auth_test.go new file mode 100644 index 0000000..d721449 --- /dev/null +++ b/osmops/nbic/auth_test.go @@ -0,0 +1,132 @@ +package nbic + +import ( + "encoding/json" + "net/http" + "reflect" + "testing" + + "github.com/martel-innovate/osmops/osmops/util" +) + +var usrCreds = UserCredentials{ + Username: "admin", Password: "admin", Project: "admin", +} + +func sameCreds(expected UserCredentials, req *http.Request) bool { + got := UserCredentials{} + json.NewDecoder(req.Body).Decode(&got) + return reflect.DeepEqual(expected, got) +} + +func newConn() Connection { + address, _ := util.ParseHostAndPort("localhost:8080") + return Connection{Address: *address} +} + +type mockTransport struct { + received *http.Request + replyWith *http.Response + timesCalled int +} + +func (m *mockTransport) send(req *http.Request) (*http.Response, error) { + m.received = req + m.timesCalled += 1 + return m.replyWith, nil +} + +func TestGetExpiredToken(t *testing.T) { + mock := &mockTransport{ + replyWith: &http.Response{ + StatusCode: http.StatusOK, + Body: stringReader(expiredNbiTokenPayload), + }, + } + mngr, _ := NewAuthz(newConn(), usrCreds, mock.send) + + if _, err := mngr.GetAccessToken(); err == nil { + t.Errorf("want: error; got: nil") + } + if mock.timesCalled != 1 { + t.Errorf("want: 1; got: %d", mock.timesCalled) + } + if !sameCreds(usrCreds, mock.received) { + t.Errorf("want: same usrCreds; got: different") + } +} + +func TestGetValidToken(t *testing.T) { + mock := &mockTransport{ + replyWith: &http.Response{ + StatusCode: http.StatusOK, + Body: stringReader(validNbiTokenPayload), + }, + } + mngr, _ := NewAuthz(newConn(), usrCreds, mock.send) + token, err := mngr.GetAccessToken() + + if err != nil { + t.Errorf("want: token; got: %v", err) + } + + wantData := "TuD41hLjDvjlR2cPcAFvWcr6FGvRhIk2" + if 
token.String() != wantData { + t.Errorf("want: %s; got: %s", wantData, token.String()) + } + if token.HasExpired() { + t.Errorf("want: still valid; got: expired") + } + + if mock.timesCalled != 1 { + t.Errorf("want: 1; got: %d", mock.timesCalled) + } + if !sameCreds(usrCreds, mock.received) { + t.Errorf("want: same usrCreds; got: different") + } +} + +func TestGetTokenStopIfResponseNotOkay(t *testing.T) { + mock := &mockTransport{ + replyWith: &http.Response{ + StatusCode: 500, + Body: stringReader(validNbiTokenPayload), + }, + } + mngr, _ := NewAuthz(newConn(), usrCreds, mock.send) + if _, err := mngr.GetAccessToken(); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestGetTokenPayloadWithNoTokenFields(t *testing.T) { + mock := &mockTransport{ + replyWith: &http.Response{ + StatusCode: http.StatusOK, + Body: stringReader(`{"x": 1}`), + }, + } + mngr, _ := NewAuthz(newConn(), usrCreds, mock.send) + if _, err := mngr.GetAccessToken(); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestGetTokenPayloadDeserializationError(t *testing.T) { + mock := &mockTransport{ + replyWith: &http.Response{ + StatusCode: http.StatusOK, + Body: stringReader(`["expecting", "an object", "not an array!"]`), + }, + } + mngr, _ := NewAuthz(newConn(), usrCreds, mock.send) + if _, err := mngr.GetAccessToken(); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestNewAuthzErrorOnNilTransport(t *testing.T) { + if _, err := NewAuthz(Connection{}, UserCredentials{}, nil); err == nil { + t.Errorf("want: error; got: nil") + } +} diff --git a/osmops/nbic/client.go b/osmops/nbic/client.go new file mode 100644 index 0000000..f70e66f --- /dev/null +++ b/osmops/nbic/client.go @@ -0,0 +1,144 @@ +// Client to interact with OSM north-bound interface (NBI). 
+package nbic + +import ( + "crypto/tls" + "net/http" + "net/url" + "time" + + "github.com/martel-innovate/osmops/osmops/util/file" + + //lint:ignore ST1001 HTTP EDSL is more readable w/o qualified import + . "github.com/martel-innovate/osmops/osmops/util/http" + "github.com/martel-innovate/osmops/osmops/util/http/sec" +) + +// Workflow defines functions to carry out high-level tasks, usually involving +// several NBI calls. +type Workflow interface { + // CreateOrUpdateNsInstance creates or updates an NS instance in OSM + // through NBI. + // + // If there's no instance with the specified name, then a new one gets + // created. Otherwise, it's an update. Notice OSM allows duplicate instance + // names (bug?), hence it's not safe to update an instance given it's + // name---which instance to update if there's more than one with the + // same name? So CreateOrUpdateNsInstance errors out if the given name + // is tied to more than one instance. + // + // For now we only support creating or updating KNFs. For a create or + // update operation to work, the target KNF must've been "on-boarded" + // in OSM already. So there must be, in OSM, a NSD and VNFD for it. + CreateOrUpdateNsInstance(data *NsInstanceContent) error + + // CreateOrUpdatePackage uploads the given package to OSM through NBI. + // + // CreateOrUpdatePackage blindly assumes that the given directory in + // the OSMOps repo contains either a KNF or NS package. If the directory + // name ends with "_knf", CreateOrUpdatePackage treats the whole directory + // as a KNF package. Likewise, if the directory name ends with "_ns", + // CreateOrUpdatePackage treats it as an NS package. (CreateOrUpdatePackage + // will report an error if the directory name doesn't have an "_ns" or + // "_knf" suffix.) + // + // CreateOrUpdatePackage also relies on another naming convention to + // figure out the package ID. In fact, it assumes the directory name + // is also the package ID declared in the KNF or NS YAML stanza. 
+ // + // CreateOrUpdatePackage expects to find the source files of the OSM + // package in the given source directory or subdirectories. It reads, + // recursively, the files in source, creates a gzipped tar archive in + // the OSM format (including creating the "checksums.txt" file) and + // then streams it to OSM NBI to create or update the package in OSM. + CreateOrUpdatePackage(source file.AbsPath) error +} + +const REQUEST_TIMEOUT_SECONDS = 600 + +func newHttpClient() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, // (1) + }, + }, + Timeout: time.Second * REQUEST_TIMEOUT_SECONDS, // (2) + } + // NOTE. + // 1. Man-in-the-middle attacks. OSM client doesn't validate the server + // cert, so we do the same. But this is a huge security loophole since it + // opens the door to man-in-the-middle attacks. + // 2. Request timeout. Always specify it, see + // - https://medium.com/@nate510/don-t-use-go-s-default-http-client-4804cb19f779 +} + +type Session struct { + conn Connection + creds UserCredentials + transport ReqSender + authz *sec.TokenManager + nsdMap nsDescMap + vnfdMap vnfDescMap + vimAccMap vimAccountMap + nsInstMap nsInstanceMap +} + +func New(conn Connection, creds UserCredentials, transport ...ReqSender) ( + *Session, error) { + httpc := newHttpClient() + + agent := httpc.Do + if len(transport) > 0 { + agent = transport[0] + } + + authz, err := NewAuthz(conn, creds, agent) + if err != nil { + return nil, err + } + + return &Session{ + conn: conn, + creds: creds, + transport: agent, + authz: authz, + }, nil +} + +func (c *Session) NbiAccessToken() ReqBuilder { + provider := func() (string, error) { + if token, err := c.authz.GetAccessToken(); err != nil { + return "", err + } else { + return token.String(), nil + } + } + return BearerToken(provider) +} + +func (c *Session) getJson(endpoint *url.URL, data interface{}) ( + *http.Response, error) { + return Request( + GET, 
At(endpoint), + c.NbiAccessToken(), + Accept(MediaType.JSON), + ). + SetHandler(ExpectSuccess(), ReadJsonResponse(data)). + RunWith(c.transport) +} + +func (c *Session) postJson(endpoint *url.URL, inData interface{}, + outData ...interface{}) (*http.Response, error) { + req := Request( + POST, At(endpoint), + c.NbiAccessToken(), + Accept(MediaType.JSON), + Content(MediaType.YAML), // same as what OSM client does + JsonBody(inData), + ) + if len(outData) > 0 { + req.SetHandler(ExpectSuccess(), ReadJsonResponse(outData[0])) + } + return req.RunWith(c.transport) +} diff --git a/osmops/nbic/client_test.go b/osmops/nbic/client_test.go new file mode 100644 index 0000000..6e983c9 --- /dev/null +++ b/osmops/nbic/client_test.go @@ -0,0 +1,41 @@ +package nbic + +import ( + "testing" +) + +func TestNewNbicErrorOnNilTransport(t *testing.T) { + if client, err := New(newConn(), usrCreds, nil); err == nil { + t.Errorf("want: error; got: %+v", client) + } +} + +func TestNewNbicWithDefaultTransport(t *testing.T) { + client, err := New(newConn(), usrCreds) + if err != nil { + t.Errorf("want: client; got: %v", err) + } + if client.transport == nil { + t.Errorf("want: transport; got: nil") + } +} + +func TestGetJsonStopIfResponseNotOkay(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + if _, err := nbic.getJson(urls.buildUrl("/wrong"), nil); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestPostJsonStopIfResponseNotOkay(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + if _, err := nbic.postJson(urls.buildUrl("/wrong"), "42", nil); err == nil { + t.Errorf("want: error; got: nil") + } +} diff --git a/osmops/nbic/enpoints.go b/osmops/nbic/enpoints.go new file mode 100644 index 0000000..182287b --- /dev/null +++ b/osmops/nbic/enpoints.go @@ -0,0 +1,84 @@ +package nbic + +import ( + "fmt" + "net/url" + + "github.com/martel-innovate/osmops/osmops/util" +) 
+ +// Connection holds the data needed to establish a network connection with +// the OSM NBI. +type Connection struct { + Address util.HostAndPort + Secure bool +} + +func (b Connection) buildUrl(path string) *url.URL { + if url, err := b.Address.BuildHttpUrl(b.Secure, path); err != nil { + panic(err) // see note below + } else { + return url + } +} + +// NOTE. Panic on URL building. +// Ideally buildUrl should return (*url.URL, error) instead of panicing. But +// then it becomes a royal pain in the backside to write code that uses the +// URL functions below and testing for the URL build error case needs to +// happen at every calling site---e.g. if you call Tokens then ideally there +// should be a unit test to check what happens when Tokens returns an error. +// So we take a shortcut with the panic call. As long as we call all the +// URL functions below in our unit tests, we can be sure the panic won't +// happen at runtime. + +// Tokens returns the URL to the NBI tokens endpoint. +func (b Connection) Tokens() *url.URL { + return b.buildUrl("/osm/admin/v1/tokens") +} + +// NsDescriptors returns the URL to the NBI NS descriptors endpoint. +func (b Connection) NsDescriptors() *url.URL { + return b.buildUrl("/osm/nsd/v1/ns_descriptors") +} + +// VimAccounts returns the URL to the VIM accounts endpoint. +func (b Connection) VimAccounts() *url.URL { + return b.buildUrl("/osm/admin/v1/vim_accounts") +} + +// NsInstances returns the URL to the NS instances content endpoint. +func (b Connection) NsInstancesContent() *url.URL { + return b.buildUrl("/osm/nslcm/v1/ns_instances_content") +} + +// NsInstancesAction returns the URL to the NS instances action endpoint +// for the NS instance identified by the given ID. +func (b Connection) NsInstancesAction(nsInstanceId string) *url.URL { + path := fmt.Sprintf("/osm/nslcm/v1/ns_instances/%s/action", nsInstanceId) + return b.buildUrl(path) +} + +// VnfPackagesContent returns the URL to the VNF packages content endpoint. 
+func (b Connection) VnfPackagesContent() *url.URL { + return b.buildUrl("/osm/vnfpkgm/v1/vnf_packages_content") +} + +// VnfPackageContent returns the URL to the endpoint of the VNF package +// content identified by the given ID. +func (b Connection) VnfPackageContent(pkgId string) *url.URL { + path := fmt.Sprintf("/osm/vnfpkgm/v1/vnf_packages_content/%s", pkgId) + return b.buildUrl(path) +} + +// NsPackagesContent returns the URL to the NS packages content endpoint. +func (b Connection) NsPackagesContent() *url.URL { + return b.buildUrl("/osm/nsd/v1/ns_descriptors_content") +} + +// NsPackageContent returns the URL to the endpoint of the NS package +// content identified by the given ID. +func (b Connection) NsPackageContent(pkgId string) *url.URL { + path := fmt.Sprintf("/osm/nsd/v1/ns_descriptors_content/%s", pkgId) + return b.buildUrl(path) +} diff --git a/osmops/nbic/nbi_data_test.go b/osmops/nbic/nbi_data_test.go new file mode 100644 index 0000000..5afaf26 --- /dev/null +++ b/osmops/nbic/nbi_data_test.go @@ -0,0 +1,695 @@ +package nbic + +// expired on Wed Sep 08 2021 18:52:11 GMT+0000 +var expiredNbiTokenPayload = `{ + "issued_at": 1631123531.1251214, + "expires": 1631127131.1251214, + "_id": "TuD41hLjDvjlR2cPcAFvWcr6FGvRhIk2", + "id": "TuD41hLjDvjlR2cPcAFvWcr6FGvRhIk2", + "project_id": "fada443a-905c-4241-8a33-4dcdbdac55e7", + "project_name": "admin", + "username": "admin", + "user_id": "5c6f2d64-9c23-4718-806a-c74c3fc3c98f", + "admin": true, + "roles": [{ + "name": "system_admin", + "id": "cb545e44-cd2b-4c0b-93aa-7e2cee79afc3" + }] +}` + +// expires on Sat May 17 2053 20:38:51 GMT+0000 +var validNbiTokenPayload = `{ + "issued_at": 2631127131.1251214, + "expires": 2631127131.1251214, + "_id": "TuD41hLjDvjlR2cPcAFvWcr6FGvRhIk2", + "id": "TuD41hLjDvjlR2cPcAFvWcr6FGvRhIk2", + "project_id": "fada443a-905c-4241-8a33-4dcdbdac55e7", + "project_name": "admin", + "username": "admin", + "user_id": "5c6f2d64-9c23-4718-806a-c74c3fc3c98f", + "admin": true, + "roles": 
[{ + "name": "system_admin", + "id": "cb545e44-cd2b-4c0b-93aa-7e2cee79afc3" + }] +}` + +var vnfDescriptors = `[ + { + "_id": "4ffdeb67-92e7-46fa-9fa2-331a4d674137", + "description": "KNF with single KDU using a helm-chart for openldap version 1.2.7", + "df": [ + { + "id": "default-df" + } + ], + "ext-cpd": [ + { + "id": "mgmt-ext", + "k8s-cluster-net": "mgmtnet" + } + ], + "id": "openldap_knf", + "k8s-cluster": { + "nets": [ + { + "id": "mgmtnet" + } + ] + }, + "kdu": [ + { + "name": "ldap", + "helm-chart": "stable/openldap:1.2.7" + } + ], + "mgmt-cp": "mgmt-ext", + "product-name": "openldap_knf", + "provider": "Telefonica", + "version": "1.0", + "_admin": { + "userDefinedData": {}, + "created": 1655475517.840946, + "modified": 1655478654.0081894, + "projects_read": [ + "c9e9cf6f-98a4-45f8-b18d-b70d93422d88" + ], + "projects_write": [ + "c9e9cf6f-98a4-45f8-b18d-b70d93422d88" + ], + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "storage": { + "fs": "mongo", + "path": "/app/storage/", + "folder": "4ffdeb67-92e7-46fa-9fa2-331a4d674137", + "pkg-dir": "openldap_knf", + "descriptor": "openldap_knf/openldap_vnfd.yaml", + "zipfile": "openldap_knf.tar.gz" + } + }, + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "_links": { + "self": { + "href": "/vnfpkgm/v1/vnf_packages/4ffdeb67-92e7-46fa-9fa2-331a4d674137" + }, + "vnfd": { + "href": "/vnfpkgm/v1/vnf_packages/4ffdeb67-92e7-46fa-9fa2-331a4d674137/vnfd" + }, + "packageContent": { + "href": "/vnfpkgm/v1/vnf_packages/4ffdeb67-92e7-46fa-9fa2-331a4d674137/package_content" + } + } + }, + { + "_id": "5ccfed39-92e7-46fa-9fa2-331a4d674137", + "description": "Made-up KNF with single KDU using a helm-chart for openldap version 1.2.7", + "df": [ + { + "id": "default-df" + } + ], + "ext-cpd": [ + { + "id": "mgmt-ext", + "k8s-cluster-net": "mgmtnet" + } + ], + "id": "dummy_knf", + "k8s-cluster": { + "nets": [ + { + "id": "mgmtnet" + } + ] 
+ }, + "kdu": [ + { + "name": "ldap", + "helm-chart": "stable/openldap:1.2.7" + } + ], + "mgmt-cp": "mgmt-ext", + "product-name": "dummy_knf", + "provider": "big corp", + "version": "1.0", + "_admin": { + "userDefinedData": {}, + "created": 1655475517.840946, + "modified": 1655478654.0081894, + "projects_read": [ + "c9e9cf6f-98a4-45f8-b18d-b70d93422d88" + ], + "projects_write": [ + "c9e9cf6f-98a4-45f8-b18d-b70d93422d88" + ], + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "storage": { + "fs": "mongo", + "path": "/app/storage/", + "folder": "5ccfed39-92e7-46fa-9fa2-331a4d674137", + "pkg-dir": "dummy_knf", + "descriptor": "dummy_knf/openldap_vnfd.yaml", + "zipfile": "dummy_knf.tar.gz" + } + }, + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "_links": { + "self": { + "href": "/vnfpkgm/v1/vnf_packages/5ccfed39-92e7-46fa-9fa2-331a4d674137" + }, + "vnfd": { + "href": "/vnfpkgm/v1/vnf_packages/5ccfed39-92e7-46fa-9fa2-331a4d674137/vnfd" + }, + "packageContent": { + "href": "/vnfpkgm/v1/vnf_packages/5ccfed39-92e7-46fa-9fa2-331a4d674137/package_content" + } + } + } +]` + +var nsDescriptors = `[ + { + "_id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "id": "openldap_ns", + "designer": "OSM", + "version": "1.0", + "name": "openldap_ns", + "vnfd-id": [ + "openldap_knf" + ], + "virtual-link-desc": [ + { + "id": "mgmtnet", + "mgmt-network": true + } + ], + "df": [ + { + "id": "default-df", + "vnf-profile": [ + { + "id": "openldap", + "virtual-link-connectivity": [ + { + "constituent-cpd-id": [ + { + "constituent-base-element-id": "openldap", + "constituent-cpd-id": "mgmt-ext" + } + ], + "virtual-link-profile-id": "mgmtnet" + } + ], + "vnfd-id": "openldap_knf" + } + ] + } + ], + "description": "NS consisting of a single KNF openldap_knf connected to mgmt network", + "_admin": { + "userDefinedData": {}, + "created": 1631268635.96618, + "modified": 1631268637.8627107, + 
"projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "storage": { + "fs": "mongo", + "path": "/app/storage/", + "folder": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "pkg-dir": "openldap_ns", + "descriptor": "openldap_ns/openldap_nsd.yaml", + "zipfile": "openldap_ns.tar.gz" + } + }, + "nsdOnboardingState": "ONBOARDED", + "nsdOperationalState": "ENABLED", + "nsdUsageState": "NOT_IN_USE", + "_links": { + "self": { + "href": "/nsd/v1/ns_descriptors/aba58e40-d65f-4f4e-be0a-e248c14d3e03" + }, + "nsd_content": { + "href": "/nsd/v1/ns_descriptors/aba58e40-d65f-4f4e-be0a-e248c14d3e03/nsd_content" + } + } + }, + { + "_id": "ddd20a30-d65f-4f4e-be0a-e248c14d3e03", + "id": "dummy_ns", + "designer": "OSM", + "version": "1.0", + "name": "dummy_ns", + "vnfd-id": [ + "dummy_knf" + ], + "virtual-link-desc": [ + { + "id": "mgmtnet", + "mgmt-network": true + } + ], + "df": [ + { + "id": "default-df", + "vnf-profile": [ + { + "id": "dummy", + "virtual-link-connectivity": [ + { + "constituent-cpd-id": [ + { + "constituent-base-element-id": "dummy", + "constituent-cpd-id": "mgmt-ext" + } + ], + "virtual-link-profile-id": "mgmtnet" + } + ], + "vnfd-id": "dummy_knf" + } + ] + } + ], + "description": "Made-up NS consisting of a single KNF dummy_knf connected to mgmt network", + "_admin": { + "userDefinedData": {}, + "created": 1631268635.96618, + "modified": 1631268637.8627107, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "storage": { + "fs": "mongo", + "path": "/app/storage/", + "folder": "ddd20a30-d65f-4f4e-be0a-e248c14d3e03", + "pkg-dir": "dummy_ns", + "descriptor": "dummy_ns/openldap_nsd.yaml", + "zipfile": 
"openldap_ns.tar.gz" + } + }, + "nsdOnboardingState": "ONBOARDED", + "nsdOperationalState": "ENABLED", + "nsdUsageState": "NOT_IN_USE", + "_links": { + "self": { + "href": "/nsd/v1/ns_descriptors/ddd20a30-d65f-4f4e-be0a-e248c14d3e03" + }, + "nsd_content": { + "href": "/nsd/v1/ns_descriptors/ddd20a30-d65f-4f4e-be0a-e248c14d3e03/nsd_content" + } + } + } +]` + +var vimAccounts = `[ + { + "_id": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "name": "mylocation1", + "vim_type": "dummy", + "description": null, + "vim_url": "http://localhost/dummy", + "vim_user": "u", + "vim_password": "fNnfmd3KFXvfyVKu3nzItg==", + "vim_tenant_name": "p", + "_admin": { + "created": 1631212983.5388303, + "modified": 1631212983.5388303, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "operationalState": "ENABLED", + "operations": [ + { + "lcmOperationType": "create", + "operationState": "COMPLETED", + "startTime": 1631212983.5930278, + "statusEnteredTime": 1631212984.0220273, + "operationParams": null + } + ], + "current_operation": null, + "detailed-status": "" + }, + "schema_version": "1.11", + "admin": { + "current_operation": 0 + } + } +]` + +var nsInstancesContent = `[ + { + "_id": "0335c32c-d28c-4d79-9b94-0ffa36326932", + "name": "ldap", + "name-ref": "ldap", + "short-name": "ldap", + "admin-status": "ENABLED", + "nsState": "READY", + "currentOperation": "IDLE", + "currentOperationID": null, + "errorDescription": null, + "errorDetail": null, + "deploymentStatus": null, + "configurationStatus": [], + "vcaStatus": null, + "nsd": { + "_id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "id": "openldap_ns", + "designer": "OSM", + "version": "1.0", + "name": "openldap_ns", + "vnfd-id": [ + "openldap_knf" + ], + "virtual-link-desc": [ + { + "id": "mgmtnet", + "mgmt-network": true + } + ], + "df": [ + { + "id": "default-df", + "vnf-profile": [ + { + "id": "openldap", + "virtual-link-connectivity": [ + { + 
"constituent-cpd-id": [ + { + "constituent-base-element-id": "openldap", + "constituent-cpd-id": "mgmt-ext" + } + ], + "virtual-link-profile-id": "mgmtnet" + } + ], + "vnfd-id": "openldap_knf" + } + ] + } + ], + "description": "NS consisting of a single KNF openldap_knf connected to mgmt network", + "_admin": { + "userDefinedData": {}, + "created": 1631268635.96618, + "modified": 1631268637.8627107, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "storage": { + "fs": "mongo", + "path": "/app/storage/", + "folder": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "pkg-dir": "openldap_ns", + "descriptor": "openldap_ns/openldap_nsd.yaml", + "zipfile": "openldap_ns.tar.gz" + } + } + }, + "datacenter": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "resource-orchestrator": "osmopenmano", + "description": "default description", + "constituent-vnfr-ref": [ + "ae63ee09-847f-4108-9a22-852899b6e0ae" + ], + "operational-status": "running", + "config-status": "configured", + "detailed-status": "Done", + "orchestration-progress": {}, + "create-time": 1631277626.5666356, + "nsd-name-ref": "openldap_ns", + "operational-events": [], + "nsd-ref": "openldap_ns", + "nsd-id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "vnfd-id": [ + "d506d18f-0738-42ab-8b45-cfa98da38e7a" + ], + "instantiate_params": { + "nsdId": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "nsName": "ldap", + "nsDescription": "default description", + "vimAccountId": "4a4425f7-3e72-4d45-a4ec-4241186f3547" + }, + "additionalParamsForNs": null, + "ns-instance-config-ref": "0335c32c-d28c-4d79-9b94-0ffa36326932", + "id": "0335c32c-d28c-4d79-9b94-0ffa36326932", + "ssh-authorized-key": null, + "flavor": [], + "image": [], + "vld": [ + { + "id": "mgmtnet", + "mgmt-network": true, + "name": "mgmtnet", + "type": null, + "vim_info": { + 
"vim:4a4425f7-3e72-4d45-a4ec-4241186f3547": { + "vim_account_id": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "vim_network_name": null, + "vim_details": "{name: mgmtnet, status: ACTIVE}\n", + "vim_id": "81a7fb44-b765-4b16-985f-13b481d3b892", + "vim_status": "ACTIVE", + "vim_name": "mgmtnet" + } + } + } + ], + "_admin": { + "created": 1631277626.626409, + "modified": 1631285336.7610166, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "nsState": "INSTANTIATED", + "current-operation": null, + "nslcmop": null, + "operation-type": null, + "deployed": { + "RO": { + "vnfd": [], + "operational-status": "running" + }, + "VCA": [], + "K8s": [ + { + "kdu-instance": "stable-openldap-1-2-3-0098084071", + "k8scluster-uuid": "kube-system:b33b0bfd-ce33-47b9-b286-a60c8f04b6d9", + "k8scluster-type": "helm-chart-v3", + "member-vnf-index": "openldap", + "kdu-name": "ldap", + "kdu-model": "stable/openldap:1.2.3", + "namespace": "fada443a-905c-4241-8a33-4dcdbdac55e7", + "kdu-deployment-name": "", + "detailed-status": "{'info': {'deleted': '', 'description': 'Install complete', 'first_deployed': '2021-09-10T12:40:56.55575157Z', 'last_deployed': '2021-09-10T12:40:56.55575157Z', 'status': 'deployed'}, 'name': 'stable-openldap-1-2-3-0098084071', 'namespace': 'fada443a-905c-4241-8a33-4dcdbdac55e7', 'version': 1}", + "operation": "install", + "status": "Install complete", + "status-time": "1631277711.4568162" + } + ] + } + } + }, + { + "_id": "136fcc46-c363-4d74-af14-c115fff7d80a", + "name": "ldap2", + "name-ref": "ldap2", + "short-name": "ldap2", + "admin-status": "ENABLED", + "nsState": "READ", + "currentOperation": "IDLE", + "currentOperationID": null, + "errorDescription": null, + "errorDetail": null, + "deploymentStatus": null, + "configurationStatus": [], + "vcaStatus": null, + "nsd": { + "_id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "id": "openldap_ns", + "designer": "OSM", + "version": "1.0", + 
"name": "openldap_ns", + "vnfd-id": [ + "openldap_knf" + ], + "virtual-link-desc": [ + { + "id": "mgmtnet", + "mgmt-network": true + } + ], + "df": [ + { + "id": "default-df", + "vnf-profile": [ + { + "id": "openldap", + "virtual-link-connectivity": [ + { + "constituent-cpd-id": [ + { + "constituent-base-element-id": "openldap", + "constituent-cpd-id": "mgmt-ext" + } + ], + "virtual-link-profile-id": "mgmtnet" + } + ], + "vnfd-id": "openldap_knf" + } + ] + } + ], + "description": "NS consisting of a single KNF openldap_knf connected to mgmt network", + "_admin": { + "userDefinedData": {}, + "created": 1631268635.96618, + "modified": 1631268637.8627107, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "onboardingState": "ONBOARDED", + "operationalState": "ENABLED", + "usageState": "NOT_IN_USE", + "storage": { + "fs": "mongo", + "path": "/app/storage/", + "folder": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "pkg-dir": "openldap_ns", + "descriptor": "openldap_ns/openldap_nsd.yaml", + "zipfile": "openldap_ns.tar.gz" + } + } + }, + "datacenter": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "resource-orchestrator": "osmopenmano", + "description": "default description", + "constituent-vnfr-ref": [ + "609ae829-8fbe-44f1-944d-2fba5cd909c2" + ], + "operational-status": "running", + "config-status": "configured", + "detailed-status": "Done", + "orchestration-progress": {}, + "create-time": 1631282159.0447648, + "nsd-name-ref": "openldap_ns", + "operational-events": [], + "nsd-ref": "openldap_ns", + "nsd-id": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "vnfd-id": [ + "d506d18f-0738-42ab-8b45-cfa98da38e7a" + ], + "instantiate_params": { + "nsdId": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", + "nsName": "ldap2", + "nsDescription": "default description", + "vimAccountId": "4a4425f7-3e72-4d45-a4ec-4241186f3547" + }, + "additionalParamsForNs": null, + "ns-instance-config-ref": 
"136fcc46-c363-4d74-af14-c115fff7d80a", + "id": "136fcc46-c363-4d74-af14-c115fff7d80a", + "ssh-authorized-key": null, + "flavor": [], + "image": [], + "vld": [ + { + "id": "mgmtnet", + "mgmt-network": true, + "name": "mgmtnet", + "type": null, + "vim_info": { + "vim:4a4425f7-3e72-4d45-a4ec-4241186f3547": { + "vim_account_id": "4a4425f7-3e72-4d45-a4ec-4241186f3547", + "vim_network_name": null, + "vim_details": "{name: mgmtnet, status: ACTIVE}\n", + "vim_id": "81a7fb44-b765-4b16-985f-13b481d3b892", + "vim_status": "ACTIVE", + "vim_name": "mgmtnet" + } + } + } + ], + "_admin": { + "created": 1631282159.0555632, + "modified": 1631285403.5654724, + "projects_read": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "projects_write": [ + "fada443a-905c-4241-8a33-4dcdbdac55e7" + ], + "nsState": "INSTANTIATED", + "current-operation": null, + "nslcmop": null, + "operation-type": null, + "deployed": { + "RO": { + "vnfd": [], + "operational-status": "running" + }, + "VCA": [], + "K8s": [ + { + "kdu-instance": "stable-openldap-1-2-3-0044064996", + "k8scluster-uuid": "kube-system:b33b0bfd-ce33-47b9-b286-a60c8f04b6d9", + "k8scluster-type": "helm-chart-v3", + "member-vnf-index": "openldap", + "kdu-name": "ldap", + "kdu-model": "stable/openldap:1.2.3", + "namespace": "fada443a-905c-4241-8a33-4dcdbdac55e7", + "kdu-deployment-name": "", + "detailed-status": "{'config': {'replicaCount': '2'}, 'info': {'deleted': '', 'description': 'Install complete', 'first_deployed': '2021-09-10T13:56:20.089257801Z', 'last_deployed': '2021-09-10T13:56:20.089257801Z', 'status': 'deployed'}, 'name': 'stable-openldap-1-2-3-0044064996', 'namespace': 'fada443a-905c-4241-8a33-4dcdbdac55e7', 'version': 1}", + "operation": "install", + "status": "Install complete", + "status-time": "1631282216.1732676" + } + ] + } + } + }, + { + "_id": "111fcc46-c363-4d74-af14-c115fff7d80a", + "name": "dup-name" + }, + { + "_id": "222fcc46-c363-4d74-af14-c115fff7d80a", + "name": "dup-name" + } +]` diff --git 
a/osmops/nbic/nbi_test.go b/osmops/nbic/nbi_test.go new file mode 100644 index 0000000..c010a3e --- /dev/null +++ b/osmops/nbic/nbi_test.go @@ -0,0 +1,157 @@ +package nbic + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "path" + "strings" + + u "github.com/martel-innovate/osmops/osmops/util/http" +) + +func stringReader(data string) io.ReadCloser { + return io.NopCloser(strings.NewReader(data)) +} + +type requestReply struct { + req *http.Request + res *http.Response +} + +type mockNbi struct { + handlers map[string]u.ReqSender + exchanges []requestReply + packages map[string][]byte +} + +func newMockNbi() *mockNbi { + mock := &mockNbi{ + handlers: map[string]u.ReqSender{}, + exchanges: []requestReply{}, + packages: map[string][]byte{}, + } + + mock.handlers[handlerKey("POST", "/osm/admin/v1/tokens")] = tokenHandler + mock.handlers[handlerKey("GET", "/osm/nsd/v1/ns_descriptors")] = nsDescHandler + mock.handlers[handlerKey("GET", "/osm/admin/v1/vim_accounts")] = vimAccHandler + mock.handlers[handlerKey("GET", "/osm/nslcm/v1/ns_instances_content")] = nsInstContentHandler + mock.handlers[handlerKey("POST", "/osm/nslcm/v1/ns_instances_content")] = nsInstContentHandler + mock.handlers[handlerKey("POST", + "/osm/nslcm/v1/ns_instances/0335c32c-d28c-4d79-9b94-0ffa36326932/action")] = nsInstActionHandler + mock.handlers[handlerKey("GET", + "/osm/vnfpkgm/v1/vnf_packages_content")] = vnfDescHandler + mock.handlers[handlerKey("POST", + "/osm/vnfpkgm/v1/vnf_packages_content")] = mock.createPkgHandler + mock.handlers[handlerKey("PUT", + "/osm/vnfpkgm/v1/vnf_packages_content/")] = mock.updatePkgHandler + mock.handlers[handlerKey("POST", + "/osm/nsd/v1/ns_descriptors_content")] = mock.createPkgHandler + mock.handlers[handlerKey("PUT", + "/osm/nsd/v1/ns_descriptors_content/")] = mock.updatePkgHandler + + return mock +} + +func handlerKey(method string, path string) string { + return fmt.Sprintf("%s %s", method, path) +} + +func (s *mockNbi) lookupHandler(req 
*http.Request) (u.ReqSender, error) { + key := handlerKey(req.Method, req.URL.Path) + if handle, ok := s.handlers[key]; ok { + return handle, nil + } + for k, handle := range s.handlers { + if strings.HasPrefix(key, k) { + return handle, nil + } + } + return nil, fmt.Errorf("no handler for request: %s", key) +} + +func (s *mockNbi) exchange(req *http.Request) (*http.Response, error) { + handle, err := s.lookupHandler(req) + if err != nil { + return &http.Response{StatusCode: http.StatusInternalServerError}, err + } + + res, err := handle(req) + rr := requestReply{req: req, res: res} + s.exchanges = append(s.exchanges, rr) + + return res, err +} + +func tokenHandler(req *http.Request) (*http.Response, error) { + reqCreds := UserCredentials{} + json.NewDecoder(req.Body).Decode(&reqCreds) + if reqCreds.Password != usrCreds.Password { + return &http.Response{StatusCode: http.StatusUnauthorized}, nil + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: stringReader(validNbiTokenPayload), + }, nil +} + +func vnfDescHandler(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: stringReader(vnfDescriptors), + }, nil +} + +func nsDescHandler(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: stringReader(nsDescriptors), + }, nil +} + +func vimAccHandler(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: stringReader(vimAccounts), + }, nil +} + +func nsInstContentHandler(req *http.Request) (*http.Response, error) { + if req.Method == "GET" { + return &http.Response{ + StatusCode: http.StatusOK, + Body: stringReader(nsInstancesContent), + }, nil + } + + // POST + return &http.Response{StatusCode: http.StatusCreated}, nil +} + +func nsInstActionHandler(req *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: http.StatusAccepted}, nil +} + +func (m *mockNbi) 
createPkgHandler(req *http.Request) (*http.Response, error) { + name := strings.TrimSuffix(req.Header.Get("Content-Filename"), ".tar.gz") + if name == "" { + return &http.Response{StatusCode: http.StatusBadRequest}, nil + } + + if _, ok := m.packages[name]; ok { + return &http.Response{StatusCode: http.StatusConflict}, nil + } + + pkgTgzData, _ := io.ReadAll(req.Body) + m.packages[name] = pkgTgzData + return &http.Response{StatusCode: http.StatusCreated}, nil +} + +func (m *mockNbi) updatePkgHandler(req *http.Request) (*http.Response, error) { + osmPkgId := path.Base(req.URL.Path) + pkgTgzData, _ := io.ReadAll(req.Body) + m.packages[osmPkgId] = pkgTgzData + return &http.Response{StatusCode: http.StatusOK}, nil +} diff --git a/osmops/nbic/nsdescriptors.go b/osmops/nbic/nsdescriptors.go new file mode 100644 index 0000000..a78caee --- /dev/null +++ b/osmops/nbic/nsdescriptors.go @@ -0,0 +1,53 @@ +package nbic + +type nsDescView struct { // only the response fields we care about. + Id string `json:"_id"` + Name string `json:"id"` +} + +type nsDescMap map[string]string + +func buildNsDescMap(ds []nsDescView) nsDescMap { + descMap := map[string]string{} + for _, d := range ds { + descMap[d.Name] = d.Id + } + return descMap +} + +// NOTE. NSD name to ID lookup. +// For our nsDescMap to work, there must be a bijection between NSD IDs and +// name IDs. Luckily, this is the case since OSM NBI enforces uniqueness of +// NSD name IDs. If you try uploading another package with a NSD having the +// same name ID of an existing one, OSM NBI will complain loudly, e.g. +// +// HTTP/1.1 409 Conflict +// ... 
+// { +// "code": "CONFLICT", +// "status": 409, +// "detail": "nsd with id 'openldap_ns' already exists for this project" +// } + +func (c *Session) getNsDescriptors() ([]nsDescView, error) { + data := []nsDescView{} + if _, err := c.getJson(c.conn.NsDescriptors(), &data); err != nil { + return nil, err + } + return data, nil +} + +func (c *Session) lookupNsDescriptorId(name string) (string, error) { + if c.nsdMap == nil { + if ds, err := c.getNsDescriptors(); err != nil { + return "", err + } else { + c.nsdMap = buildNsDescMap(ds) + } + } + if id, ok := c.nsdMap[name]; !ok { + return "", &missingDescriptor{typ: "NSD", name: name} + } else { + return id, nil + } +} diff --git a/osmops/nbic/nsdescriptors_test.go b/osmops/nbic/nsdescriptors_test.go new file mode 100644 index 0000000..c5f76a7 --- /dev/null +++ b/osmops/nbic/nsdescriptors_test.go @@ -0,0 +1,71 @@ +package nbic + +import ( + "testing" +) + +func TestLookupNsDescIdUseCachedData(t *testing.T) { + nbic := &Session{ + nsdMap: map[string]string{"silly_ns": "324567"}, + } + id, err := nbic.lookupNsDescriptorId("silly_ns") + if err != nil { + t.Errorf("want: 324567; got: %v", err) + } + if id != "324567" { + t.Errorf("want: 324567; got: %s", id) + } +} + +func TestLookupNsDescIdErrorOnMiss(t *testing.T) { + nbic := &Session{ + nsdMap: map[string]string{"silly_ns": "324567"}, + } + if _, err := nbic.lookupNsDescriptorId("not there!"); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestLookupNsDescIdFetchDataFromServer(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + wantId := "aba58e40-d65f-4f4e-be0a-e248c14d3e03" + if gotId, err := nbic.lookupNsDescriptorId("openldap_ns"); err != nil { + t.Errorf("want: %s; got: %v", wantId, err) + } else { + if gotId != wantId { + t.Errorf("want: %s; got: %v", wantId, gotId) + } + } + + if len(nbi.exchanges) != 2 { + t.Fatalf("want: 2; got: %d", len(nbi.exchanges)) + } + rr1, rr2 := 
nbi.exchanges[0], nbi.exchanges[1] + if rr1.req.URL.Path != urls.Tokens().Path { + t.Errorf("want: %s; got: %s", urls.Tokens().Path, rr1.req.URL.Path) + } + if rr2.req.URL.Path != urls.NsDescriptors().Path { + t.Errorf("want: %s; got: %s", urls.NsDescriptors().Path, rr2.req.URL.Path) + } +} + +func TestLookupNsDescIdFetchDataFromServerTokenError(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, UserCredentials{}, nbi.exchange) + + if _, err := nbic.lookupNsDescriptorId("openldap_ns"); err == nil { + t.Errorf("want: error; got: nil") + } + + if len(nbi.exchanges) != 1 { + t.Fatalf("want: 1; got: %d", len(nbi.exchanges)) + } + rr1 := nbi.exchanges[0] + if rr1.req.URL.Path != urls.Tokens().Path { + t.Errorf("want: %s; got: %s", urls.Tokens().Path, rr1.req.URL.Path) + } +} diff --git a/osmops/nbic/nsinstances.go b/osmops/nbic/nsinstances.go new file mode 100644 index 0000000..0012ff9 --- /dev/null +++ b/osmops/nbic/nsinstances.go @@ -0,0 +1,202 @@ +package nbic + +import ( + "fmt" + + u "github.com/martel-innovate/osmops/osmops/util" +) + +type nsInstanceView struct { // only the response fields we care about. + Id string `json:"_id"` + Name string `json:"name"` +} + +type nsInstanceMap map[string][]string + +func (m nsInstanceMap) addMapping(name string, id string) { + if entry, ok := m[name]; ok { + m[name] = append(entry, id) + } else { + m[name] = []string{id} + } +} + +func buildNsInstanceMap(vs []nsInstanceView) nsInstanceMap { + nsMap := nsInstanceMap{} + for _, v := range vs { + nsMap.addMapping(v.Name, v.Id) + } + return nsMap +} + +// NOTE. NS instance name to ID lookup. +// OSM NBI doesn't enforce uniqueness of NS names. In fact, it lets you happily +// create a new instance even if an existing one has the same name, e.g. 
+// +// $ curl localhost/osm/nslcm/v1/ns_instances_content \ +// -v -X POST \ +// -H 'Authorization: Bearer 0WhgBufy1Wt82NbF9OsmftwpRfcsV4sU' \ +// -H 'Content-Type: application/yaml' \ +// -d'{"nsdId": "aba58e40-d65f-4f4e-be0a-e248c14d3e03", "nsName": "ldap", "nsDescription": "default description", "vimAccountId": "4a4425f7-3e72-4d45-a4ec-4241186f3547"}' +// ... +// HTTP/1.1 201 Created +// ... +// --- +// id: 794ef9a2-8bbb-42c1-869a-bab6422982ec +// nslcmop_id: 0fdfaa6a-b742-480c-9701-122b3f732e4 +// +// This is why we map an NS instance name to a list of IDs. + +func (c *Session) getNsInstancesContent() ([]nsInstanceView, error) { + data := []nsInstanceView{} + if _, err := c.getJson(c.conn.NsInstancesContent(), &data); err != nil { + return nil, err + } + return data, nil +} + +type maybeNsInstId *string + +func (c *Session) lookupNsInstanceId(name string) (maybeNsInstId, error) { + if c.nsInstMap == nil { + if vs, err := c.getNsInstancesContent(); err != nil { + return nil, err + } else { + c.nsInstMap = buildNsInstanceMap(vs) + } + } + if ids, ok := c.nsInstMap[name]; !ok { + return nil, nil + } else { + if len(ids) != 1 { + return nil, + fmt.Errorf("NS instance name not bound to a single ID: %v", ids) + } + return &ids[0], nil + } +} + +// NsInstanceContent holds the data to create or update an NS instance. +// For now we only support creating or updating KNFs. For a create or +// update operation to work, the target KNF must've been "on-boarded" +// in OSM already. So there must be, in OSM, a NSD and VNFD for it. +type NsInstanceContent struct { + // The name of the target NS instance to create or update. + Name string + // Short description of the NS instance. + Description string + // The name of the NSD that defines the NS instance. + NsdName string + // The name of the VIM account to use for creating/updating the NS instance. + VimAccountName string + // The name of the VNF to use when updating the NS instance. 
+ VnfName string + // The name of the KDU for the NS as specified in the VNFD. + KduName string + // Any KNF-specific parameters to create or update the NS instance. + KduParams interface{} +} + +type nsInstContentDto struct { + NsName string `json:"nsName"` + NsdId string `json:"nsdId"` + NsDescription string `json:"nsDescription"` + VimAccountId string `json:"vimAccountId"` + AdditionalParamsForVnf []additionalParamsForVnfDto `json:"additionalParamsForVnf,omitempty"` +} + +type additionalParamsForVnfDto struct { + MemberVnfIndex string `json:"member-vnf-index"` + AdditionalParamsForKdu []additionalParamsForKduDto `json:"additionalParamsForKdu"` +} + +type additionalParamsForKduDto struct { + KduName string `json:"kdu_name"` + AdditionalParams interface{} `json:"additionalParams"` +} + +type nsInstanceContentActionDto struct { + MemberVnfIndex string `json:"member_vnf_index"` + KduName string `json:"kdu_name"` + Primitive string `json:"primitive"` + PrimitiveParams interface{} `json:"primitive_params"` +} + +func (c *Session) CreateOrUpdateNsInstance(data *NsInstanceContent) error { + if data == nil { + return fmt.Errorf("nil data") + } + + nsId, err := c.lookupNsInstanceId(data.Name) + if err != nil { + return err + } + if nsId == nil { + return c.createNsInstance(data) + } + return c.updateNsInstance(*nsId, data) +} + +func toNsInstContentDto(nsdId string, vimAccId string, + data *NsInstanceContent) *nsInstContentDto { + dto := nsInstContentDto{ + NsName: data.Name, + NsdId: nsdId, + NsDescription: data.Description, + VimAccountId: vimAccId, + } + if data.KduParams != nil { + dto.AdditionalParamsForVnf = []additionalParamsForVnfDto{ + { + MemberVnfIndex: data.VnfName, + AdditionalParamsForKdu: []additionalParamsForKduDto{ + { + KduName: data.KduName, + AdditionalParams: data.KduParams, + }, + }, + }, + } + } + return &dto +} + +func (c *Session) createNsInstance(data *NsInstanceContent) error { + nsdId, err := c.lookupNsDescriptorId(data.NsdName) + if err != nil 
{ + return err + } + vimAccId, err := c.lookupVimAccountId(data.VimAccountName) + if err != nil { + return err + } + dto := toNsInstContentDto(nsdId, vimAccId, data) + + _, err = c.postJson(c.conn.NsInstancesContent(), dto) + return err +} + +var nsAction = struct { + u.StrEnum + CREATE, UPGRADE, DELETE u.EnumIx +}{ + StrEnum: u.NewStrEnum("create", "upgrade", "delete"), + CREATE: 0, + UPGRADE: 1, + DELETE: 2, +} + +func toNsInstanceContentActionDto(nsId string, data *NsInstanceContent) *nsInstanceContentActionDto { + return &nsInstanceContentActionDto{ + MemberVnfIndex: data.VnfName, + KduName: data.KduName, + Primitive: nsAction.LabelOf(nsAction.UPGRADE), + PrimitiveParams: data.KduParams, + } +} + +func (c *Session) updateNsInstance(nsId string, data *NsInstanceContent) error { + dto := toNsInstanceContentActionDto(nsId, data) + _, err := c.postJson(c.conn.NsInstancesAction(nsId), dto) + return err +} diff --git a/osmops/nbic/nsinstances_test.go b/osmops/nbic/nsinstances_test.go new file mode 100644 index 0000000..5ec57b2 --- /dev/null +++ b/osmops/nbic/nsinstances_test.go @@ -0,0 +1,337 @@ +package nbic + +import ( + "io/ioutil" + "reflect" + "testing" + + "gopkg.in/yaml.v2" +) + +func TestBuildNsInstanceMap(t *testing.T) { + vs := []nsInstanceView{ + {Id: "1", Name: "a"}, {Id: "2", Name: "a"}, {Id: "3", Name: "b"}, + } + nsMap := buildNsInstanceMap(vs) + + if got, ok := nsMap["a"]; !ok { + t.Errorf("want: a; got: nil") + } else { + want := []string{"1", "2"} + if !reflect.DeepEqual(got, want) { + t.Errorf("want: %v; got: %v", want, got) + } + } + + if got, ok := nsMap["b"]; !ok { + t.Errorf("want: b; got: nil") + } else { + want := []string{"3"} + if !reflect.DeepEqual(got, want) { + t.Errorf("want: %v; got: %v", want, got) + } + } + + if got, ok := nsMap["c"]; ok { + t.Errorf("want: nil; got: %v", got) + } +} + +func TestLookupNsInstIdUseCachedData(t *testing.T) { + nbic := &Session{ + nsInstMap: map[string][]string{"silly_ns": {"324567"}}, + } + id, err := 
nbic.lookupNsInstanceId("silly_ns") + if err != nil { + t.Errorf("want: 324567; got: %v", err) + } + if *id != "324567" { + t.Errorf("want: 324567; got: %s", *id) + } +} + +func TestLookupNsInstIdNilOnMiss(t *testing.T) { + nbic := &Session{ + nsInstMap: map[string][]string{"silly_ns": {"324567"}}, + } + if got, err := nbic.lookupNsInstanceId("not there!"); err != nil { + t.Errorf("want: nil; got: %v", err) + } else { + if got != nil { + t.Errorf("want: nil; got: %v", *got) + } + } +} + +func TestLookupNsInstIdFetchDataFromServer(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + wantId := "0335c32c-d28c-4d79-9b94-0ffa36326932" + if gotId, err := nbic.lookupNsInstanceId("ldap"); err != nil { + t.Errorf("want: %s; got: %v", wantId, err) + } else { + if *gotId != wantId { + t.Errorf("want: %s; got: %v", wantId, *gotId) + } + } + + if len(nbi.exchanges) != 2 { + t.Fatalf("want: 2; got: %d", len(nbi.exchanges)) + } + rr1, rr2 := nbi.exchanges[0], nbi.exchanges[1] + if rr1.req.URL.Path != urls.Tokens().Path { + t.Errorf("want: %s; got: %s", urls.Tokens().Path, rr1.req.URL.Path) + } + if rr2.req.URL.Path != urls.NsInstancesContent().Path { + t.Errorf("want: %s; got: %s", urls.NsInstancesContent().Path, rr2.req.URL.Path) + } +} + +func TestLookupNsInstIdFetchDataFromServerTokenError(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, UserCredentials{}, nbi.exchange) + + if _, err := nbic.lookupNsInstanceId("ldap"); err == nil { + t.Errorf("want: error; got: nil") + } + + if len(nbi.exchanges) != 1 { + t.Fatalf("want: 1; got: %d", len(nbi.exchanges)) + } + rr1 := nbi.exchanges[0] + if rr1.req.URL.Path != urls.Tokens().Path { + t.Errorf("want: %s; got: %s", urls.Tokens().Path, rr1.req.URL.Path) + } +} + +func TestLookupNsInstIdFetchDataFromServerDupNameError(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + if _, err := 
nbic.lookupNsInstanceId("dup-name"); err == nil { + t.Errorf("want: error; got: nil") + } + + if len(nbi.exchanges) != 2 { + t.Fatalf("want: 2; got: %d", len(nbi.exchanges)) + } + rr1, rr2 := nbi.exchanges[0], nbi.exchanges[1] + if rr1.req.URL.Path != urls.Tokens().Path { + t.Errorf("want: %s; got: %s", urls.Tokens().Path, rr1.req.URL.Path) + } + if rr2.req.URL.Path != urls.NsInstancesContent().Path { + t.Errorf("want: %s; got: %s", urls.NsInstancesContent().Path, rr2.req.URL.Path) + } +} + +func TestCreateOrUpdateNsInstanceErrorOnNilData(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + if got := nbic.CreateOrUpdateNsInstance(nil); got == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestCreateNsInstanceErrorOnMissingNsd(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + data := NsInstanceContent{ + Name: "not-there", + Description: "wada wada", + NsdName: "not there!", + VimAccountName: "mylocation1", + } + if err := nbic.CreateOrUpdateNsInstance(&data); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestCreateNsInstanceErrorOnMissingVimAccount(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + data := NsInstanceContent{ + Name: "not-there", + Description: "wada wada", + NsdName: "openldap_ns", + VimAccountName: "not there!", + } + if err := nbic.CreateOrUpdateNsInstance(&data); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestCreateNsInstanceErrorOnDupNsInstanceName(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + data := NsInstanceContent{ + Name: "dup-name", + Description: "wada wada", + NsdName: "openldap_ns", + VimAccountName: "mylocation1", + } + if err := nbic.CreateOrUpdateNsInstance(&data); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func 
assertCreateNsInstanceHttpFlow(t *testing.T, urls Connection, + flow []requestReply) string { + if len(flow) != 5 { + t.Fatalf("want: 5; got: %d", len(flow)) + } + rr1, rr2, rr3, rr4, rr5 := flow[0], flow[1], flow[2], flow[3], flow[4] + if rr1.req.URL.Path != urls.Tokens().Path { + t.Errorf("want: %s; got: %s", urls.Tokens().Path, rr1.req.URL.Path) + } + if rr2.req.URL.Path != urls.NsInstancesContent().Path { + t.Errorf("want: %s; got: %s", urls.NsInstancesContent().Path, rr2.req.URL.Path) + } + if rr3.req.URL.Path != urls.NsDescriptors().Path { + t.Errorf("want: %s; got: %s", urls.NsDescriptors().Path, rr3.req.URL.Path) + } + if rr4.req.URL.Path != urls.VimAccounts().Path { + t.Errorf("want: %s; got: %s", urls.VimAccounts().Path, rr4.req.URL.Path) + } + if rr5.req.URL.Path != urls.NsInstancesContent().Path { + t.Errorf("want: %s; got: %s", urls.NsInstancesContent().Path, rr5.req.URL.Path) + } + if rr5.req.Method != "POST" { + t.Errorf("want: POST; got: %s", rr5.req.Method) + } + got, err := ioutil.ReadAll(rr5.req.Body) + if err != nil { + t.Errorf("want: body; got: %v", err) + return "" + } + return string(got) +} + +func TestCreateNsInstanceWithNoAdditionalParams(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + data := NsInstanceContent{ + Name: "not-there", + Description: "wada wada", + NsdName: "openldap_ns", + VimAccountName: "mylocation1", + } + if err := nbic.CreateOrUpdateNsInstance(&data); err != nil { + t.Errorf("want: create; got: %v", err) + } + + want := `{"nsName":"not-there","nsdId":"aba58e40-d65f-4f4e-be0a-e248c14d3e03","nsDescription":"wada wada","vimAccountId":"4a4425f7-3e72-4d45-a4ec-4241186f3547"}` + got := assertCreateNsInstanceHttpFlow(t, urls, nbi.exchanges) + if got != want { + t.Errorf("want: %s; got: %s", want, got) + } +} + +type kduYamlParams struct { + Params interface{} `yaml:"params"` +} + +var yamlParamsData = []byte(`--- +params: + replicaCount: "2" +`) + +func 
TestCreateNsInstanceWithAdditionalParams(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + kdu := kduYamlParams{} + if err := yaml.Unmarshal(yamlParamsData, &kdu); err != nil { + t.Fatalf("unmarshal: %v", err) + } + data := NsInstanceContent{ + Name: "not-there", + Description: "wada wada", + NsdName: "openldap_ns", + VimAccountName: "mylocation1", + VnfName: "openldap", + KduName: "ldap", + KduParams: kdu.Params, + } + if err := nbic.CreateOrUpdateNsInstance(&data); err != nil { + t.Errorf("want: create; got: %v", err) + } + + want := `{"nsName":"not-there","nsdId":"aba58e40-d65f-4f4e-be0a-e248c14d3e03","nsDescription":"wada wada","vimAccountId":"4a4425f7-3e72-4d45-a4ec-4241186f3547"` + want += `,"additionalParamsForVnf":[{"member-vnf-index":"openldap","additionalParamsForKdu":[{"kdu_name":"ldap","additionalParams":{"replicaCount":"2"}}]}]}` + got := assertCreateNsInstanceHttpFlow(t, urls, nbi.exchanges) + if got != want { + t.Errorf("want: %s; got: %s", want, got) + } +} + +func assertUpdateNsInstanceHttpFlow(t *testing.T, urls Connection, + nsInstanceId string, flow []requestReply) string { + if len(flow) != 3 { + t.Fatalf("want: 3; got: %d", len(flow)) + } + rr1, rr2, rr3 := flow[0], flow[1], flow[2] + if rr1.req.URL.Path != urls.Tokens().Path { + t.Errorf("want: %s; got: %s", urls.Tokens().Path, rr1.req.URL.Path) + } + if rr2.req.URL.Path != urls.NsInstancesContent().Path { + t.Errorf("want: %s; got: %s", urls.NsInstancesContent().Path, rr2.req.URL.Path) + } + if rr3.req.URL.Path != urls.NsInstancesAction(nsInstanceId).Path { + t.Errorf("want: %s; got: %s", urls.NsInstancesAction(nsInstanceId).Path, rr3.req.URL.Path) + } + if rr3.req.Method != "POST" { + t.Errorf("want: POST; got: %s", rr3.req.Method) + } + got, err := ioutil.ReadAll(rr3.req.Body) + if err != nil { + t.Errorf("want: body; got: %v", err) + return "" + } + return string(got) +} + +func TestUpdateNsInstance(t *testing.T) { + nbi := 
newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + nsInstanceId := "0335c32c-d28c-4d79-9b94-0ffa36326932" + + kdu := kduYamlParams{} + if err := yaml.Unmarshal(yamlParamsData, &kdu); err != nil { + t.Fatalf("unmarshal: %v", err) + } + data := NsInstanceContent{ + Name: "ldap", + Description: "wada wada", + NsdName: "openldap_ns", + VimAccountName: "mylocation1", + VnfName: "openldap", + KduName: "ldap", + KduParams: kdu.Params, + } + if err := nbic.CreateOrUpdateNsInstance(&data); err != nil { + t.Errorf("want: update; got: %v", err) + } + + want := `{"member_vnf_index":"openldap","kdu_name":"ldap","primitive":"upgrade","primitive_params":{"replicaCount":"2"}}` + got := assertUpdateNsInstanceHttpFlow(t, urls, nsInstanceId, nbi.exchanges) + if got != want { + t.Errorf("want: %s; got: %s", want, got) + } +} diff --git a/osmops/nbic/packages.go b/osmops/nbic/packages.go new file mode 100644 index 0000000..7962c73 --- /dev/null +++ b/osmops/nbic/packages.go @@ -0,0 +1,227 @@ +package nbic + +import ( + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/martel-innovate/osmops/osmops/pkgr" + "github.com/martel-innovate/osmops/osmops/util/file" + + //lint:ignore ST1001 HTTP EDSL is more readable w/o qualified import + . "github.com/martel-innovate/osmops/osmops/util/http" +) + +func (s *Session) CreateOrUpdatePackage(source file.AbsPath) error { + handler, err := newPkgHandler(s, source) + if err != nil { + return err + } + return handler.process() +} + +// pkgReader wraps Package to consolidate in one place all the assumptions +// this module makes about OSM packages in an OsmOps-managed repo. +// Specifically: +// +// - pkg name = pkg ID +// - VNF pkg => pgk name ends w/ "_knf" +// - NS pkg => pkg name ends w/ "_ns" +// +// None of the above needs to be true in general, but OsmOps relies on that +// at the moment to simplify the implementation. Eventually, we'll redo this +// properly, i.e. 
// use a semantic approach (parse, interpret OSM files) rather than
// naming conventions and guesswork.
type pkgReader struct {
	// pkg is the packed (tarred/gzipped) OSM package.
	pkg *pkgr.Package
	// data holds the whole tarball, read into memory once at construction.
	data []byte
}

// newPkgReader packs the package rooted at pkgSource and reads the
// resulting tarball into memory.
func newPkgReader(pkgSource file.AbsPath) (*pkgReader, error) {
	pkg, err := pkgr.Pack(pkgSource)
	if err != nil {
		return nil, err
	}
	data, err := io.ReadAll(pkg.Data)
	return &pkgReader{
		pkg:  pkg,
		data: data,
	}, err
}

// Source returns the directory the package was built from.
func (r *pkgReader) Source() file.AbsPath {
	return r.pkg.Source.Directory()
}

// Name returns the package name.
func (r *pkgReader) Name() string {
	return r.pkg.Name
}

// Id returns the package ID — by OsmOps convention, the same as the name.
func (r *pkgReader) Id() string {
	return r.Name()
}

// Data returns the tarball bytes.
func (r *pkgReader) Data() []byte {
	return r.data
}

// Hash returns the tarball's hash as computed by the packer.
func (r *pkgReader) Hash() string {
	return r.pkg.Hash
}

// IsNs tells if this is an NS package — by convention, name ends in "_ns".
func (r *pkgReader) IsNs() bool {
	return strings.HasSuffix(r.Name(), "_ns")
}

// IsKnf tells if this is a KNF package — by convention, name ends in "_knf".
func (r *pkgReader) IsKnf() bool {
	return strings.HasSuffix(r.Name(), "_knf")
}

// pkgHandler carries everything needed to upload one package: the NBI
// session, the package reader, the target endpoint and whether the upload
// is a create (POST) or an update (PUT).
type pkgHandler struct {
	session  *Session
	pkg      *pkgReader
	endpoint *url.URL
	isUpdate bool
}

// newPkgHandler reads the package at pkgSrc and, depending on whether it's
// a KNF or NS package, wires in the matching descriptor lookup and
// create/update endpoints. Any other package type is rejected.
func newPkgHandler(sesh *Session, pkgSrc file.AbsPath) (*pkgHandler, error) {
	reader, err := newPkgReader(pkgSrc)
	if err != nil {
		return nil, err
	}
	handler := &pkgHandler{
		session: sesh,
		pkg:     reader,
	}
	if reader.IsKnf() {
		return mkPkgHandler(
			handler, handler.session.lookupVnfDescriptorId,
			handler.session.conn.VnfPackagesContent,
			handler.session.conn.VnfPackageContent)
	}
	if reader.IsNs() {
		return mkPkgHandler(
			handler, handler.session.lookupNsDescriptorId,
			handler.session.conn.NsPackagesContent,
			handler.session.conn.NsPackageContent)
	}
	return nil, unsupportedPackageType(reader)
}

// unsupportedPackageType builds the error for packages that are neither
// KNF nor NS.
func unsupportedPackageType(pkg *pkgReader) error {
	return fmt.Errorf("unsupported package type: %v", pkg.Source())
}

// lookupDescId resolves a package ID to the corresponding OSM descriptor ID.
type lookupDescId func(pkgId string) (string, error)

// createEndpoint yields the URL for creating a package.
type createEndpoint func() *url.URL

// updateEndpoint yields the URL for updating the package with the given
// OSM-assigned ID.
type updateEndpoint func(osmPkgId string) *url.URL

// mkPkgHandler decides between create and update: a missingDescriptor
// lookup error means the package isn't in OSM yet (=> create); a successful
// lookup means it is (=> update at the looked-up ID); any other lookup
// error is propagated.
func mkPkgHandler(h *pkgHandler, getOsmId lookupDescId,
	createUrl createEndpoint, updateUrl updateEndpoint) (*pkgHandler, error) {
	osmPkgId, err := getOsmId(h.pkg.Id())
	if _, ok := err.(*missingDescriptor); ok {
		h.isUpdate = false
		h.endpoint = createUrl()

		return h, nil
	}
	if err == nil {
		h.isUpdate = true
		h.endpoint = updateUrl(osmPkgId)
	}
	return h, err
}

// process runs the upload: PUT for updates, POST for creates.
func (h *pkgHandler) process() error {
	run := h.post
	if h.isUpdate {
		run = h.put
	}
	_, err := run()
	return err
}

// post uploads the whole tarball to create the package, mirroring the
// headers the stock OSM client sends.
func (h *pkgHandler) post() (*http.Response, error) {
	req := Request(
		POST, At(h.endpoint),
		h.session.NbiAccessToken(),
		Accept(MediaType.JSON),  // same as what OSM client does
		Content(MediaType.GZIP), // ditto
		ContentFilename(h.pkg),  // ditto
		ContentFileMd5(h.pkg),   // ditto
		Body(h.pkg.Data()),
	)
	req.SetHandler(ExpectSuccess())
	return req.RunWith(h.session.transport)
}

// put updates the package by uploading just its YAML descriptor — see the
// NOTE below for why it's the descriptor and not the tarball.
func (h *pkgHandler) put() (*http.Response, error) {
	descData, err := h.findPkgDescriptor()
	if err != nil {
		return nil, err
	}
	req := Request(
		PUT, At(h.endpoint),
		h.session.NbiAccessToken(),
		Accept(MediaType.JSON),
		Content(MediaType.YAML),
		Body(descData),
	)
	req.SetHandler(ExpectSuccess())
	return req.RunWith(h.session.transport)
}

// NOTE. Package update. It's kinda weird the way it works, but most likely
// I'm missing something. In fact, our initial implementation of put uploaded
// the tarball. As it turns out, OSM client does something different. It tries
// finding a YAML file in the package dir, blindly assumes it's a VNFD or NSD
// and PUTs it in OSM. What if there are other files in the package? Well,
// I've got no idea why OSM client does that, but I've changed put's impl
// to be in line with OSM client's.
// OSM client's update methods:
// - https://osm.etsi.org/gitlab/osm/osmclient/-/blob/master/osmclient/sol005/vnfd.py
// - https://osm.etsi.org/gitlab/osm/osmclient/-/blob/master/osmclient/sol005/nsd.py

// findPkgDescriptor returns the content of the package's single YAML file,
// erroring out — like the OSM client does — when there's none or more than
// one candidate.
func (h *pkgHandler) findPkgDescriptor() ([]byte, error) {
	candidates := []string{}
	for _, archivePath := range h.pkg.pkg.Source.SortedFilePaths() {
		p := strings.ToLower(archivePath)
		if strings.HasSuffix(p, ".yaml") || strings.HasSuffix(p, ".yml") {
			candidates = append(candidates, archivePath)
		}
	}
	if len(candidates) == 0 { // same as what OSM client does
		return []byte{}, noDescriptorFound(h.pkg)
	}
	if len(candidates) > 1 { // same as what OSM client does
		return []byte{}, moreThanOneDescriptorFound(h.pkg)
	}
	return h.pkg.pkg.Source.FileContent(candidates[0])
}

// moreThanOneDescriptorFound builds the ambiguous-descriptor error.
func moreThanOneDescriptorFound(pkg *pkgReader) error {
	msg := "found more than one potential descriptor in: %v"
	return fmt.Errorf(msg, pkg.Source())
}

// noDescriptorFound builds the missing-descriptor error.
func noDescriptorFound(pkg *pkgReader) error {
	msg := "no descriptor found in: %v"
	return fmt.Errorf(msg, pkg.Source())
}

// ContentFilename sets the Content-Filename header to "<pkg name>.tar.gz",
// the convention the OSM NBI expects for package uploads.
func ContentFilename(pkg *pkgReader) ReqBuilder {
	name := fmt.Sprintf("%s.tar.gz", pkg.Name())
	return func(request *http.Request) error {
		request.Header.Set("Content-Filename", name)
		return nil
	}
}

// ContentFileMd5 sets the Content-File-MD5 header to the tarball's hash.
func ContentFileMd5(pkg *pkgReader) ReqBuilder {
	return func(request *http.Request) error {
		request.Header.Set("Content-File-MD5", pkg.Hash())
		return nil
	}
}

// ----- osmops/nbic/packages_test.go -----

package nbic

import (
	"crypto/md5"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"testing"

	"github.com/martel-innovate/osmops/osmops/util/file"
)

// findTestDataDir resolves pkgDirName under this file's packages_test_dir.
func findTestDataDir(pkgDirName string) file.AbsPath {
	_, thisFileName, _, _ := runtime.Caller(1)
	enclosingDir := filepath.Dir(thisFileName)
	testDataDir := filepath.Join(enclosingDir, "packages_test_dir", pkgDirName)
	p, _ := file.ParseAbsPath(testDataDir)

	return p
}

// md5string returns the hex MD5 digest of data.
func md5string(data []byte) string {
	hash := md5.Sum(data)
	return fmt.Sprintf("%x", hash)
}

// callCreateOrUpdatePackage runs CreateOrUpdatePackage against a fresh
// mock NBI for the test package in pkgDirName.
func callCreateOrUpdatePackage(pkgDirName string) (*mockNbi, error) {
	nbi := newMockNbi()
	urls := newConn()
	nbic, _ := New(urls, usrCreds, nbi.exchange)
	pkgSrc := findTestDataDir(pkgDirName)

	return nbi, nbic.CreateOrUpdatePackage(pkgSrc)
}

// checkUploadedPackage verifies the upload request's filename header and
// that the MD5 header matches the bytes the mock stored.
func checkUploadedPackage(t *testing.T, mockNbi *mockNbi, req *http.Request,
	pkgDirName, osmPkgId string) {
	gotFilename := req.Header.Get("Content-Filename")
	gotHash := req.Header.Get("Content-File-MD5")
	gotPkgTgzData := mockNbi.packages[osmPkgId]

	wantFilename := fmt.Sprintf("%s.tar.gz", pkgDirName)
	if gotFilename != wantFilename {
		t.Errorf("want file: %s; got: %s", wantFilename, gotFilename)
	}
	wantHash := md5string(gotPkgTgzData)
	if gotHash != wantHash {
		t.Errorf("want hash: %s; got: %s", wantHash, gotHash)
	}
}

// checkUploadedPackageDesc verifies the mock stored exactly the on-disk
// descriptor file for an update.
func checkUploadedPackageDesc(t *testing.T, mockNbi *mockNbi,
	pkgDirName, descFilePath, osmPkgId string) {
	descFile := findTestDataDir(pkgDirName).Join(descFilePath)
	gotDescData := mockNbi.packages[osmPkgId]
	wantDescData, err := os.ReadFile(descFile.Value())

	if err != nil {
		t.Fatalf("couldn't read file: %v", descFile)
	}
	if !reflect.DeepEqual(wantDescData, gotDescData) {
		t.Errorf("want desc: %v; got: %v", wantDescData, gotDescData)
	}
}

// checkUnsupportedPackageErr asserts err is the unsupported-package error.
func checkUnsupportedPackageErr(t *testing.T, err error) {
	if err == nil {
		t.Fatalf("want err; got: nil")
	}
	if !strings.HasPrefix(err.Error(), "unsupported package type") {
		t.Errorf("want unsupported pkg err; got: %v", err)
	}
}

// runCreatePackageTest runs the create flow (token, lookup, create POST)
// and checks the uploaded tarball.
func runCreatePackageTest(t *testing.T, pkgDirName string) {
	mockNbi, err := callCreateOrUpdatePackage(pkgDirName)

	if err != nil {
		t.Errorf("want: create package; got: %v", err)
	}
	if len(mockNbi.exchanges) != 3 { // #1 = get token
		t.Fatalf("want: one req to lookup package, then one to create it; got: %d",
			len(mockNbi.exchanges)-1)
	}

	rr := mockNbi.exchanges[2]
	checkUploadedPackage(t, mockNbi, rr.req, pkgDirName, pkgDirName)
	if rr.res.StatusCode != http.StatusCreated {
		t.Errorf("want status: %d; got: %d",
			http.StatusCreated, rr.res.StatusCode)
	}
}

// runUpdatePackageTest runs the update flow (token, lookup, update PUT)
// and checks the uploaded descriptor.
func runUpdatePackageTest(t *testing.T, pkgDirName, descFilePath, osmPkgId string) {
	mockNbi, err := callCreateOrUpdatePackage(pkgDirName)

	if err != nil {
		t.Errorf("want: update package; got: %v", err)
	}
	if len(mockNbi.exchanges) != 3 { // #1 = get token
		t.Fatalf("want: one req to lookup package, then one to update it; got: %d",
			len(mockNbi.exchanges)-1)
	}

	updateExchange := mockNbi.exchanges[2]
	checkUploadedPackageDesc(t, mockNbi, pkgDirName, descFilePath, osmPkgId)
	if updateExchange.res.StatusCode != http.StatusOK {
		t.Errorf("want update status: %d; got: %d",
			http.StatusOK, updateExchange.res.StatusCode)
	}
}

// runUpdatePackageTestNoDescErr expects an update to fail because the
// package dir has no YAML descriptor.
func runUpdatePackageTestNoDescErr(t *testing.T, pkgDirName string) {
	_, err := callCreateOrUpdatePackage(pkgDirName)
	if err == nil {
		t.Errorf("want: update package error; got: nil")
	}
	if !strings.HasPrefix(err.Error(), "no descriptor found") {
		t.Errorf("want: no desc error; got: %v", err)
	}
}

// runUpdatePackageTestManyDescErr expects an update to fail because the
// package dir has more than one YAML descriptor.
func runUpdatePackageTestManyDescErr(t *testing.T, pkgDirName string) {
	_, err := callCreateOrUpdatePackage(pkgDirName)
	if err == nil {
		t.Errorf("want: update package error; got: nil")
	}
	if !strings.HasPrefix(err.Error(), "found more than one potential descriptor") {
		t.Errorf("want: many desc error; got: %v", err)
	}
}

func TestCreateKnfPackage(t *testing.T) {
	runCreatePackageTest(t, "create_knf")
}

func TestCreateNsPackage(t *testing.T) {
	runCreatePackageTest(t, "create_ns")
}

func TestUpdateKnfPackage(t *testing.T) {
	osmPkgId := "4ffdeb67-92e7-46fa-9fa2-331a4d674137" // see vnfDescriptors
	runUpdatePackageTest(t, "openldap_knf", "openldap_vnfd.yaml", osmPkgId)
}

func TestUpdateNsPackage(t *testing.T) {
	osmPkgId := "aba58e40-d65f-4f4e-be0a-e248c14d3e03" // see nsDescriptors
	runUpdatePackageTest(t, "openldap_ns", "openldap_nsd.yaml", osmPkgId)
}

func TestUpdateKnfPackageNoDescErr(t *testing.T) {
	runUpdatePackageTestNoDescErr(t, "update_no_desc/openldap_knf")
}

func TestUpdateNsPackageNoDescErr(t *testing.T) {
	runUpdatePackageTestNoDescErr(t, "update_no_desc/openldap_ns")
}

func TestUpdateKnfPackageManyDescErr(t *testing.T) {
	runUpdatePackageTestManyDescErr(t, "update_many_desc/openldap_knf")
}

func TestUpdateNsPackageManyDescErr(t *testing.T) {
	runUpdatePackageTestManyDescErr(t, "update_many_desc/openldap_ns")
}

func TestPackErrOnSourceDirAccess(t *testing.T) {
	mockNbi, err := callCreateOrUpdatePackage("not-there_knf")

	if _, ok := err.(*file.VisitError); !ok {
		t.Errorf("want: visit error; got: %v", err)
	}
	if len(mockNbi.exchanges) > 0 {
		t.Errorf("want: no req to create or update package; got: %d",
			len(mockNbi.exchanges))
	}
}

func TestCreateUnsupportedPackage(t *testing.T) {
	mockNbi, err := callCreateOrUpdatePackage("unsupported")
	if len(mockNbi.exchanges) > 0 {
		t.Errorf("want: no req to create or update package; got: %d",
			len(mockNbi.exchanges))
	}
	checkUnsupportedPackageErr(t, err)
}

# ----- osmops/nbic/packages_test_dir/create_knf/some.yaml (fixture) -----
dummy: knf
# ----- osmops/nbic/packages_test_dir/create_ns/some.yaml (fixture) -----
dummy: ns
a/osmops/nbic/packages_test_dir/openldap_knf/openldap_vnfd.yaml b/osmops/nbic/packages_test_dir/openldap_knf/openldap_vnfd.yaml new file mode 100644 index 0000000..41795bf --- /dev/null +++ b/osmops/nbic/packages_test_dir/openldap_knf/openldap_vnfd.yaml @@ -0,0 +1,18 @@ +vnfd: + description: KNF with single KDU using a helm-chart for openldap version 1.2.7 + df: + - id: default-df + ext-cpd: + - id: mgmt-ext + k8s-cluster-net: mgmtnet + id: openldap_knf + k8s-cluster: + nets: + - id: mgmtnet + kdu: + - name: ldap + helm-chart: stable/openldap:1.2.7 + mgmt-cp: mgmt-ext + product-name: openldap_knf + provider: Telefonica + version: '1.0' diff --git a/osmops/nbic/packages_test_dir/openldap_ns/README.md b/osmops/nbic/packages_test_dir/openldap_ns/README.md new file mode 100644 index 0000000..8424611 --- /dev/null +++ b/osmops/nbic/packages_test_dir/openldap_ns/README.md @@ -0,0 +1,26 @@ +# SIMPLE OPEN-LDAP CHART + +Descriptors that installs an openldap version 1.2.1 chart in a K8s cluster + +There is one VNF (openldap\_vnf) with only one KDU. + +There is one NS that connects the VNF to a mgmt network + +## Onboarding and instantiation + +```bash +osm nfpkg-create openldap_knf.tar.gz +osm nspkg-create openldap_ns.tar.gz +osm ns-create --ns_name ldap --nsd_name openldap_ns --vim_account | --ssh_keys ${HOME}/.ssh/id_rsa.pub +``` + +### Instantiation option + +Some parameters could be passed during the instantiation. 
+ +* replicaCount: Number of Open LDAP replicas that will be created + +```bash +osm ns-create --ns_name ldap --nsd_name openldap_ns --vim_account | --config '{additionalParamsForVnf: [{"member-vnf-index": "openldap", "additionalParams": {"replicaCount": "2"}}]}' +``` + diff --git a/osmops/nbic/packages_test_dir/openldap_ns/openldap_nsd.yaml b/osmops/nbic/packages_test_dir/openldap_ns/openldap_nsd.yaml new file mode 100644 index 0000000..b995164 --- /dev/null +++ b/osmops/nbic/packages_test_dir/openldap_ns/openldap_nsd.yaml @@ -0,0 +1,22 @@ +nsd: + nsd: + - description: NS consisting of a single KNF openldap_knf connected to mgmt network + designer: OSM + df: + - id: default-df + vnf-profile: + - id: openldap + virtual-link-connectivity: + - constituent-cpd-id: + - constituent-base-element-id: openldap + constituent-cpd-id: mgmt-ext + virtual-link-profile-id: mgmtnet + vnfd-id: openldap_knf + id: openldap_ns + name: openldap_ns + version: '1.0' + virtual-link-desc: + - id: mgmtnet + mgmt-network: 'true' + vnfd-id: + - openldap_knf diff --git a/osmops/nbic/packages_test_dir/unsupported/openldap_vnfd.yaml b/osmops/nbic/packages_test_dir/unsupported/openldap_vnfd.yaml new file mode 100644 index 0000000..41795bf --- /dev/null +++ b/osmops/nbic/packages_test_dir/unsupported/openldap_vnfd.yaml @@ -0,0 +1,18 @@ +vnfd: + description: KNF with single KDU using a helm-chart for openldap version 1.2.7 + df: + - id: default-df + ext-cpd: + - id: mgmt-ext + k8s-cluster-net: mgmtnet + id: openldap_knf + k8s-cluster: + nets: + - id: mgmtnet + kdu: + - name: ldap + helm-chart: stable/openldap:1.2.7 + mgmt-cp: mgmt-ext + product-name: openldap_knf + provider: Telefonica + version: '1.0' diff --git a/osmops/nbic/packages_test_dir/update_many_desc/openldap_knf/one.yaml b/osmops/nbic/packages_test_dir/update_many_desc/openldap_knf/one.yaml new file mode 100644 index 0000000..56a6051 --- /dev/null +++ b/osmops/nbic/packages_test_dir/update_many_desc/openldap_knf/one.yaml @@ -0,0 +1 @@ 
+1 \ No newline at end of file diff --git a/osmops/nbic/packages_test_dir/update_many_desc/openldap_knf/two.yaml b/osmops/nbic/packages_test_dir/update_many_desc/openldap_knf/two.yaml new file mode 100644 index 0000000..d8263ee --- /dev/null +++ b/osmops/nbic/packages_test_dir/update_many_desc/openldap_knf/two.yaml @@ -0,0 +1 @@ +2 \ No newline at end of file diff --git a/osmops/nbic/packages_test_dir/update_many_desc/openldap_ns/one.yaml b/osmops/nbic/packages_test_dir/update_many_desc/openldap_ns/one.yaml new file mode 100644 index 0000000..56a6051 --- /dev/null +++ b/osmops/nbic/packages_test_dir/update_many_desc/openldap_ns/one.yaml @@ -0,0 +1 @@ +1 \ No newline at end of file diff --git a/osmops/nbic/packages_test_dir/update_many_desc/openldap_ns/two.yaml b/osmops/nbic/packages_test_dir/update_many_desc/openldap_ns/two.yaml new file mode 100644 index 0000000..d8263ee --- /dev/null +++ b/osmops/nbic/packages_test_dir/update_many_desc/openldap_ns/two.yaml @@ -0,0 +1 @@ +2 \ No newline at end of file diff --git a/osmops/nbic/packages_test_dir/update_no_desc/openldap_knf/README.md b/osmops/nbic/packages_test_dir/update_no_desc/openldap_knf/README.md new file mode 100644 index 0000000..8e3ae1a --- /dev/null +++ b/osmops/nbic/packages_test_dir/update_no_desc/openldap_knf/README.md @@ -0,0 +1,2 @@ +No YAML files in here. +There should be an error when updating the package. \ No newline at end of file diff --git a/osmops/nbic/packages_test_dir/update_no_desc/openldap_ns/README.md b/osmops/nbic/packages_test_dir/update_no_desc/openldap_ns/README.md new file mode 100644 index 0000000..8e3ae1a --- /dev/null +++ b/osmops/nbic/packages_test_dir/update_no_desc/openldap_ns/README.md @@ -0,0 +1,2 @@ +No YAML files in here. +There should be an error when updating the package. 
\ No newline at end of file diff --git a/osmops/nbic/vimaccounts.go b/osmops/nbic/vimaccounts.go new file mode 100644 index 0000000..f55a8cd --- /dev/null +++ b/osmops/nbic/vimaccounts.go @@ -0,0 +1,56 @@ +package nbic + +import ( + "fmt" +) + +type vimAccountView struct { // only the response fields we care about. + Id string `json:"_id"` + Name string `json:"name"` +} + +type vimAccountMap map[string]string + +func buildVimAccountMap(vs []vimAccountView) vimAccountMap { + accountMap := map[string]string{} + for _, v := range vs { + accountMap[v.Name] = v.Id + } + return accountMap +} + +// NOTE. VIM account name to ID lookup. +// For our vimAccountMap to work, there must be a bijection between VIM account +// IDs and names. Lucklily, this is the case since OSM NBI enforces uniqueness +// of VIM account names. If you try creating a VIM account with the same name +// as an existing one, you get an error, e.g. +// +// HTTP/1.1 409 Conflict +// ... +// --- +// code: CONFLICT +// detail: name 'openvim-site' already exists for vim_accounts +// status: 409 + +func (c *Session) getVimAccounts() ([]vimAccountView, error) { + data := []vimAccountView{} + if _, err := c.getJson(c.conn.VimAccounts(), &data); err != nil { + return nil, err + } + return data, nil +} + +func (c *Session) lookupVimAccountId(name string) (string, error) { + if c.vimAccMap == nil { + if vs, err := c.getVimAccounts(); err != nil { + return "", err + } else { + c.vimAccMap = buildVimAccountMap(vs) + } + } + if id, ok := c.vimAccMap[name]; !ok { + return "", fmt.Errorf("no VIM account found for name ID: %s", name) + } else { + return id, nil + } +} diff --git a/osmops/nbic/vimaccounts_test.go b/osmops/nbic/vimaccounts_test.go new file mode 100644 index 0000000..34db8ba --- /dev/null +++ b/osmops/nbic/vimaccounts_test.go @@ -0,0 +1,71 @@ +package nbic + +import ( + "testing" +) + +func TestLookupVimAccIdUseCachedData(t *testing.T) { + nbic := &Session{ + vimAccMap: map[string]string{"silly_vim": 
"324567"}, + } + id, err := nbic.lookupVimAccountId("silly_vim") + if err != nil { + t.Errorf("want: 324567; got: %v", err) + } + if id != "324567" { + t.Errorf("want: 324567; got: %s", id) + } +} + +func TestLookupVimAccIdErrorOnMiss(t *testing.T) { + nbic := &Session{ + vimAccMap: map[string]string{"silly_vim": "324567"}, + } + if _, err := nbic.lookupVimAccountId("not there!"); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestLookupVimAccIdFetchDataFromServer(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + wantId := "4a4425f7-3e72-4d45-a4ec-4241186f3547" + if gotId, err := nbic.lookupVimAccountId("mylocation1"); err != nil { + t.Errorf("want: %s; got: %v", wantId, err) + } else { + if gotId != wantId { + t.Errorf("want: %s; got: %v", wantId, gotId) + } + } + + if len(nbi.exchanges) != 2 { + t.Fatalf("want: 2; got: %d", len(nbi.exchanges)) + } + rr1, rr2 := nbi.exchanges[0], nbi.exchanges[1] + if rr1.req.URL.Path != urls.Tokens().Path { + t.Errorf("want: %s; got: %s", urls.Tokens().Path, rr1.req.URL.Path) + } + if rr2.req.URL.Path != urls.VimAccounts().Path { + t.Errorf("want: %s; got: %s", urls.VimAccounts().Path, rr2.req.URL.Path) + } +} + +func TestLookupVimAccIdFetchDataFromServerTokenError(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, UserCredentials{}, nbi.exchange) + + if _, err := nbic.lookupVimAccountId("mylocation1"); err == nil { + t.Errorf("want: error; got: nil") + } + + if len(nbi.exchanges) != 1 { + t.Fatalf("want: 1; got: %d", len(nbi.exchanges)) + } + rr1 := nbi.exchanges[0] + if rr1.req.URL.Path != urls.Tokens().Path { + t.Errorf("want: %s; got: %s", urls.Tokens().Path, rr1.req.URL.Path) + } +} diff --git a/osmops/nbic/vnfdescriptors.go b/osmops/nbic/vnfdescriptors.go new file mode 100644 index 0000000..e819bb9 --- /dev/null +++ b/osmops/nbic/vnfdescriptors.go @@ -0,0 +1,64 @@ +package nbic + +import ( + "fmt" +) + +type vnfDescView 
struct { // only the response fields we care about. + Id string `json:"_id"` + Name string `json:"id"` +} + +type vnfDescMap map[string]string + +func buildVnfDescMap(ds []vnfDescView) vnfDescMap { + descMap := map[string]string{} + for _, d := range ds { + descMap[d.Name] = d.Id + } + return descMap +} + +// NOTE. VNFD name to ID lookup. +// For our vnfDescMap to work, there must be a bijection between VNFD IDs and +// name IDs. Luckily, this is the case since OSM NBI enforces uniqueness of +// VNFD name IDs. If you try uploading another package with a VNFD having the +// same name ID of an existing one, OSM NBI will complain loudly, e.g. +// +// HTTP/1.1 409 Conflict +// ... +// { +// "code": "CONFLICT", +// "status": 409, +// "detail": "vnfd with id 'openldap_knf' already exists for this project" +// } + +func (c *Session) getVnfDescriptors() ([]vnfDescView, error) { + data := []vnfDescView{} + _, err := c.getJson(c.conn.VnfPackagesContent(), &data) + return data, err +} + +func (c *Session) lookupVnfDescriptorId(name string) (string, error) { + if c.vnfdMap == nil { + if ds, err := c.getVnfDescriptors(); err != nil { + return "", err + } else { + c.vnfdMap = buildVnfDescMap(ds) + } + } + if id, ok := c.vnfdMap[name]; !ok { + return "", &missingDescriptor{typ: "VNFD", name: name} + } else { + return id, nil + } +} + +type missingDescriptor struct { + typ string + name string +} + +func (e *missingDescriptor) Error() string { + return fmt.Sprintf("no %s found for name ID: %s", e.typ, e.name) +} diff --git a/osmops/nbic/vnfdescriptors_test.go b/osmops/nbic/vnfdescriptors_test.go new file mode 100644 index 0000000..c6e2c83 --- /dev/null +++ b/osmops/nbic/vnfdescriptors_test.go @@ -0,0 +1,87 @@ +package nbic + +import ( + "strings" + "testing" +) + +func TestLookupVnfDescIdUseCachedData(t *testing.T) { + nbic := &Session{ + vnfdMap: map[string]string{"silly_ns": "324567"}, + } + id, err := nbic.lookupVnfDescriptorId("silly_ns") + if err != nil { + t.Errorf("want: 
324567; got: %v", err) + } + if id != "324567" { + t.Errorf("want: 324567; got: %s", id) + } +} + +func TestLookupVnfDescIdErrorOnMiss(t *testing.T) { + nbic := &Session{ + vnfdMap: map[string]string{"silly_ns": "324567"}, + } + if _, err := nbic.lookupVnfDescriptorId("not there!"); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestLookupVnfDescIdFetchDataFromServer(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, usrCreds, nbi.exchange) + + wantId := "4ffdeb67-92e7-46fa-9fa2-331a4d674137" + if gotId, err := nbic.lookupVnfDescriptorId("openldap_knf"); err != nil { + t.Errorf("want: %s; got: %v", wantId, err) + } else { + if gotId != wantId { + t.Errorf("want: %s; got: %v", wantId, gotId) + } + } + + if len(nbi.exchanges) != 2 { + t.Fatalf("want: 2; got: %d", len(nbi.exchanges)) + } + rr1, rr2 := nbi.exchanges[0], nbi.exchanges[1] + if rr1.req.URL.Path != urls.Tokens().Path { + t.Errorf("want: %s; got: %s", urls.Tokens().Path, rr1.req.URL.Path) + } + if rr2.req.URL.Path != urls.VnfPackagesContent().Path { + t.Errorf("want: %s; got: %s", urls.VnfPackagesContent().Path, rr2.req.URL.Path) + } +} + +func TestLookupVnfDescIdFetchDataFromServerTokenError(t *testing.T) { + nbi := newMockNbi() + urls := newConn() + nbic, _ := New(urls, UserCredentials{}, nbi.exchange) + + if _, err := nbic.lookupVnfDescriptorId("openldap_knf"); err == nil { + t.Errorf("want: error; got: nil") + } + + if len(nbi.exchanges) != 1 { + t.Fatalf("want: 1; got: %d", len(nbi.exchanges)) + } + rr1 := nbi.exchanges[0] + if rr1.req.URL.Path != urls.Tokens().Path { + t.Errorf("want: %s; got: %s", urls.Tokens().Path, rr1.req.URL.Path) + } +} + +func TestMissingDescErrFormat(t *testing.T) { + err := &missingDescriptor{ + typ: "x", + name: "y", + } + got := err.Error() + + if !strings.Contains(got, "x") { + t.Errorf("want: contains type; got: no type") + } + if !strings.Contains(got, "y") { + t.Errorf("want: contains name; got: no name") + } +} diff --git 
a/osmops/pkgr/build_pkg_test.go b/osmops/pkgr/build_pkg_test.go new file mode 100644 index 0000000..046aa9b --- /dev/null +++ b/osmops/pkgr/build_pkg_test.go @@ -0,0 +1,140 @@ +package pkgr + +import ( + "io" + "os" + "path/filepath" + "reflect" + "runtime" + "sort" + "strings" + "testing" + "time" + + "github.com/martel-innovate/osmops/osmops/util/file" + "github.com/martel-innovate/osmops/osmops/util/tgz" +) + +func findTestDataDir(dataDirName string) file.AbsPath { + _, thisFileName, _, _ := runtime.Caller(1) + enclosingDir := filepath.Dir(thisFileName) + testDataDir := filepath.Join(enclosingDir, "build_pkg_test_dir", dataDirName) + p, _ := file.ParseAbsPath(testDataDir) + + return p +} + +const wantOpenLdapNsChecksumContent = ` +c122710acb043b99be209fefd9ae2032 openldap_ns/README.md +6cbc0db17616eff57c60efa0eb15ac76 openldap_ns/openldap_nsd.yaml +` + +var wantOpenLdapNsPaths = []string{ + "openldap_ns/README.md", "openldap_ns/openldap_nsd.yaml", + "openldap_ns/checksums.txt", +} + +func TestPackOpenLdapNs(t *testing.T) { + wantName := "openldap_ns" + wantHash := "cae4506d23753ee95f21faf8c6f97eaa" + verifyPackage(t, wantName, wantHash, wantOpenLdapNsChecksumContent, + wantOpenLdapNsPaths) +} + +const wantOpenLdapKnfChecksumContent = ` +7044f64c16d4ef3eeef7f8668a4dc5a1 openldap_knf/openldap_vnfd.yaml +` + +var wantOpenLdapKnfPaths = []string{ + "openldap_knf/openldap_vnfd.yaml", "openldap_knf/checksums.txt", +} + +func TestPackOpenLdapKnf(t *testing.T) { + wantName := "openldap_knf" + wantHash := "95389ba9b38e9a76b66789217a178e75" + verifyPackage(t, wantName, wantHash, wantOpenLdapKnfChecksumContent, + wantOpenLdapKnfPaths) +} + +const wantOpenLdapNestedChecksumContent = ` +c122710acb043b99be209fefd9ae2032 openldap_nested/README.md +7044f64c16d4ef3eeef7f8668a4dc5a1 openldap_nested/knf/openldap_vnfd.yaml +6cbc0db17616eff57c60efa0eb15ac76 openldap_nested/openldap_nsd.yaml +` + +var wantOpenLdapNestedPaths = []string{ + "openldap_nested/README.md", 
"openldap_nested/openldap_nsd.yaml", + "openldap_nested/knf/openldap_vnfd.yaml", + "openldap_nested/checksums.txt", +} + +func TestPackOpenLdapNested(t *testing.T) { + wantName := "openldap_nested" + wantHash := "5c2c2e459d0997fbef089499a7976812" + verifyPackage(t, wantName, wantHash, wantOpenLdapNestedChecksumContent, + wantOpenLdapNestedPaths) +} + +func verifyPackage(t *testing.T, wantName, wantHash, wantChecksum string, + wantPaths []string) { + source := findTestDataDir(wantName) + epochStart := time.Unix(0, 0) // (*) see NOTE + pkg, err := doPack(source, tgz.WithEntryTime(epochStart)) // (*) see NOTE + if err != nil { + t.Fatalf("want: no error; got: %v", err) + } + + if pkg.Name != wantName { + t.Errorf("want name: %s; got: %s", wantName, pkg.Name) + } + + if pkg.Source.Directory().Value() != source.Value() { + t.Errorf("want source: %v; got: %v", source, pkg.Source) + } + + if pkg.Hash != wantHash { + t.Errorf("want hash: %s; got: %s", wantHash, pkg.Hash) + } + + gotPaths, gotChecksum := pathsAndChecksumFile(t, pkg.Data) + wantChecksum = strings.TrimSpace(wantChecksum) + gotChecksum = strings.TrimSpace(gotChecksum) + if gotChecksum != wantChecksum { + t.Errorf("want checksum: %s; got: %s", wantChecksum, gotChecksum) + } + sort.Strings(gotPaths) + sort.Strings(wantPaths) + if !reflect.DeepEqual(gotPaths, wantPaths) { + t.Errorf("want paths: %v; got: %v", wantPaths, gotPaths) + } +} + +// NOTE. Reproducible package hashes. +// Even if the files that make up the package are always the same, their +// mod/change/access time can be different at different times---think of +// e.g. a fresh checkout, merging a branch, opening one of the files, etc. +// The tar entry tgz creates for each file includes the mod/change/access +// time. So to make our test reproducible we've got to make sure we always +// get the same hash, which is why we set those times to the epoch's start. 
+ +func pathsAndChecksumFile(t *testing.T, data io.ReadCloser) ([]string, string) { + reader, err := tgz.NewReader(data) + if err != nil { + t.Fatalf("couldn't create tgz reader: %v", err) + } + defer reader.Close() + + paths := []string{} + checksums := "" + reader.IterateEntries( + func(archivePath string, fi os.FileInfo, content io.Reader) error { + paths = append(paths, archivePath) + if strings.HasSuffix(archivePath, ChecksumFileName) { + buf, _ := io.ReadAll(content) + checksums = string(buf) + } + return nil + }) + + return paths, checksums +} diff --git a/osmops/pkgr/build_pkg_test_dir/openldap_knf/openldap_vnfd.yaml b/osmops/pkgr/build_pkg_test_dir/openldap_knf/openldap_vnfd.yaml new file mode 100644 index 0000000..41795bf --- /dev/null +++ b/osmops/pkgr/build_pkg_test_dir/openldap_knf/openldap_vnfd.yaml @@ -0,0 +1,18 @@ +vnfd: + description: KNF with single KDU using a helm-chart for openldap version 1.2.7 + df: + - id: default-df + ext-cpd: + - id: mgmt-ext + k8s-cluster-net: mgmtnet + id: openldap_knf + k8s-cluster: + nets: + - id: mgmtnet + kdu: + - name: ldap + helm-chart: stable/openldap:1.2.7 + mgmt-cp: mgmt-ext + product-name: openldap_knf + provider: Telefonica + version: '1.0' diff --git a/osmops/pkgr/build_pkg_test_dir/openldap_nested/README.md b/osmops/pkgr/build_pkg_test_dir/openldap_nested/README.md new file mode 100644 index 0000000..8424611 --- /dev/null +++ b/osmops/pkgr/build_pkg_test_dir/openldap_nested/README.md @@ -0,0 +1,26 @@ +# SIMPLE OPEN-LDAP CHART + +Descriptors that installs an openldap version 1.2.1 chart in a K8s cluster + +There is one VNF (openldap\_vnf) with only one KDU. 
+ +There is one NS that connects the VNF to a mgmt network + +## Onboarding and instantiation + +```bash +osm nfpkg-create openldap_knf.tar.gz +osm nspkg-create openldap_ns.tar.gz +osm ns-create --ns_name ldap --nsd_name openldap_ns --vim_account | --ssh_keys ${HOME}/.ssh/id_rsa.pub +``` + +### Instantiation option + +Some parameters could be passed during the instantiation. + +* replicaCount: Number of Open LDAP replicas that will be created + +```bash +osm ns-create --ns_name ldap --nsd_name openldap_ns --vim_account | --config '{additionalParamsForVnf: [{"member-vnf-index": "openldap", "additionalParams": {"replicaCount": "2"}}]}' +``` + diff --git a/osmops/pkgr/build_pkg_test_dir/openldap_nested/knf/openldap_vnfd.yaml b/osmops/pkgr/build_pkg_test_dir/openldap_nested/knf/openldap_vnfd.yaml new file mode 100644 index 0000000..41795bf --- /dev/null +++ b/osmops/pkgr/build_pkg_test_dir/openldap_nested/knf/openldap_vnfd.yaml @@ -0,0 +1,18 @@ +vnfd: + description: KNF with single KDU using a helm-chart for openldap version 1.2.7 + df: + - id: default-df + ext-cpd: + - id: mgmt-ext + k8s-cluster-net: mgmtnet + id: openldap_knf + k8s-cluster: + nets: + - id: mgmtnet + kdu: + - name: ldap + helm-chart: stable/openldap:1.2.7 + mgmt-cp: mgmt-ext + product-name: openldap_knf + provider: Telefonica + version: '1.0' diff --git a/osmops/pkgr/build_pkg_test_dir/openldap_nested/openldap_nsd.yaml b/osmops/pkgr/build_pkg_test_dir/openldap_nested/openldap_nsd.yaml new file mode 100644 index 0000000..b995164 --- /dev/null +++ b/osmops/pkgr/build_pkg_test_dir/openldap_nested/openldap_nsd.yaml @@ -0,0 +1,22 @@ +nsd: + nsd: + - description: NS consisting of a single KNF openldap_knf connected to mgmt network + designer: OSM + df: + - id: default-df + vnf-profile: + - id: openldap + virtual-link-connectivity: + - constituent-cpd-id: + - constituent-base-element-id: openldap + constituent-cpd-id: mgmt-ext + virtual-link-profile-id: mgmtnet + vnfd-id: openldap_knf + id: openldap_ns + 
name: openldap_ns + version: '1.0' + virtual-link-desc: + - id: mgmtnet + mgmt-network: 'true' + vnfd-id: + - openldap_knf diff --git a/osmops/pkgr/build_pkg_test_dir/openldap_ns/README.md b/osmops/pkgr/build_pkg_test_dir/openldap_ns/README.md new file mode 100644 index 0000000..8424611 --- /dev/null +++ b/osmops/pkgr/build_pkg_test_dir/openldap_ns/README.md @@ -0,0 +1,26 @@ +# SIMPLE OPEN-LDAP CHART + +Descriptors that installs an openldap version 1.2.1 chart in a K8s cluster + +There is one VNF (openldap\_vnf) with only one KDU. + +There is one NS that connects the VNF to a mgmt network + +## Onboarding and instantiation + +```bash +osm nfpkg-create openldap_knf.tar.gz +osm nspkg-create openldap_ns.tar.gz +osm ns-create --ns_name ldap --nsd_name openldap_ns --vim_account | --ssh_keys ${HOME}/.ssh/id_rsa.pub +``` + +### Instantiation option + +Some parameters could be passed during the instantiation. + +* replicaCount: Number of Open LDAP replicas that will be created + +```bash +osm ns-create --ns_name ldap --nsd_name openldap_ns --vim_account | --config '{additionalParamsForVnf: [{"member-vnf-index": "openldap", "additionalParams": {"replicaCount": "2"}}]}' +``` + diff --git a/osmops/pkgr/build_pkg_test_dir/openldap_ns/openldap_nsd.yaml b/osmops/pkgr/build_pkg_test_dir/openldap_ns/openldap_nsd.yaml new file mode 100644 index 0000000..b995164 --- /dev/null +++ b/osmops/pkgr/build_pkg_test_dir/openldap_ns/openldap_nsd.yaml @@ -0,0 +1,22 @@ +nsd: + nsd: + - description: NS consisting of a single KNF openldap_knf connected to mgmt network + designer: OSM + df: + - id: default-df + vnf-profile: + - id: openldap + virtual-link-connectivity: + - constituent-cpd-id: + - constituent-base-element-id: openldap + constituent-cpd-id: mgmt-ext + virtual-link-profile-id: mgmtnet + vnfd-id: openldap_knf + id: openldap_ns + name: openldap_ns + version: '1.0' + virtual-link-desc: + - id: mgmtnet + mgmt-network: 'true' + vnfd-id: + - openldap_knf diff --git 
a/osmops/pkgr/checksum.go b/osmops/pkgr/checksum.go new file mode 100644 index 0000000..2a339ed --- /dev/null +++ b/osmops/pkgr/checksum.go @@ -0,0 +1,37 @@ +package pkgr + +import ( + "crypto/md5" + "fmt" + "io" + "os" + "strings" + + "github.com/martel-innovate/osmops/osmops/util/bytez" + "github.com/martel-innovate/osmops/osmops/util/file" +) + +const ChecksumFileName = "checksums.txt" + +func md5string(data []byte) string { + hash := md5.Sum(data) + return fmt.Sprintf("%x", hash) +} + +func computeChecksum(target file.AbsPath) (string, error) { + content, err := os.ReadFile(target.Value()) + if err != nil { + return "", err + } + return md5string(content), nil +} + +func writeCheckSumFileContent(src PackageSource) io.Reader { + buf := bytez.NewBuffer() + for _, filePath := range src.SortedFilePaths() { + hash := src.FileHash(filePath) + line := fmt.Sprintf("%s\t%s\n", hash, filePath) + io.Copy(buf, strings.NewReader(line)) + } + return buf +} diff --git a/osmops/pkgr/checksum_test.go b/osmops/pkgr/checksum_test.go new file mode 100644 index 0000000..4ba11f0 --- /dev/null +++ b/osmops/pkgr/checksum_test.go @@ -0,0 +1,51 @@ +package pkgr + +import ( + "io/fs" + "io/ioutil" + "os" + "testing" + + "github.com/martel-innovate/osmops/osmops/util/file" +) + +var md5stringFixtures = []struct { + input []byte + want string // go's md5 should be the same as md5sum +}{ + // $ touch empty; md5sum empty + {nil, "d41d8cd98f00b204e9800998ecf8427e"}, + {[]byte{}, "d41d8cd98f00b204e9800998ecf8427e"}, + // $ echo -n 1 | md5sum + {[]byte{49}, "c4ca4238a0b923820dcc509a6f75849b"}, + // $ echo -n 12 | md5sum + {[]byte{49, 50}, "c20ad4d76fe97759aa27a0c99bff6710"}, +} + +func TestMd5string(t *testing.T) { + for k, d := range md5stringFixtures { + got := md5string(d.input) + if got != d.want { + t.Errorf("[%d] want: %s; got: %s", k, d.want, got) + } + } +} + +func TestComputeChecksumFileAccessErr(t *testing.T) { + fd, err := ioutil.TempFile("", "") + if err != nil { + 
t.Fatalf("couldn't create temp file: %v", err) + } + defer os.Remove(fd.Name()) + fd.Close() + os.Chmod(fd.Name(), 0200) // computeChecksum can't read it + + filePath, err := file.ParseAbsPath(fd.Name()) + if err != nil { + t.Fatalf("couldn't create temp file: %v", err) + } + _, err = computeChecksum(filePath) + if _, ok := err.(*fs.PathError); !ok { + t.Errorf("want: path error; got: %v", err) + } +} diff --git a/osmops/pkgr/pack.go b/osmops/pkgr/pack.go new file mode 100644 index 0000000..c46288a --- /dev/null +++ b/osmops/pkgr/pack.go @@ -0,0 +1,65 @@ +package pkgr + +import ( + "io" + + "github.com/martel-innovate/osmops/osmops/util/bytez" + "github.com/martel-innovate/osmops/osmops/util/file" + "github.com/martel-innovate/osmops/osmops/util/tgz" +) + +// Pack creates an OSM package from the source files contained in the +// specified directory. Pack writes the entire package content into a +// memory buffer instead of streaming it. This shouldn't be a problem +// since packages are usually very small, like less than 1Kb. +func Pack(source file.AbsPath) (*Package, error) { + return doPack(source, tgz.WithBestCompression()) +} + +// added for testability +func doPack(source file.AbsPath, opts ...tgz.WriterOption) (*Package, error) { + sink := bytez.NewBuffer() + pkgSource := newPkgSrc(source) + if err := writePackageData(pkgSource, sink, opts...); err != nil { + return nil, err + } + return makePackage(pkgSource, sink), nil +} + +func writePackageData(source *pkgSrc, sink io.WriteCloser, opts ...tgz.WriterOption) error { + archiveBaseDirName := source.DirectoryName() + writer, err := tgz.NewWriter(archiveBaseDirName, sink, opts...) 
+ if err != nil { + return err + } + defer writer.Close() + + if err := collectPackageItems(source, writer); err != nil { + return err + } + return addChecksumFile(source, writer) +} + +func collectPackageItems(source *pkgSrc, writer tgz.Writer) error { + scanner := file.NewTreeScanner(source.Directory()) + visitor := makeSourceVisitor(source, writer) + if es := scanner.Visit(visitor); len(es) > 0 { + return es[0] + } + return nil +} + +func makeSourceVisitor(source *pkgSrc, writer tgz.Writer) file.Visitor { + collectFile := writer.Visitor() + return func(node file.TreeNode) error { + if err := collectFile(node); err != nil { + return err + } + return source.addFileHash(node) + } +} + +func addChecksumFile(source *pkgSrc, writer tgz.Writer) error { + content := writeCheckSumFileContent(source) + return writer.AddEntry(ChecksumFileName, content) +} diff --git a/osmops/pkgr/pack_test.go b/osmops/pkgr/pack_test.go new file mode 100644 index 0000000..032a119 --- /dev/null +++ b/osmops/pkgr/pack_test.go @@ -0,0 +1,36 @@ +package pkgr + +import ( + "io/ioutil" + "os" + "path" + "testing" + + "github.com/martel-innovate/osmops/osmops/util/file" +) + +func TestPackErrOnSourceDirAccess(t *testing.T) { + tempDir, err := ioutil.TempDir("", "pkgr-test") + if err != nil { + t.Fatalf("couldn't create temp dir: %v", err) + } + defer os.RemoveAll(tempDir) + + sourceDir, _ := file.ParseAbsPath(tempDir) + contentFile := path.Join(tempDir, "content") + + os.WriteFile(contentFile, []byte{}, 0200) // Pack's visitor can't open it + + _, err = Pack(sourceDir) + if _, ok := err.(*file.VisitError); !ok { + t.Errorf("want: visit error; got: %v", err) + } +} + +func TestWritePackageDataErrOnNilSink(t *testing.T) { + srcDir, _ := file.ParseAbsPath("no/where") + pkgSrc := newPkgSrc(srcDir) + if err := writePackageData(pkgSrc, nil); err == nil { + t.Errorf("want: error; got: nil") + } +} diff --git a/osmops/pkgr/types.go b/osmops/pkgr/types.go new file mode 100644 index 
0000000..5523400 --- /dev/null +++ b/osmops/pkgr/types.go @@ -0,0 +1,134 @@ +package pkgr + +import ( + "io" + "os" + "path" + "path/filepath" + "sort" + + "github.com/martel-innovate/osmops/osmops/util/bytez" + "github.com/martel-innovate/osmops/osmops/util/file" +) + +// Package holds the data that make up an OSM package. +type Package struct { + // The package name. Conventionally this is the same as the name of + // the directory containing the source files. We also follow this + // convention. + Name string + // Metadata about the package source files. + Source PackageSource + // Gzipped tar stream containing the package source files plus a + // checksum file. + // + // Each source file is archived at a path "r/p" where r is the name of + // the directory containing the package source files and p is the file's + // path relative to r. For example, if a package source directory "my-pkg" + // contained a file "d/f", f's path in the archive would be "my-pkg/d/f". + // + // The stream also contains a checksum file at path "r/checksums.txt", + // where r is the name of the directory containing the package source + // files. This file has an MD5 hash entry in correspondence of each file + // found in the package source directory and subdirectories. Each entry + // is a text line starting with the MD5 of the file, followed by a tab + // and then the path of the file in the archive. Here's an example: + // + // c122710acb043b99be209fefd9ae2032 my-pkg/README.md + // 7044f64c16d4ef3eeef7f8668a4dc5a1 my-pkg/knf/vnfd.yaml + // 6cbc0db17616eff57c60efa0eb15ac76 my-pkg/nsd.yaml + // + Data io.ReadCloser + // MD5 hash of the whole gzipped tar stream. + Hash string +} + +func makePackage(src PackageSource, data *bytez.Buffer) *Package { + return &Package{ + Name: src.DirectoryName(), + Source: src, + Data: data, + Hash: md5string(data.Bytes()), + } +} + +// PackageSource provides metadata about an OSM package's source files +// as well as their content. 
+type PackageSource interface { + // The root directory containing the package source files. + Directory() file.AbsPath + // The name of the root directory. + DirectoryName() string + // Relative paths of the files in the package source directory and + // subdirectories. Each path is prefixed by the package source directory's + // name. For example, if "my-pkg" is the root and there's a file "f" at + // "d/f", the corresponding path returned by this method is "my-pkg/d/f". + // This method returns the paths sorted in alphabetical order. + SortedFilePaths() []string + // Lookup the MD5 hash of a source file in the package. + // The filePath argument must be one of the paths returned by + // SortedFilePaths. + FileHash(filePath string) string + // FileContent returns the bytes that make up the specified file in the + // package. + // The filePath argument must be one of the paths returned by + // SortedFilePaths. + FileContent(filePath string) ([]byte, error) +} + +type pkgSrc struct { + srcDir file.AbsPath + srcDirName string + pathToHashMap map[string]string +} + +func newPkgSrc(srcDir file.AbsPath) *pkgSrc { + return &pkgSrc{ + srcDir: srcDir, + srcDirName: path.Base(srcDir.Value()), + pathToHashMap: make(map[string]string), + } +} + +func (p *pkgSrc) Directory() file.AbsPath { + return p.srcDir +} + +func (p *pkgSrc) DirectoryName() string { + return p.srcDirName +} + +func (p *pkgSrc) SortedFilePaths() []string { + keys := make([]string, 0, len(p.pathToHashMap)) + for k := range p.pathToHashMap { + keys = append(keys, k) + } + sort.Strings(keys) + + return keys +} + +func (p *pkgSrc) FileHash(filePath string) string { + if hash, ok := p.pathToHashMap[filePath]; ok { + return hash + } + return "" +} + +func (p *pkgSrc) addFileHash(node file.TreeNode) error { + if !node.FsMeta.Mode().IsRegular() { + return nil + } + hash, err := computeChecksum(node.NodePath) + if err == nil { + baseNamePlusPath := path.Join(p.srcDirName, node.RelPath) + 
p.pathToHashMap[baseNamePlusPath] = hash + } + return err +} + +func (p *pkgSrc) FileContent(filePath string) ([]byte, error) { + filePathFromSrcDir, _ := filepath.Rel(p.srcDirName, filePath) + absPath := p.srcDir.Join(filePathFromSrcDir) + return os.ReadFile(absPath.Value()) +} diff --git a/osmops/pkgr/types_test.go b/osmops/pkgr/types_test.go new file mode 100644 index 0000000..39b55a1 --- /dev/null +++ b/osmops/pkgr/types_test.go @@ -0,0 +1,71 @@ +package pkgr + +import ( + "io/fs" + "os" + "reflect" + "strings" + "testing" + + "github.com/martel-innovate/osmops/osmops/util/file" +) + +func TestFileHashFailedLookup(t *testing.T) { + srcDir, _ := file.ParseAbsPath("no/where") + pkgSrc := newPkgSrc(srcDir) + got := pkgSrc.FileHash("not/there") + if got != "" { + t.Errorf("want: empty; got: %s", got) + } +} + +func TestFileContentReadsAllFileIntoMem(t *testing.T) { + source := findTestDataDir("openldap_nested") + targetFile := source.Join("knf/openldap_vnfd.yaml") + + want, err := os.ReadFile(targetFile.Value()) + if err != nil { + t.Fatalf("couldn't read file: %v", targetFile) + } + + pkg, err := Pack(source) + if err != nil { + t.Fatalf("couldn't pack: %v; error: %v", source, err) + } + + archiveFilePath := "" + for _, p := range pkg.Source.SortedFilePaths() { + if strings.HasSuffix(targetFile.Value(), p) { + archiveFilePath = p + break + } + } + got, err := pkg.Source.FileContent(archiveFilePath) + if err != nil { + t.Fatalf("want: read file; got: %v", err) + } + if !reflect.DeepEqual(want, got) { + t.Errorf("want: %v; got: %v", want, got) + } +} + +func TestFileContentErrWhenUsingPathNotRetrievedFromSortedFilePaths(t *testing.T) { + source := findTestDataDir("openldap_nested") + pkg, err := Pack(source) + if err != nil { + t.Fatalf("couldn't pack: %v; error: %v", source, err) + } + + // missing pkg dir; SortedFilePaths would've returned: + // - openldap_nested/knf/openldap_vnfd.yaml + _, err = pkg.Source.FileContent("knf/openldap_vnfd.yaml") + if _, ok := 
// Buffer is an in-memory FIFO byte store. It wraps a bytes.Buffer so the
// same value can be passed wherever an io.ReadCloser or io.WriteCloser is
// expected; Close is a no-op.
type Buffer struct {
	data *bytes.Buffer
}

// NewBuffer creates an empty Buffer ready for reading and writing.
func NewBuffer() *Buffer {
	var underlying bytes.Buffer
	return &Buffer{data: &underlying}
}

// Write appends p to the buffer, implementing io.Writer.
func (buf *Buffer) Write(p []byte) (n int, err error) {
	return buf.data.Write(p)
}

// Read drains previously written, not-yet-read bytes into p,
// implementing io.Reader.
func (buf *Buffer) Read(p []byte) (n int, err error) {
	return buf.data.Read(p)
}

// Close implements io.Closer. It does nothing and always returns nil,
// since there's no underlying resource to release.
func (buf *Buffer) Close() error {
	return nil
}

// Bytes returns a slice holding the unread portion of the buffer.
func (buf *Buffer) Bytes() []byte {
	return buf.data.Bytes()
}
// AbsPath wraps a filesystem path that's guaranteed to be absolute.
// Build values through ParseAbsPath or Join; the zero value is unusable.
type AbsPath struct{ data string }

// Value returns the wrapped absolute path as a plain string.
func (d AbsPath) Value() string {
	return d.data
}

// IsStringPath checks that value is a string holding a parsable path.
// It's shaped for validation frameworks that hand values around as
// interface{}; non-string inputs fail validation.
func IsStringPath(value interface{}) error {
	str, _ := value.(string) // a non-string becomes "", rejected below
	_, err := ParseAbsPath(str)
	return err
}

// ParseAbsPath converts path into an AbsPath, resolving relative paths
// against the current working directory. Surrounding whitespace gets
// trimmed first, since filepath.Abs keeps it (Abs("/a/b ") == "/a/b ").
// A blank or whitespace-only input is rejected.
func ParseAbsPath(path string) (AbsPath, error) {
	trimmed := strings.TrimSpace(path)
	if trimmed == "" {
		return AbsPath{},
			errors.New("must be a non-empty, non-whitespace-only string")
	}
	abs, err := filepath.Abs(trimmed)
	if err != nil {
		return AbsPath{}, err
	}
	return AbsPath{data: abs}, nil
}

// Join appends relativePath to this path, returning the combined AbsPath.
// Whitespace around relativePath is trimmed since filepath.Join keeps it,
// e.g. Join("/a", "/b ") == "/a/b ".
// Caveat: filepath.Join doesn't validate relativePath against the running
// OS's path syntax, so an invalid input can yield an inconsistent AbsPath.
// Go's standard library has no OS-independent abstract path handling, so
// this is the best we can do. See e.g.
// - https://stackoverflow.com/questions/35231846
func (d AbsPath) Join(relativePath string) AbsPath {
	tail := strings.TrimSpace(relativePath)
	return AbsPath{data: filepath.Join(d.data, tail)}
}

// IsDir returns nil exactly when the path exists and is a directory;
// otherwise it returns the stat error or a "not a directory" error.
func (d AbsPath) IsDir() error {
	info, err := os.Stat(d.data)
	if err != nil {
		return err
	}
	if !info.IsDir() {
		return fmt.Errorf("not a directory: %v", d.data)
	}
	return nil
}
+func ListPaths(dirPath string) ([]string, []error) { + visitedPaths := []string{} + errs := []error{} + + targetDir, err := ParseAbsPath(dirPath) + if err != nil { + errs = append(errs, err) + return visitedPaths, errs + } + + scanner := NewTreeScanner(targetDir) + es := scanner.Visit(func(node TreeNode) error { + if node.RelPath != "" { + visitedPaths = append(visitedPaths, node.RelPath) + } + return nil + }) + + errs = append(errs, es...) + sort.Strings(visitedPaths) + + return visitedPaths, errs +} + +// ListSubDirectoryNames returns the names of any directory found just +// below dirPath. ListSubDirectoryNames sorts the returned names in +// alphabetical order. Also, ListSubDirectoryNames will return an empty +// list if an error happens. +func ListSubDirectoryNames(dirPath string) ([]string, error) { + dirs := []string{} + entries, err := os.ReadDir(dirPath) + if err != nil { + return dirs, err + } + + for _, e := range entries { + if e.IsDir() { + dirs = append(dirs, e.Name()) + } + } + + sort.Strings(dirs) + + return dirs, nil +} diff --git a/osmops/util/file/paths_test.go b/osmops/util/file/paths_test.go new file mode 100644 index 0000000..663e2b4 --- /dev/null +++ b/osmops/util/file/paths_test.go @@ -0,0 +1,174 @@ +package file + +import ( + "io/fs" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" +) + +// TODO. The path tests will probably fail on Windows since we're using +// Unix paths. We could use filepath.Join to make most of them platform +// independent but I'm not sure how to make absolute paths though... 
+ +var invalidPathFixtures = []string{"", " ", "\n", "\t "} + +func TestInvalidPath(t *testing.T) { + for k, d := range invalidPathFixtures { + if err := IsStringPath(d); err == nil { + t.Errorf("[%d] want: invalid; got: valid", k) + } + } +} + +var parsePathFixtures = []struct { + in string + want string + rel bool +}{ + {"/a/b/s", "/a/b/s", false}, {"r/e/l", "/r/e/l", true}, +} + +func TestParsePath(t *testing.T) { + for k, d := range parsePathFixtures { + if p, err := ParseAbsPath(d.in); err != nil { + t.Errorf("[%d] want: valid parse; got: %v", k, err) + } else { + if !d.rel && d.want != p.Value() { + t.Errorf("[%d] want: %s; got: %s", k, d.want, p.Value()) + } + if d.rel && !strings.HasSuffix(p.Value(), d.want) { + t.Errorf("[%d] want suffix: %s; got: %s", k, d.want, p.Value()) + } + } + } +} + +var joinPathFixtures = []struct { + base string + rel string + want string +}{ + {"/a", "", "/a"}, {"/a/", " ", "/a"}, {"/a", "\t", "/a"}, + {"/a/", "b ", "/a/b"}, {"/a", "b\n", "/a/b"}, {"/a/b", "//c", "/a/b/c"}, +} + +func TestJoinPath(t *testing.T) { + for k, d := range joinPathFixtures { + if base, err := ParseAbsPath(d.base); err != nil { + t.Errorf("[%d] want: valid parse; got: %v", k, err) + } else { + joined := base.Join(d.rel) + if joined.Value() != d.want { + t.Errorf("[%d] want: %s; got: %s", k, d.want, joined) + } + } + } +} + +func TestIsDir(t *testing.T) { + if pwd, err := ParseAbsPath("."); err != nil { + t.Errorf("want: valid parse; got: %v", err) + } else { + if err := pwd.IsDir(); err != nil { + t.Errorf("want: pwd is a directory; got: %v", err) + } + + notThere := pwd.Join("notThere") + if err := notThere.IsDir(); err == nil { + t.Errorf("want: not a directory; got directory: %v", notThere) + } + + if tempFile, err := ioutil.TempFile("", "prefix"); err != nil { + t.Errorf("couldn't create temp file: %v", err) + } else { + defer os.Remove(tempFile.Name()) + + if tf, err := ParseAbsPath(tempFile.Name()); err != nil { + t.Errorf("want: valid temp file 
parse; got: %v", err) + } else { + if err := tf.IsDir(); err == nil { + t.Errorf("want: not a dir; got dir: %v", tf) + } + } + } + } +} + +func assertListPaths(t *testing.T, dirIndex int, want []string) { + got, err := ListPaths(findTestDataDir(dirIndex).Value()) + if len(err) != 0 { + t.Fatalf("want: %v; got: %v", want, err) + } + if !reflect.DeepEqual(want, got) { + t.Errorf("want: %v; got: %v", want, got) + } +} + +func TestListPathsOfFlatDir(t *testing.T) { + want := []string{"f1", "f2"} + assertListPaths(t, 1, want) +} + +func TestListPathsOfDirTree(t *testing.T) { + want := []string{ + "d1", "d1/f2", "d1/f3", + "d2", "d2/d3", "d2/d3/f6", "d2/f4", "d2/f5", + "f1", + } + assertListPaths(t, 2, want) +} + +func TestListPathsErrorOnInvalidTargetDir(t *testing.T) { + got, err := ListPaths("") + if err == nil { + t.Errorf("want error; got: %v", got) + } +} + +func TestListSubDirectoryNamesWhenNoSubdirs(t *testing.T) { + flatDir := findTestDataDir(1) + got, err := ListSubDirectoryNames(flatDir.Value()) + if err != nil { + t.Fatalf("want: empty list; got error: %v", err) + } + if len(got) != 0 { + t.Errorf("want: empty list; got: %v", got) + } +} + +func TestListSubDirectoryNamesWithDirTree(t *testing.T) { + dirTree := findTestDataDir(2) + want := []string{"d1", "d2"} + + got, err := ListSubDirectoryNames(dirTree.Value()) + if err != nil { + t.Fatalf("want: %v; got error: %v", want, err) + } + if !reflect.DeepEqual(want, got) { + t.Errorf("want: %v; got: %v", want, got) + } +} + +func TestListSubDirectoryNamesScanDirErr(t *testing.T) { + tempDir, err := ioutil.TempDir("", "file-test") + if err != nil { + t.Fatalf("couldn't create temp dir: %v", err) + } + defer os.RemoveAll(tempDir) + + os.Chmod(tempDir, 0200) // ListSubDirectoryNames can't scan it + + got, err := ListSubDirectoryNames(tempDir) + if _, ok := err.(*fs.PathError); !ok { + t.Errorf("want: path access error; got: %v", err) + } + if got == nil { + t.Errorf("want: empty names list; got: nil") + } + if 
// TreeNode holds information about a filesystem node traversed by a
// TreeScanner.
type TreeNode struct {
	// The absolute path of the target directory being scanned.
	RootPath AbsPath
	// The absolute path of the node currently being visited.
	NodePath AbsPath
	// The path, relative to RootPath, of the node currently being visited.
	// It'll be the empty string for the root node, i.e. the target directory,
	// whereas it'll be the path to the current node from the target directory
	// for any other node, e.g. "some/dir", "some/dir/file", etc.
	// Also, for each visited node, including the target directory, you always
	// have: RootPath + RelPath = NodePath.
	RelPath string
	// Filesystem metadata about the node currently being visited.
	FsMeta fs.FileInfo
}

// Visitor is a function the TreeScanner calls on traversing each node
// in a given directory tree.
type Visitor func(TreeNode) error

// TreeScanner traverses a directory tree calling a visitor on each node.
type TreeScanner interface {
	// Visit scans a given target directory recursively, calling the
	// specified visitor on each filesystem node in the directory tree.
	// Any I/O errors that happen while traversing the target directory
	// tree get collected in the returned error buffer as VisitErrors.
	// Ditto for any error returned by the visitor.
	Visit(v Visitor) []error
}

// VisitError wraps any error that happened while traversing the target
// directory with an additional path to indicate where the error happened.
type VisitError struct {
	AbsPath string
	Err     error
}

// Error implements the standard error interface.
// The message format is "<path>: <wrapped error>".
func (e VisitError) Error() string {
	return fmt.Sprintf("%s: %v", e.AbsPath, e.Err)
}

// Unwrap implements Go's customary error unwrapping.
func (e VisitError) Unwrap() error { return e.Err }

// scanner is the TreeScanner implementation rooted at a fixed directory.
type scanner struct {
	targetDir AbsPath
}

// NewTreeScanner returns a TreeScanner to traverse the specified directory.
func NewTreeScanner(targetDir AbsPath) TreeScanner {
	return &scanner{targetDir: targetDir}
}

// Visit walks the target directory with filepath.Walk, funneling every
// I/O or visitor error into the returned slice. A nil visitor produces
// a single "nil visitor" VisitError instead of a panic.
func (s *scanner) Visit(v Visitor) []error {
	es := []error{}
	if v != nil {
		filepath.Walk(s.targetDir.Value(), // (*)
			s.visitAllAndCollectErrors(v, &es))
	} else {
		es = appendVisitError(s.targetDir.Value(), fmt.Errorf("nil visitor"),
			es)
	}
	return es

	// (*) b/c targetDir is absolute, so is the path parameter passed to
	// the lambda returned by visitAllAndCollectErrors---see Walk docs.
}

// visitAllAndCollectErrors adapts a Visitor to filepath.WalkFunc.
// Errors (both Walk's own I/O errors and the visitor's) get appended to
// acc rather than returned, so the walk always continues over the whole
// tree instead of stopping at the first failure.
func (s *scanner) visitAllAndCollectErrors(
	visit Visitor, acc *[]error) filepath.WalkFunc {
	return func(path string, info fs.FileInfo, err error) error {
		if err != nil {
			*acc = appendVisitError(path, err, *acc)
			return nil
		}
		node := TreeNode{
			RootPath: s.targetDir,
			NodePath: AbsPath{data: path}, // see note above about targetDir
			RelPath:  extractRelPath(s.targetDir.Value(), path),
			FsMeta:   info,
		}
		if err := visit(node); err != nil {
			*acc = appendVisitError(path, err, *acc)
		}
		return nil
	}
}

// extractRelPath strips the base directory prefix (and a leading path
// separator) from node, yielding "" for the base itself.
func extractRelPath(base, node string) string {
	sep := string(filepath.Separator)
	pathFromBase := strings.TrimPrefix(node, base)
	return strings.TrimPrefix(pathFromBase, sep)
}

// appendVisitError wraps err as a *VisitError tagged with path and
// appends it to the given error buffer.
func appendVisitError(path string, err error, errors []error) []error {
	visitError := &VisitError{AbsPath: path, Err: err}
	return append(errors, visitError)
}
"path/filepath" + "reflect" + "runtime" + "sort" + "testing" +) + +func findTestDataDir(dirIndex int) AbsPath { + _, thisFileName, _, _ := runtime.Caller(1) + enclosingDir := filepath.Dir(thisFileName) + testDataDirName := fmt.Sprintf("test_%d", dirIndex) + testDataDir := filepath.Join(enclosingDir, "visitor_test_dir", + testDataDirName) + p, _ := ParseAbsPath(testDataDir) + + return p +} + +func assertPathInvariants(t *testing.T, node TreeNode) { + if filepath.IsAbs(node.RelPath) { + t.Errorf("want: rel path; got: %s", node.RelPath) + } + + nodePath := node.NodePath.Value() + joinedPath := path.Join(node.RootPath.Value(), node.RelPath) + if nodePath != joinedPath { + t.Errorf("want: root + rel = node; got: root + rel = %s, node = %s", + joinedPath, nodePath) + } + + nodeName := path.Base(nodePath) + if node.FsMeta.Name() != nodeName { + t.Errorf("want: node name = fs name; got: node name = %s, fs name = %s", + nodeName, node.FsMeta.Name()) + } +} + +func TestPathInvariants(t *testing.T) { + for k := 1; k < 3; k++ { + targetDir := findTestDataDir(k) + scanner := NewTreeScanner(targetDir) + es := scanner.Visit(func(node TreeNode) error { + assertPathInvariants(t, node) + return nil + }) + if len(es) > 0 { + t.Errorf("want: no errors; got: %v", es) + } + } +} + +func TestNilVisitor(t *testing.T) { + targetDir := findTestDataDir(1) + scanner := NewTreeScanner(targetDir) + es := scanner.Visit(nil) + if len(es) != 1 { + t.Errorf("want: nil visitor error; got: %v", es) + } +} + +func TestNonExistentTargetDir(t *testing.T) { + targetDir := findTestDataDir(0) + scanner := NewTreeScanner(targetDir) + es := scanner.Visit(func(node TreeNode) error { + return nil + }) + if len(es) != 1 { + t.Errorf("want: non-existent target dir error; got: %v", es) + } +} + +func TestVisitCollectWalkError(t *testing.T) { + targetDir := findTestDataDir(1) + scanner := NewTreeScanner(targetDir).(*scanner) + es := []error{} + + fn := scanner.visitAllAndCollectErrors( + func(n TreeNode) error { + 
return nil + }, &es) + err := fmt.Errorf("I/O error while scanning the dir tree.") + var info fs.FileInfo + fn("/pa/th", info, err) + + if len(es) != 1 { + t.Errorf("want: one error; got: %v", es) + } + want := &VisitError{AbsPath: "/pa/th", Err: err} + if got, ok := es[0].(*VisitError); !ok || want.Error() != got.Error() { + t.Errorf("want: %v; got: %v", want, got) + } +} + +func TestCollectAllErrors(t *testing.T) { + targetDir := findTestDataDir(2) + scanner := NewTreeScanner(targetDir) + es := scanner.Visit(func(node TreeNode) error { + return fmt.Errorf("%s", node.NodePath.Value()) + }) + if len(es) != 10 { + t.Errorf("want: one error for each node; got: %v", es) + } + for _, e := range es { + if ve, ok := e.(*VisitError); !ok { + t.Errorf("want: VisitError; got: %v", e) + } else { + originalErrMgs := ve.Unwrap().Error() + if ve.AbsPath != originalErrMgs { + t.Errorf("want: %s; got: %s", originalErrMgs, ve.AbsPath) + } + + want := fmt.Sprintf("%s: %s", ve.AbsPath, ve.AbsPath) + if want != e.Error() { + t.Errorf("want: %s; got: %s", want, e.Error()) + } + } + } +} + +func assertVisitedPaths(t *testing.T, dirIndex int, want []string) { + visitedPaths := []string{} + targetDir := findTestDataDir(dirIndex) + scanner := NewTreeScanner(targetDir) + es := scanner.Visit(func(node TreeNode) error { + visitedPaths = append(visitedPaths, node.RelPath) + return nil + }) + + if len(es) != 0 { + t.Errorf("want: no errors; got: %v", es) + } + + sort.Strings(visitedPaths) + if !reflect.DeepEqual(want, visitedPaths) { + t.Errorf("want: %v; got: %v", want, visitedPaths) + } +} + +func TestVisitFlatDir(t *testing.T) { + want := []string{"", "f1", "f2"} + assertVisitedPaths(t, 1, want) +} + +func TestVisitDirTree(t *testing.T) { + want := []string{ + "", + "d1", "d1/f2", "d1/f3", + "d2", "d2/d3", "d2/d3/f6", "d2/f4", "d2/f5", + "f1", + } + assertVisitedPaths(t, 2, want) +} + +func TestVisitErrorStringRepr(t *testing.T) { + e := VisitError{AbsPath: "p", Err: fmt.Errorf("e")} + want 
:= "p: e" + if e.Error() != want { + t.Errorf("want: %s; got: %s", want, e) + } +} + +func TestVisitErrorUnwrapping(t *testing.T) { + cause := fmt.Errorf("cause") + e := VisitError{AbsPath: "p", Err: cause} + got := errors.Unwrap(e) + if cause != got { + t.Errorf("want: %v; got: %v", cause, got) + } +} diff --git a/osmops/util/file/visitor_test_dir/test_1/f1 b/osmops/util/file/visitor_test_dir/test_1/f1 new file mode 100644 index 0000000..9dd7ac9 --- /dev/null +++ b/osmops/util/file/visitor_test_dir/test_1/f1 @@ -0,0 +1 @@ +f1 \ No newline at end of file diff --git a/osmops/util/file/visitor_test_dir/test_1/f2 b/osmops/util/file/visitor_test_dir/test_1/f2 new file mode 100644 index 0000000..70f80fc --- /dev/null +++ b/osmops/util/file/visitor_test_dir/test_1/f2 @@ -0,0 +1 @@ +f2 \ No newline at end of file diff --git a/osmops/util/file/visitor_test_dir/test_2/d1/f2 b/osmops/util/file/visitor_test_dir/test_2/d1/f2 new file mode 100644 index 0000000..70f80fc --- /dev/null +++ b/osmops/util/file/visitor_test_dir/test_2/d1/f2 @@ -0,0 +1 @@ +f2 \ No newline at end of file diff --git a/osmops/util/file/visitor_test_dir/test_2/d1/f3 b/osmops/util/file/visitor_test_dir/test_2/d1/f3 new file mode 100644 index 0000000..87471a8 --- /dev/null +++ b/osmops/util/file/visitor_test_dir/test_2/d1/f3 @@ -0,0 +1 @@ +f3 \ No newline at end of file diff --git a/osmops/util/file/visitor_test_dir/test_2/d2/d3/f6 b/osmops/util/file/visitor_test_dir/test_2/d2/d3/f6 new file mode 100644 index 0000000..e5bef7d --- /dev/null +++ b/osmops/util/file/visitor_test_dir/test_2/d2/d3/f6 @@ -0,0 +1 @@ +f6 \ No newline at end of file diff --git a/osmops/util/file/visitor_test_dir/test_2/d2/f4 b/osmops/util/file/visitor_test_dir/test_2/d2/f4 new file mode 100644 index 0000000..00cc282 --- /dev/null +++ b/osmops/util/file/visitor_test_dir/test_2/d2/f4 @@ -0,0 +1 @@ +f4 \ No newline at end of file diff --git a/osmops/util/file/visitor_test_dir/test_2/d2/f5 b/osmops/util/file/visitor_test_dir/test_2/d2/f5 
new file mode 100644 index 0000000..968c356 --- /dev/null +++ b/osmops/util/file/visitor_test_dir/test_2/d2/f5 @@ -0,0 +1 @@ +f5 \ No newline at end of file diff --git a/osmops/util/file/visitor_test_dir/test_2/f1 b/osmops/util/file/visitor_test_dir/test_2/f1 new file mode 100644 index 0000000..9dd7ac9 --- /dev/null +++ b/osmops/util/file/visitor_test_dir/test_2/f1 @@ -0,0 +1 @@ +f1 \ No newline at end of file diff --git a/osmops/util/http/builders.go b/osmops/util/http/builders.go new file mode 100644 index 0000000..824ce52 --- /dev/null +++ b/osmops/util/http/builders.go @@ -0,0 +1,157 @@ +// Common ReqBuilder functions. + +package http + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + jsoniter "github.com/json-iterator/go" + + u "github.com/martel-innovate/osmops/osmops/util" +) + +var GET ReqBuilder = func(request *http.Request) error { + request.Method = "GET" + return nil +} + +var POST = func(request *http.Request) error { + request.Method = "POST" + return nil +} + +var PUT = func(request *http.Request) error { + request.Method = "PUT" + return nil +} + +func At(url *url.URL) ReqBuilder { + return func(request *http.Request) error { + if url == nil { + return errors.New("nil URL") + } + request.URL = url + request.Host = url.Host + return nil + } +} + +var MediaType = struct { + u.StrEnum + JSON, YAML, GZIP u.EnumIx +}{ + StrEnum: u.NewStrEnum("application/json", "application/yaml", + "application/gzip"), + JSON: 0, + YAML: 1, + GZIP: 2, +} + +func Content(mediaType u.EnumIx) ReqBuilder { + return func(request *http.Request) error { + request.Header.Set("Content-Type", MediaType.LabelOf(mediaType)) + return nil + } +} + +func Accept(mediaType ...u.EnumIx) ReqBuilder { + return func(request *http.Request) error { + ts := []string{} + for _, mt := range mediaType { + ts = append(ts, MediaType.LabelOf(mt)) + } + if len(ts) > 0 { + request.Header.Set("Accept", strings.Join(ts, ", ")) + } + + return nil + } + // TODO 
implement weights too? Not needed for OSM client. +} + +func Authorization(value string) ReqBuilder { + return func(request *http.Request) error { + request.Header.Set("Authorization", value) + return nil + } +} + +type BearerTokenProvider func() (string, error) + +func BearerToken(acquireToken BearerTokenProvider) ReqBuilder { + return func(request *http.Request) error { + if token, err := acquireToken(); err != nil { + return err + } else { + authValue := fmt.Sprintf("Bearer %s", token) + return Authorization(authValue)(request) + } + } +} + +func Body(content []byte) ReqBuilder { + return func(request *http.Request) error { + request.ContentLength = int64(len(content)) + + if len(content) == 0 { + // see code comments in Request.NewRequestWithContext about an + // empty body and backward compat. + request.Body = http.NoBody + request.GetBody = func() (io.ReadCloser, error) { + return http.NoBody, nil + } + } else { + request.Body = io.NopCloser(bytes.NewBuffer(content)) + + // the following code does the same as Request.NewRequestWithContext + // so 307 and 308 redirects can replay the body. + request.GetBody = func() (io.ReadCloser, error) { + r := bytes.NewReader(content) + return io.NopCloser(r), nil + } + } + + return nil + } +} + +func JsonBody(content interface{}) ReqBuilder { + return func(request *http.Request) error { + var json = jsoniter.ConfigCompatibleWithStandardLibrary // (*) + if data, err := json.Marshal(content); err != nil { + return err + } else { + return Body(data)(request) + } + } + // (*) json-iterator lib. + // We use it in place of json from Go's standard lib b/c it can handle + // the serialisation of fields of type map[interface {}]interface{} + // where the built-in json module will blow up w/ + // json: unsupported type: map[interface {}]interface{} + // If you're reading in YAML and then writing it out as JSON you could get + // bitten by this. 
For example, say you use "gopkg.in/yaml.v2" to read + // some YAML that has a field containing arbitrary JSON into a struct + // with a field X of type interface{}---you don't know what the JSON looks + // like, but later on you still want to be able to write it out. + // The YAML lib will read the JSON into X with a type of + // map[interface {}]interface{} + // but when you call the built-in json.Marshal, it'll blow up in your face + // b/c it doesn't know how to handle that type. + // See: + // - https://stackoverflow.com/questions/35377477 +} + +// TODO also implement streaming body? most of the standard libs aren't built +// w/ streaming in mind, so in practice you'll likely have the whole body in +// memory most of the time for common cases---e.g. JSON, YAML. + +// TODO nil pointer checks. Mostly not implemented!! Catch all occurrences +// of slices, i/f, function args and return an error if nil gets passed in. +// Then write test cases for each. What a schlep! diff --git a/osmops/util/http/builders_test.go b/osmops/util/http/builders_test.go new file mode 100644 index 0000000..c025ae1 --- /dev/null +++ b/osmops/util/http/builders_test.go @@ -0,0 +1,342 @@ +package http + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "testing" + + "gopkg.in/yaml.v2" + + u "github.com/martel-innovate/osmops/osmops/util" +) + +func TestSimpleGetRequest(t *testing.T) { + hp, _ := u.ParseHostAndPort("x:80") + url, _ := hp.Http("/a/b") + req, err := BuildRequest( + GET, At(url), + ) + + if err != nil { + t.Fatalf("want request, but got error: %v", err) + } + + wantMethod := "GET" + if req.Method != wantMethod { + t.Errorf("want: %s; got: %s", wantMethod, req.Method) + } + + wantUrl := "http://x:80/a/b" + if req.URL.String() != wantUrl { + t.Errorf("want: %s; got: %s", wantUrl, req.URL.String()) + } + + wantHost := "x:80" + if req.Host != wantHost { + t.Errorf("want: %s; got: %s", wantHost, req.Host) + } +} + +func TestSimplePostRequest(t *testing.T) { + hp, _ := 
u.ParseHostAndPort("x:80") + url, _ := hp.Http("/a/b") + content := []byte("42") + req, err := BuildRequest( + POST, At(url), + Body(content), + ) + + if err != nil { + t.Fatalf("want request, but got error: %v", err) + } + + wantMethod := "POST" + if req.Method != wantMethod { + t.Errorf("want: %s; got: %s", wantMethod, req.Method) + } + + wantUrl := "http://x:80/a/b" + if req.URL.String() != wantUrl { + t.Errorf("want: %s; got: %s", wantUrl, req.URL.String()) + } + + wantHost := "x:80" + if req.Host != wantHost { + t.Errorf("want: %s; got: %s", wantHost, req.Host) + } + + wantContentLength := int64(2) + if req.ContentLength != wantContentLength { + t.Errorf("want: %d; got: %d", wantContentLength, req.ContentLength) + } + + gotBody, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("want: %v; got: %v", content, err) + } + if string(gotBody) != string(content) { + t.Errorf("want: %v; got: %v", content, gotBody) + } + + gotBodyReader, err := req.GetBody() + if err != nil { + t.Errorf("want %v; got: %v", content, err) + } + gotBody, err = ioutil.ReadAll(gotBodyReader) + if err != nil { + t.Errorf("want: %v; got: %v", content, err) + } + if string(gotBody) != string(content) { + t.Errorf("want: %v; got: %v", content, gotBody) + } +} + +func TestSimplePutRequest(t *testing.T) { + hp, _ := u.ParseHostAndPort("x:80") + url, _ := hp.Http("/a/b") + content := []byte("42") + req, err := BuildRequest( + PUT, At(url), + Body(content), + ) + + if err != nil { + t.Fatalf("want request, but got error: %v", err) + } + + wantMethod := "PUT" + if req.Method != wantMethod { + t.Errorf("want: %s; got: %s", wantMethod, req.Method) + } + + wantContentLength := int64(2) + if req.ContentLength != wantContentLength { + t.Errorf("want: %d; got: %d", wantContentLength, req.ContentLength) + } + + gotBody, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("want: %v; got: %v", content, err) + } + if string(gotBody) != string(content) { + t.Errorf("want: %v; got: %v", 
content, gotBody) + } +} + +func TestEmptyBody(t *testing.T) { + content := []byte("") + req, err := BuildRequest( + Body(content), + ) + + if err != nil { + t.Fatalf("want request, but got error: %v", err) + } + + wantContentLength := int64(0) + if req.ContentLength != wantContentLength { + t.Errorf("want: %d; got: %d", wantContentLength, req.ContentLength) + } + + gotBody, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("want empty body; got: %v", err) + } + if string(gotBody) != "" { + t.Errorf("want empty body; got: %v", gotBody) + } + + gotBodyReader, err := req.GetBody() + if err != nil { + t.Errorf("want empty body; got: %v", err) + } + gotBody, err = ioutil.ReadAll(gotBodyReader) + if err != nil { + t.Errorf("want empty body; got: %v", err) + } + if string(gotBody) != "" { + t.Errorf("want empty body; got: %v", gotBody) + } +} + +func TestJsonBodyNilContent(t *testing.T) { + req, err := BuildRequest( + JsonBody(nil), + ) + if err != nil { + t.Fatalf("want null JSON value; got: %v", err) + } + + gotBody, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatalf("want null JSON value; got: %v", err) + } + if string(gotBody) != "null" { + t.Errorf("want null JSON body; got: %v", string(gotBody)) + } +} + +func TestJsonBodyNonNilContent(t *testing.T) { + content := "yo!" 
+ req, err := BuildRequest( + JsonBody(content), + ) + if err != nil { + t.Fatalf("want JSON body; got: %v", err) + } + + gotBody, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatalf("want: body; got: %v", err) + } + serializedContent := fmt.Sprintf(`"%s"`, content) + if string(gotBody) != serializedContent { + t.Errorf("want: %s; got: %v", serializedContent, string(gotBody)) + } +} + +type Unknown struct { + X interface{} `yaml:"x" json:"x"` +} + +func TestJsonBodyMarshalUnknownType(t *testing.T) { + serializedContent := `{"x":{"y":1}}` + yamlData := []byte(serializedContent) + content := Unknown{} + if err := yaml.Unmarshal(yamlData, &content); err != nil { + t.Fatalf("unmarshal: %v", err) + } + + req, err := BuildRequest( + JsonBody(content), + ) + if err != nil { + t.Fatalf("want JSON body; got: %v", err) + } + + gotBody, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatalf("want: body; got: %v", err) + } + if string(gotBody) != serializedContent { + t.Errorf("want: %s; got: %v", serializedContent, string(gotBody)) + } +} + +func TestJsonBodyMarshalError(t *testing.T) { + notSerializable := func() {} + req, err := BuildRequest( + JsonBody(notSerializable), + ) + if err == nil { + t.Errorf("want JSON marshal error; got: %v", req) + } +} + +var acceptHeaderFixtures = []struct { + in []u.EnumIx + want string +}{ + {in: []u.EnumIx{}, want: ""}, + {in: []u.EnumIx{MediaType.JSON}, want: "Accept: application/json\r\n"}, + { + in: []u.EnumIx{MediaType.JSON, MediaType.YAML}, + want: "Accept: application/json, application/yaml\r\n", + }, +} + +func TestAcceptHeader(t *testing.T) { + for k, d := range acceptHeaderFixtures { + req, err := BuildRequest( + Accept(d.in...), + ) + if err != nil { + t.Fatalf("[%d] want request, but got error: %v", k, err) + } + + var buf bytes.Buffer + if err := req.Header.Write(&buf); err != nil { + t.Fatalf("[%d] want: %s; got: %v", k, d.want, err) + } + + got := buf.String() + if got != d.want { + t.Errorf("[%d] want: %s; 
got: %s", k, d.want, got) + } + } +} + +var contentTypeHeaderFixtures = []struct { + in u.EnumIx + want string +}{ + { + in: MediaType.JSON, + want: "Content-Type: application/json\r\n", + }, + { + in: MediaType.YAML, + want: "Content-Type: application/yaml\r\n", + }, + { + in: MediaType.GZIP, + want: "Content-Type: application/gzip\r\n", + }, +} + +func TestContentTypeHeader(t *testing.T) { + for k, d := range contentTypeHeaderFixtures { + req, err := BuildRequest( + Content(d.in), + ) + if err != nil { + t.Fatalf("[%d] want request, but got error: %v", k, err) + } + + var buf bytes.Buffer + if err := req.Header.Write(&buf); err != nil { + t.Fatalf("[%d] want: %s; got: %v", k, d.want, err) + } + + got := buf.String() + if got != d.want { + t.Errorf("[%d] want: %s; got: %s", k, d.want, got) + } + } +} + +func TestBearerTokenHeader(t *testing.T) { + tokenProvider := func() (string, error) { return "token", nil } + req, err := BuildRequest( + BearerToken(tokenProvider), + ) + + if err != nil { + t.Fatalf("want request, but got error: %v", err) + } + + want := "Authorization: Bearer token\r\n" + + var buf bytes.Buffer + if err := req.Header.Write(&buf); err != nil { + t.Fatalf("want: %s; got: %v", want, err) + } + + got := buf.String() + if got != want { + t.Errorf("want: %s; got: %s", want, got) + } +} + +func TestBearerTokenHeaderFail(t *testing.T) { + tokenProvider := func() (string, error) { return "", errors.New("ouch!") } + req, err := BuildRequest( + BearerToken(tokenProvider), + ) + + if err == nil { + t.Fatalf("want error, but got request: %v", req) + } +} diff --git a/osmops/util/http/client.go b/osmops/util/http/client.go new file mode 100644 index 0000000..636a6c1 --- /dev/null +++ b/osmops/util/http/client.go @@ -0,0 +1,193 @@ +// Utils to make your life slightly easier when working with the HTTP client +// from `net/http`. 
+// +// Request building +// +// You can put together a request assembly line by mixing and matching little, +// discrete, reusable pieces of functionality encapsulated by ReqBuilder +// functions. Request building reads like an HTTP request on the wire and is +// more type-safe than doing it the Go way. Example: +// +// req, err := BuildRequest( +// POST, At(url), +// Content(MediaType.JSON), +// Body(content), +// ) +// +// Response handling +// +// Similarly, ResHandler lets you build a response processing pipeline. You +// can use either or both request assembly and response pipeline facilities +// with Go's built-in HTTP client from `net/http`. Example: +// +// client := &http.Client{Timeout: time.Second * 10} +// if res, err := client.Do(req); err != nil { +// err = HandleResponse(req, jsonReader, responseLogger) +// // HandleResponse calls res.Body.Close for you if needed. +// } +// +// Message exchange +// +// Out of convenience, there's also an Exchange type to string builders and +// handlers together in an HTTP request-reply message flow where execution +// stops at the first error---so you don't have to litter your code with +// `if err ...` statements. Here's an example that also showcases some of +// the built-in ResHandlers. +// +// url, _ := url.Parse("http://yapi") +// responseData := YouData{} +// client := &http.Client{Timeout: time.Second * 10} +// Request( +// POST, At(url), +// Content(MediaType.JSON), +// Body(content), +// ). +// SetHandler(ExpectSuccess(), ReadJsonResponse(&responseData)). +// RunWith(client.Do) +// +// Notice RunWith takes a ReqSender so you can easily unit-test your code by +// swapping out an actual HTTP call with a stub. 
// ReqBuilder sets some fields of an HTTP request, possibly returning an
// error if something goes wrong.
// Builders are meant to be chained: each one contributes its bit to the
// request under construction and the build process stops at the first
// error, so callers don't have to sprinkle `if err != nil` everywhere.
type ReqBuilder func(request *http.Request) error

// emptyRequest makes a bare request with a background context and an
// initialized (empty) header map, ready for ReqBuilders to fill in.
func emptyRequest() *http.Request {
	req := new(http.Request)
	req = req.WithContext(req.Context())
	req.Header = make(http.Header)
	return req
}

// BuildRequest runs the given builders, in order, to assemble an HTTP
// request. If every builder succeeds, the returned request is good to go
// and the error is nil. Otherwise BuildRequest stops at the first builder
// that fails, returning the partially built request together with that
// builder's error.
func BuildRequest(builders ...ReqBuilder) (*http.Request, error) {
	req := emptyRequest()
	for _, step := range builders {
		if err := step(req); err != nil {
			return req, err
		}
	}
	return req, nil
}

// ResHandler processes an HTTP response.
// Handlers can be chained so each one does a single thing with the
// response; the processing chain stops at the first error.
type ResHandler interface {
	// Handle processes the given response, possibly returning an error if
	// something goes wrong.
	Handle(response *http.Response) error
}

// HandleResponse feeds the given response to each ResHandler, in the same
// order as the input arguments. It stops at the first handler that errors
// out and returns that error; if every handler succeeds, the returned
// error is nil.
// When the response carries a body, HandleResponse closes the associated
// reader just before returning, so handlers don't have to.
func HandleResponse(response *http.Response, handlers ...ResHandler) error {
	if response == nil {
		return errors.New("nil response")
	}
	if body := response.Body; body != nil {
		defer body.Close()
	}
	for i, handler := range handlers {
		if handler == nil {
			return fmt.Errorf("nil response handler [%d]", i)
		}
		if err := handler.Handle(response); err != nil {
			return err
		}
	}
	return nil
}

// Exchange represents an HTTP request-reply message flow initiated by the
// client.
type Exchange struct {
	builders []ReqBuilder
	handlers []ResHandler
}

// Request instantiates a new Exchange with the given ReqBuilder functions.
func Request(builders ...ReqBuilder) *Exchange {
	return &Exchange{builders: builders}
}

// SetHandler specifies the handlers the Exchange will use to process the
// response. It returns the Exchange itself so calls can be chained.
func (e *Exchange) SetHandler(handlers ...ResHandler) *Exchange {
	e.handlers = handlers
	return e
}

// ReqSender is a function to send an HTTP request and receive a response
// from the server. An error gets returned if something goes wrong---e.g.
// a network failure.
type ReqSender func(*http.Request) (*http.Response, error)
If +// the send function returns an error, RunWith stops there, returning the +// error and whatever response the send function returned---that would be +// nil in most cases. +// +// Otherwise, if the response was received successfully, RunWith passes it +// on to the handlers configured by SetHandler, in turn and in the same order +// as the handlers were passed to SetHandler. If there's no handlers, then +// the response gets returned without further processing along with a nil +// error. Similarly, if all handlers are successful, the response gets +// returned with a nil error. Otherwise the response gets returned with +// the error output by the first failed handler---RunWith won't call any +// handlers following the failed one. +func (e *Exchange) RunWith(send ReqSender) (*http.Response, error) { + if send == nil { + return nil, errors.New("nil ReqSender") + } + + req, err := BuildRequest(e.builders...) + if err != nil { + return nil, err + } + + res, err := send(req) + if err != nil { + return res, err + } + + return res, HandleResponse(res, e.handlers...) +} + +// TODO nil pointer checks. Mostly not implemented!! Catch all occurrences +// of slices, i/f, function args and return an error if nil gets passed in. +// Then write test cases for each. What a schlep! 
diff --git a/osmops/util/http/client_test.go b/osmops/util/http/client_test.go new file mode 100644 index 0000000..5958c6a --- /dev/null +++ b/osmops/util/http/client_test.go @@ -0,0 +1,159 @@ +package http + +import ( + "bytes" + "errors" + "net/http" + "testing" +) + +func TestExchangeRequestBuilderFailure(t *testing.T) { + res, err := Request(GET, At(nil)).RunWith(http.DefaultClient.Do) + if res != nil { + t.Errorf("want nil response; got: %v", res) + } + if err == nil { + t.Errorf("want request build error; got: nil") + } +} + +func TestExchangeStopOnSendFailure(t *testing.T) { + send := func(req *http.Request) (*http.Response, error) { + return &http.Response{}, errors.New("ouch!") + } + res, err := Request(GET).RunWith(send) + if res == nil { + t.Errorf("want empty response; got: nil") + } + if err == nil { + t.Errorf("want send error; got: nil") + } +} + +func TestExchangeErrorsOnNilResponse(t *testing.T) { + send := func(req *http.Request) (*http.Response, error) { + return nil, nil + } + res, err := Request(GET).RunWith(send) + if res != nil { + t.Errorf("want nil response; got: %v", res) + } + if err == nil { + t.Errorf("want handle error; got: nil") + } +} + +func TestExchangeNoHandleIfNoHandlers(t *testing.T) { + send := func(req *http.Request) (*http.Response, error) { + return &http.Response{}, nil + } + res, err := Request(GET).RunWith(send) + if res == nil { + t.Errorf("want empty response; got: nil") + } + if err != nil { + t.Errorf("want no error; got: %v", err) + } +} + +type EmptyBody struct { + bytes.Buffer + closed bool +} + +func (e *EmptyBody) Close() error { + e.closed = true + return nil +} + +func TestExchangeCloseBody(t *testing.T) { + send := func(req *http.Request) (*http.Response, error) { + return &http.Response{Body: &EmptyBody{}}, nil + } + res, err := Request(GET).RunWith(send) + if res == nil { + t.Errorf("want response; got: nil") + } + if err != nil { + t.Errorf("want no error; got: %v", err) + } + + //lint:ignore SA5011 linter 
obviously got confused about nil ptr here + body := (res.Body).(*EmptyBody) + if !body.closed { + t.Errorf("didn't close body stream on exit") + } +} + +type GrabStatusCode struct { + code int +} + +func (x *GrabStatusCode) Handle(response *http.Response) error { + x.code = response.StatusCode + return nil +} + +type FailingHandler struct{} + +func (x *FailingHandler) Handle(response *http.Response) error { + return errors.New("ouch!") +} + +func TestExchangeHandleResponseSuccessfully(t *testing.T) { + send := func(req *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: 200}, nil + } + statusCodeGrabber := &GrabStatusCode{} + res, err := Request(GET). + SetHandler(statusCodeGrabber). + RunWith(send) + + if res == nil { + t.Errorf("want response; got: nil") + } + if err != nil { + t.Errorf("want no error; got: %v", err) + } + if statusCodeGrabber.code != 200 { + t.Errorf("want: 200; got: %d", statusCodeGrabber.code) + } +} + +func TestExchangeHandleResponseFailure(t *testing.T) { + send := func(req *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: 200}, nil + } + statusCodeGrabber := &GrabStatusCode{} + res, err := Request(GET). + SetHandler(&FailingHandler{}, statusCodeGrabber). 
+ RunWith(send) + + if res == nil { + t.Errorf("want response; got: nil") + } + if err == nil { + t.Errorf("want error; got: nil") + } + if statusCodeGrabber.code == 200 { + t.Errorf("want: don't run handlers following failed one") + } +} + +func TestRunWithNilReqSender(t *testing.T) { + if _, err := Request().RunWith(nil); err == nil { + t.Errorf("want error; got: nil") + } +} + +func TestHandleResponseWithNoHandlers(t *testing.T) { + if err := HandleResponse(&http.Response{}); err != nil { + t.Errorf("want: do nothing if no handler; got: %v", err) + } +} + +func TestHandleResponseErrorOnNilHandler(t *testing.T) { + if err := HandleResponse(&http.Response{}, nil); err == nil { + t.Errorf("want: error; got: nil") + } +} diff --git a/osmops/util/http/reshandlers.go b/osmops/util/http/reshandlers.go new file mode 100644 index 0000000..fb904a1 --- /dev/null +++ b/osmops/util/http/reshandlers.go @@ -0,0 +1,99 @@ +package http + +import ( + "fmt" + "net/http" + + jsoniter "github.com/json-iterator/go" + + "github.com/martel-innovate/osmops/osmops/util" +) + +type jsonResReader struct { + deserialized interface{} +} + +func (r *jsonResReader) Handle(res *http.Response) error { + if res == nil { + return fmt.Errorf("nil response") + } + if r.deserialized == nil { + return fmt.Errorf("nil deserialization target") + } + + var json = jsoniter.ConfigCompatibleWithStandardLibrary // (*) + decoder := json.NewDecoder(res.Body) + return decoder.Decode(r.deserialized) + + // (*) json-iterator lib. + // We use it in the JsonBody builder to work around encoding/json's + // inability to serialise map[interface {}]interface{} types. Here + // we're parsing JSON into a data structure and AFAICT the built-in + // json lib can parse pretty much any valid JSON you throw at it. + // So the only reason to use json-iterator in place of encoding/json + // is performance: json-iterator is way faster than encoding/json. 
+} + +// ReadJsonResponse builds a ResHandler to deserialise a JSON response body, +// returning any error that stopped it from deserializing the response body. +// +// Example. +// +// client := &http.Client{Timeout: time.Second * 10} +// target := &MyData{} +// Request( +// GET, At(url), +// Accept(MediaType.JSON), +// ). +// SetHandler(ExpectSuccess(), ReadJsonResponse(target)). +// RunWith(client.Do) +// +func ReadJsonResponse(target interface{}) ResHandler { + return &jsonResReader{deserialized: target} +} + +type expectSuccessfulResponse struct{} + +func (e expectSuccessfulResponse) Handle(res *http.Response) error { + if res.StatusCode < 200 || res.StatusCode > 299 { + return fmt.Errorf("expected successful response, got: %s", res.Status) + } + return nil +} + +// ExpectSuccess builds a ResHandler to check for successful responses. +// If the response code is in the range 200-299 (both inclusive), the +// returned ResHandler does nothing. Otherwise it returns an error---that +// stops any following ResHandler to run. +func ExpectSuccess() ResHandler { + return expectSuccessfulResponse{} +} + +// TODO. Implement expect for other status code ranges too? +// Informational responses (100–199) +// Successful responses (200–299) --> DONE +// Redirects (300–399) +// Client errors (400–499) +// Server errors (500–599) + +type expectStatusCodeInSet struct { + expectedStatusCodes util.IntSet +} + +func (e *expectStatusCodeInSet) Handle(res *http.Response) error { + if !e.expectedStatusCodes.Contains(res.StatusCode) { + return fmt.Errorf("unexpected response status: %s", res.Status) + } + return nil +} + +// ExpectStatusCodeOneOf builds a ResHandler to check the response status code +// is among the given ones. +// If the response code is in the given list, the returned ResHandler does +// nothing. Otherwise it returns an error---that stops any following ResHandler +// to run. 
+func ExpectStatusCodeOneOf(expectedStatusCode ...int) ResHandler { + return &expectStatusCodeInSet{ + expectedStatusCodes: util.ToIntSet(expectedStatusCode...), + } +} diff --git a/osmops/util/http/reshandlers_test.go b/osmops/util/http/reshandlers_test.go new file mode 100644 index 0000000..a94d20c --- /dev/null +++ b/osmops/util/http/reshandlers_test.go @@ -0,0 +1,134 @@ +package http + +import ( + "io" + "net/http" + "strings" + "testing" +) + +type TestData struct { + X int `json:"x"` + Y interface{} `json:"y"` +} + +func stringReader(data string) io.ReadCloser { + return io.NopCloser(strings.NewReader(data)) +} + +func send(response *http.Response) ReqSender { + return func(req *http.Request) (*http.Response, error) { + return response, nil + } +} + +func TestJsonReaderErrorOnNilResponse(t *testing.T) { + target := TestData{} + reader := ReadJsonResponse(&target) + if err := reader.Handle(nil); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestJsonReaderErrorOnNilTarget(t *testing.T) { + reader := ReadJsonResponse(nil) + if err := reader.Handle(&http.Response{}); err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestJsonReaderErrorOnUnexpectedResponseCode(t *testing.T) { + target := TestData{} + response := &http.Response{ + StatusCode: 400, + Body: stringReader(`{"x": 1, "y": {"z": 2}}`), + } + _, err := Request(GET). + SetHandler(ExpectSuccess(), ReadJsonResponse(&target)). + RunWith(send(response)) + if err == nil { + t.Errorf("want: error; got: nil") + } +} + +func TestJsonReaderGetData(t *testing.T) { + target := TestData{} + response := &http.Response{ + Body: stringReader(`{"x": 1, "y": {"z": 2}}`), + } + res, err := Request(GET). + SetHandler(ReadJsonResponse(&target)). 
+ RunWith(send(response)) + + if err != nil { + t.Errorf("want: deserialized JSON; got: %v", err) + } + if res != response { + t.Errorf("want: %v; got: %v", response, res) + } + if target.X != 1.0 { + t.Errorf("want: deserialized JSON; got: %+v", target) + } + if y, ok := target.Y.(map[string]interface{}); !ok { + t.Errorf("want: deserialized JSON; got: %+v", target) + } else { + if y["z"] != 2.0 { + t.Errorf("want: deserialized JSON; got: %+v", target) + } + } +} + +func TestExpectSuccess(t *testing.T) { + response := &http.Response{} + for code := 200; code < 300; code++ { + response.StatusCode = code + _, err := Request(GET). + SetHandler(ExpectSuccess()). + RunWith(send(response)) + if err != nil { + t.Errorf("want: success; got: %v", err) + } + } + for _, code := range []int{100, 199, 300, 400, 500} { + response.StatusCode = code + _, err := Request(GET). + SetHandler(ExpectSuccess()). + RunWith(send(response)) + if err == nil { + t.Errorf("[%d] want: error; got: nil", code) + } + } +} + +func TestExpectStatusCodeOneOf(t *testing.T) { + response := &http.Response{} + want := []int{200, 201, 404} + for _, code := range want { + response.StatusCode = code + _, err := Request(GET). + SetHandler(ExpectStatusCodeOneOf(want...)). + RunWith(send(response)) + if err != nil { + t.Errorf("want: success; got: %v", err) + } + } + for _, code := range []int{100, 199, 300, 400, 500} { + response.StatusCode = code + _, err := Request(GET). + SetHandler(ExpectStatusCodeOneOf(want...)). + RunWith(send(response)) + if err == nil { + t.Errorf("[%d] want: error; got: nil", code) + } + } +} + +func TestExpectStatusCodeNone(t *testing.T) { + response := &http.Response{StatusCode: 200} + _, err := Request(GET). + SetHandler(ExpectStatusCodeOneOf()). 
+ RunWith(send(response)) + if err == nil { + t.Errorf("want: error; got: nil") + } +} diff --git a/osmops/util/http/sec/session.go b/osmops/util/http/sec/session.go new file mode 100644 index 0000000..be4d3e2 --- /dev/null +++ b/osmops/util/http/sec/session.go @@ -0,0 +1,94 @@ +// sec provides basic means to manage the retrieval and refresh of auth tokens. +package sec + +import ( + "errors" + "fmt" +) + +// TokenStore defines the how to store and retrieve token data between calls. +type TokenStore interface { + // Get retrieves the the previously stored token if any. A nil return + // value means there's no token in the store. + Get() *Token + // Set stores the current token, discarding any previous token. + Set(t *Token) + // Clear removes the token from the store, if present. + Clear() +} + +// MemoryTokenStore stores tokens in memory. +type MemoryTokenStore struct { + token *Token +} + +// NOTE. We can implement a file system store too if needed, but I don't +// think it's going to be any useful at this stage. + +func (s *MemoryTokenStore) Get() *Token { + if s.token == nil { + return nil + } + return s.token +} + +func (s *MemoryTokenStore) Set(t *Token) { + s.token = t +} + +func (s *MemoryTokenStore) Clear() { + s.token = nil +} + +// TokenProvider acquires a fresh token from an auth endpoint, returning +// an error if something goes wrong. +type TokenProvider func() (*Token, error) + +// TokenManager manages the storage and lifecycle of tokens. +type TokenManager struct { + acquireToken TokenProvider + store TokenStore +} + +// NewTokenManager instantiates a TokenManager, returning an error if any of +// the inputs are nil. 
+func NewTokenManager(provider TokenProvider, store TokenStore) ( + *TokenManager, error) { + if provider == nil { + return nil, errors.New("nil provider") + } + if store == nil { + return nil, errors.New("nil store") + } + return &TokenManager{ + acquireToken: provider, + store: store, + }, nil +} + +// GetAccessToken retrieves a valid access token if possible, otherwise it +// returns an error. A token is valid if it can still be used for at least +// 30 seconds before it expires. +// If a valid token is in the store, then GetAccessToken returns it. Otherwise +// it delegates the fetching of a fresh token to the TokenProvider. If the +// provider can acquire a valid token, then the token gets stored in the +// TokenStore before returning it. In all other cases, GetAccessToken returns +// an error. +func (m *TokenManager) GetAccessToken() (*Token, error) { + currentToken := m.store.Get() + if currentToken != nil && currentToken.SecondsLeftBeforeExpiry() > 30 { + return currentToken, nil + } + + m.store.Clear() + if newToken, err := m.acquireToken(); err != nil { + return nil, err + } else { + if newToken.HasExpired() { + return nil, fmt.Errorf( + "auth endpoint returned expired token: %+v", *newToken) + } + m.store.Set(newToken) + return newToken, nil + } +} diff --git a/osmops/util/http/sec/session_test.go b/osmops/util/http/sec/session_test.go new file mode 100644 index 0000000..470fa74 --- /dev/null +++ b/osmops/util/http/sec/session_test.go @@ -0,0 +1,130 @@ +package sec + +import ( + "errors" + "fmt" + "testing" + "time" +) + +var newTokenManagerErrorIfNilArgsFixtures = []struct { + provider TokenProvider + store TokenStore +}{ + {nil, nil}, {nil, &MemoryTokenStore{}}, + {func() (*Token, error) { return nil, nil }, nil}, +} + +func TestNewTokenManagerErrorIfNilArgs(t *testing.T) { + for k, d := range newTokenManagerErrorIfNilArgsFixtures { + if _, err := NewTokenManager(d.provider, d.store); err == nil { + t.Errorf("[%d] want error; got: nil", k) + } + } +} + 
+type fakeProvider struct { + callCount int + lastToken *Token +} + +func (p *fakeProvider) generateToken(secondsValid time.Duration) ( + *Token, error) { + p.callCount += 1 + data := fmt.Sprintf("secret-%d", p.callCount) + p.lastToken = NewToken(data, secondsAfterNow(secondsValid)) + return p.lastToken, nil +} + +func (p *fakeProvider) fetchNewValidToken() (*Token, error) { + return p.generateToken(600) +} + +func (p *fakeProvider) fetchNewExpiredToken() (*Token, error) { + return p.generateToken(0) +} + +func (p *fakeProvider) fetchError() (*Token, error) { + return nil, errors.New("ouch!") +} + +func TestFetchFreshToken(t *testing.T) { + provider := &fakeProvider{} + store := &MemoryTokenStore{} + mngr, _ := NewTokenManager(provider.fetchNewValidToken, store) + + token, err := mngr.GetAccessToken() + if err != nil { + t.Fatalf("want: token; got: %v", err) + } + if token != provider.lastToken { + t.Errorf("want: %v; got: %v", provider.lastToken, token) + } + if provider.callCount != 1 { + t.Errorf("want: 1; got: %d", provider.callCount) + } + if token != store.token { + t.Errorf("want: %v; got: %v", token, store.token) + } +} + +func TestUseTokenInStore(t *testing.T) { + provider := &fakeProvider{} + store := &MemoryTokenStore{ + token: NewToken("data", secondsAfterNow(600)), + } + mngr, _ := NewTokenManager(provider.fetchNewValidToken, store) + + token, err := mngr.GetAccessToken() + if err != nil { + t.Fatalf("want: token; got: %v", err) + } + if token != store.token { + t.Errorf("want: %v; got: %v", token, store.token) + } + if provider.callCount != 0 { + t.Errorf("want: 0; got: %d", provider.callCount) + } +} + +func TestRefreshTokenAboutToExpire(t *testing.T) { + provider := &fakeProvider{} + store := &MemoryTokenStore{ + token: NewToken("data", secondsAfterNow(10)), + } + mngr, _ := NewTokenManager(provider.fetchNewValidToken, store) + + token, err := mngr.GetAccessToken() + if err != nil { + t.Fatalf("want: token; got: %v", err) + } + if token != 
// Token represents an opaque token string credential with an expiry date.
type Token struct {
	expiresAt time.Time
	data      string
}

// SecondsLeftBeforeExpiry returns the number of seconds until the token
// expires or 0 if the token has expired already.
func (t *Token) SecondsLeftBeforeExpiry() uint64 {
	left := time.Until(t.expiresAt)
	if left < 0 {
		return 0
	}
	return uint64(left.Seconds())
}

// HasExpired tells if the token has gone past its expiry date.
func (t *Token) HasExpired() bool {
	return t.SecondsLeftBeforeExpiry() == 0
}

// String returns the wire representation of the token.
func (t *Token) String() string {
	return t.data
}

// NewToken creates a Token from its wire representation and expiry date as
// number of fractional seconds since the Epoch.
// The fractional part gets truncated (a one-second error doesn't matter
// here; see https://stackoverflow.com/questions/8022389 if we ever need
// better accuracy). Negative and NaN expiry values get clamped to the
// Epoch, yielding an already-expired token.
func NewToken(data string, expiry float64) *Token {
	secondsSinceTheEpoch := int64(expiry) // truncates the fractional part
	// expiry != expiry holds only for NaN. Guard for it explicitly: the Go
	// spec says converting NaN (or any out-of-range float) to int64 is
	// implementation-specific, so the previous assumption that the result
	// would come out negative isn't portable.
	if expiry != expiry || secondsSinceTheEpoch < 0 {
		secondsSinceTheEpoch = 0
	}
	return &Token{
		expiresAt: time.Unix(secondsSinceTheEpoch, 0),
		data:      data,
	}
}
// withTempDir runs do with the path of a freshly created temp dir and
// deletes the dir (and its contents) once do returns. If the temp dir
// can't be created, the test is flagged as failed and do never runs.
func withTempDir(t *testing.T, do func(string)) {
	tempDir, err := ioutil.TempDir("", "tgz-test")
	if err != nil {
		t.Errorf("couldn't create temp dir: %v", err)
		return
	}
	defer os.RemoveAll(tempDir)
	defer os.Chmod(tempDir, 0700) // make sure you can remove it
	do(tempDir)
}
got, _ := file.ListPaths(extractedDir) + if !reflect.DeepEqual(want, got) { + t.Errorf("want: %v; got: %v", want, got) + } +} + +func TestTgzThenExtract(t *testing.T) { + withTempDir(t, func(tempDirPath string) { + sourceDir := findTestDataDir() + tarball, _ := file.ParseAbsPath(path.Join(tempDirPath, "test.tgz")) + extractedDir := path.Join(tempDirPath, ArchiveTestDirName) + + MakeTarball(sourceDir, tarball) + if err := ExtractTarball(tarball, tempDirPath); err != nil { + t.Fatalf("want: extract; got: %v", err) + } + checkExtractedPaths(t, sourceDir, extractedDir) + }) +} + +func checkFileContent(pathname string) error { + name := path.Base(pathname) + content, err := ioutil.ReadFile(pathname) + if err != nil { + return err + } + text := string(content) + if name != text { + return fmt.Errorf("path = %s; content = %s", pathname, text) + } + return nil +} + +func checkExtractedFiles(t *testing.T, extractedDir string) { + targetDir, _ := file.ParseAbsPath(extractedDir) + scanner := file.NewTreeScanner(targetDir) + es := scanner.Visit(func(node file.TreeNode) error { + if !node.FsMeta.IsDir() { + return checkFileContent(node.NodePath.Value()) + } + return nil + }) + if len(es) > 0 { + t.Errorf("want no errors; got: %v", es) + } +} + +func TestTgzThenExtractContent(t *testing.T) { + withTempDir(t, func(tempDirPath string) { + sourceDir := findTestDataDir() + tarball, _ := file.ParseAbsPath(path.Join(tempDirPath, "test.tgz")) + extractedDir := path.Join(tempDirPath, ArchiveTestDirName) + + MakeTarball(sourceDir, tarball) + if err := ExtractTarball(tarball, tempDirPath); err != nil { + t.Fatalf("want: extract; got: %v", err) + } + checkExtractedFiles(t, extractedDir) + }) +} + +func TestExtractTgzCreatedWithUnixTar(t *testing.T) { + withTempDir(t, func(tempDirPath string) { + sourceDir := findTestDataDir() + tarball, _ := file.ParseAbsPath(sourceDir.Value() + ".tgz") + extractedDir := path.Join(tempDirPath, ArchiveTestDirName) + + if err := ExtractTarball(tarball, 
tempDirPath); err != nil { + t.Fatalf("want: extract; got: %v", err) + } + checkExtractedPaths(t, sourceDir, extractedDir) + checkExtractedFiles(t, extractedDir) + }) +} diff --git a/osmops/util/tgz/archive.go b/osmops/util/tgz/archive.go new file mode 100644 index 0000000..1b2d630 --- /dev/null +++ b/osmops/util/tgz/archive.go @@ -0,0 +1,47 @@ +package tgz + +import ( + "io" + "os" + "path" + + "github.com/martel-innovate/osmops/osmops/util/file" +) + +// WriteFileArchive collects all the files in sourceDir (and its sub-dirs) +// and writes them to a gzip tar archive. +// Each file gets written to the archive at path "b/r" where b is sourceDir's +// base name and r is the file's path relative to sourceDir. For example, if +// sourceDir = "my/source" contains a file "my/source/d/f", that file gets +// archived at "source/d/f". The archive bytes get written to the give sink +// stream. +func WriteFileArchive(sourceDir file.AbsPath, sink io.WriteCloser) error { + archiveBaseDirName := path.Base(sourceDir.Value()) + scanner := file.NewTreeScanner(sourceDir) + writer, err := NewWriter(archiveBaseDirName, sink, WithBestCompression()) + if err != nil { + return err + } + + defer writer.Close() + if es := scanner.Visit(writer.Visitor()); len(es) > 0 { + return es[0] + } + return nil +} + +// MakeTarball collects all the files in sourceDir (and its sub-dirs) and +// writes them to a gzip tar archive file. The archive file is created with +// 0644 permissions at the path specified by tarballPath. +// Each file in sourceDir gets written to the archive at path "b/r" where b +// is sourceDir's base name and r is the file's path relative to sourceDir. +// For example, if sourceDir = "my/source" contains a file "my/source/d/f", +// that file gets archived at "source/d/f". 
+func MakeTarball(sourceDir, tarballPath file.AbsPath) error { + dest, err := os.OpenFile(tarballPath.Value(), + os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + return WriteFileArchive(sourceDir, dest) +} diff --git a/osmops/util/tgz/archive_test.go b/osmops/util/tgz/archive_test.go new file mode 100644 index 0000000..a21b4e6 --- /dev/null +++ b/osmops/util/tgz/archive_test.go @@ -0,0 +1,46 @@ +package tgz + +import ( + "io/fs" + "os" + "path" + "testing" + + "github.com/martel-innovate/osmops/osmops/util/bytez" + "github.com/martel-innovate/osmops/osmops/util/file" +) + +func TestWriteFileArchiveErrOnNilSink(t *testing.T) { + withTempDir(t, func(tempDirPath string) { + sourceDir := findTestDataDir() + if err := WriteFileArchive(sourceDir, nil); err == nil { + t.Errorf("want: error; got: nil") + } + }) +} + +func TestWriteFileArchiveVisitorErr(t *testing.T) { + withTempDir(t, func(tempDirPath string) { + sink := bytez.NewBuffer() + sourceDir, _ := file.ParseAbsPath(tempDirPath) + os.Chmod(tempDirPath, 0200) // visitor can't scan it + + err := WriteFileArchive(sourceDir, sink) + if _, ok := err.(*file.VisitError); !ok { + t.Errorf("want: visit error; got: %v", err) + } + }) +} + +func TestMakeTarballOpenFileErr(t *testing.T) { + withTempDir(t, func(tempDirPath string) { + sourceDir := findTestDataDir() + tarball, _ := file.ParseAbsPath(path.Join(tempDirPath, "test.tgz")) + os.Chmod(tempDirPath, 0400) // can't write tarball to it + + err := MakeTarball(sourceDir, tarball) + if _, ok := err.(*fs.PathError); !ok { + t.Errorf("want: visit error; got: %v", err) + } + }) +} diff --git a/osmops/util/tgz/archive_test_dir.tgz b/osmops/util/tgz/archive_test_dir.tgz new file mode 100644 index 0000000..d0b8386 Binary files /dev/null and b/osmops/util/tgz/archive_test_dir.tgz differ diff --git a/osmops/util/tgz/archive_test_dir/d1/f2 b/osmops/util/tgz/archive_test_dir/d1/f2 new file mode 100644 index 0000000..70f80fc --- /dev/null +++ 
b/osmops/util/tgz/archive_test_dir/d1/f2 @@ -0,0 +1 @@ +f2 \ No newline at end of file diff --git a/osmops/util/tgz/archive_test_dir/d1/f3 b/osmops/util/tgz/archive_test_dir/d1/f3 new file mode 100644 index 0000000..87471a8 --- /dev/null +++ b/osmops/util/tgz/archive_test_dir/d1/f3 @@ -0,0 +1 @@ +f3 \ No newline at end of file diff --git a/osmops/util/tgz/archive_test_dir/d2/d3/f6 b/osmops/util/tgz/archive_test_dir/d2/d3/f6 new file mode 100644 index 0000000..e5bef7d --- /dev/null +++ b/osmops/util/tgz/archive_test_dir/d2/d3/f6 @@ -0,0 +1 @@ +f6 \ No newline at end of file diff --git a/osmops/util/tgz/archive_test_dir/d2/f4 b/osmops/util/tgz/archive_test_dir/d2/f4 new file mode 100644 index 0000000..00cc282 --- /dev/null +++ b/osmops/util/tgz/archive_test_dir/d2/f4 @@ -0,0 +1 @@ +f4 \ No newline at end of file diff --git a/osmops/util/tgz/archive_test_dir/d2/f5 b/osmops/util/tgz/archive_test_dir/d2/f5 new file mode 100644 index 0000000..968c356 --- /dev/null +++ b/osmops/util/tgz/archive_test_dir/d2/f5 @@ -0,0 +1 @@ +f5 \ No newline at end of file diff --git a/osmops/util/tgz/archive_test_dir/f1 b/osmops/util/tgz/archive_test_dir/f1 new file mode 100644 index 0000000..9dd7ac9 --- /dev/null +++ b/osmops/util/tgz/archive_test_dir/f1 @@ -0,0 +1 @@ +f1 \ No newline at end of file diff --git a/osmops/util/tgz/reader.go b/osmops/util/tgz/reader.go new file mode 100644 index 0000000..c62b2e0 --- /dev/null +++ b/osmops/util/tgz/reader.go @@ -0,0 +1,89 @@ +package tgz + +import ( + "archive/tar" + "compress/gzip" + "fmt" + "io" + "os" +) + +// EntryReader processes an entry in a tar archive. +// The entry is at archivePath and has an associated file metadata whereas +// the content should only be read if the entry is a regular file. +type EntryReader func( + archivePath string, fi os.FileInfo, content io.Reader) error + +// Reader calls an EntryReader on each entry in a tar archive. +type Reader interface { + // IterateEntries calls process on each tar entry. 
+ // Regardless of errors, IterateEntries closes the archive stream, + // making the Reader unusable. + IterateEntries(process EntryReader) error + // Close releases all archive stream resources, making the Reader + // unusable. Subsequent calls have no effect. + Close() +} + +type rdr struct { + source io.ReadCloser + deflateStream *gzip.Reader + archive *tar.Reader + closed bool +} + +// NewReader creates a Reader to process entries contained in the given +// gzip-compressed tar archive. +func NewReader(source io.ReadCloser) (Reader, error) { + if source == nil { + return nil, fmt.Errorf("nil source") + } + deflateStream, err := gzip.NewReader(source) + if err != nil { + return nil, err + } + return &rdr{ + source: source, + deflateStream: deflateStream, + archive: tar.NewReader(deflateStream), + closed: false, + }, nil +} + +func (r *rdr) Close() { + if r.closed { + return + } + r.source.Close() + r.deflateStream.Close() + r.closed = true +} + +func (r *rdr) IterateEntries(process EntryReader) error { + defer r.Close() + + if process == nil { + return fmt.Errorf("nil entry reader") + } + if r.closed { + return fmt.Errorf("closed reader") + } + + return r.forEachEntry(process) +} + +func (r *rdr) forEachEntry(process EntryReader) error { + for { + header, err := r.archive.Next() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + err = process(header.Name, header.FileInfo(), r.archive) + if err != nil { + return err + } + } +} diff --git a/osmops/util/tgz/reader_test.go b/osmops/util/tgz/reader_test.go new file mode 100644 index 0000000..01d8acd --- /dev/null +++ b/osmops/util/tgz/reader_test.go @@ -0,0 +1,94 @@ +package tgz + +import ( + "fmt" + "io" + "os" + "strings" + "testing" + + "github.com/martel-innovate/osmops/osmops/util/bytez" +) + +func TestNewReaderErrOnNilSource(t *testing.T) { + if got, err := NewReader(nil); err == nil { + t.Errorf("want error; got: %v", got) + } +} + +func TestNewReaderErrOnNonGzipSource(t *testing.T) { + 
// makeBrokenArchiveReader builds a Reader over a tgz stream whose gzip
// layer is valid but whose decompressed tar data is corrupt, so that
// reading the tar header fails mid-iteration rather than at NewReader time.
func makeBrokenArchiveReader(t *testing.T) Reader {
	sink := bytez.NewBuffer()
	writer, err := NewWriter("", sink, WithBestSpeed())
	if err != nil {
		t.Fatalf("couldn't create writer: %v", err)
	}
	// Single entry "foo" with content "bar"; small enough that the whole
	// archive fits in a few dozen bytes.
	if err := writer.AddEntry("foo", strings.NewReader("bar")); err != nil {
		t.Fatalf("couldn't write entry: %v", err)
	}
	writer.Close()

	data, err := io.ReadAll(sink)
	if err != nil {
		t.Fatalf("couldn't read tgz data: %v", err)
	}
	// NOTE(review): offset 50 presumably lands inside the compressed tar
	// payload, past the gzip header, so gzip.NewReader still succeeds but
	// the tar header read errors out---confirm if the writer's output
	// layout ever changes.
	data[50] = 50 // make reading of tar header fail

	source := bytez.NewBuffer()
	source.Write(data)

	reader, err := NewReader(source)
	if err != nil {
		t.Fatalf("couldn't create reader: %v", err)
	}
	return reader
}
b/osmops/util/tgz/rw_test.go new file mode 100644 index 0000000..3dcfdf7 --- /dev/null +++ b/osmops/util/tgz/rw_test.go @@ -0,0 +1,98 @@ +package tgz + +import ( + "fmt" + "io" + "os" + "path" + "reflect" + "sort" + "strings" + "testing" + + "github.com/martel-innovate/osmops/osmops/util/bytez" + "github.com/martel-innovate/osmops/osmops/util/file" +) + +func writeArchive(sink io.WriteCloser) []error { + errs := []error{} + + writer, err := NewWriter("", sink, WithBestSpeed()) + if err != nil { + errs = append(errs, err) + return errs + } + defer writer.Close() + + sourceDir := findTestDataDir() + scanner := file.NewTreeScanner(sourceDir) + es := scanner.Visit(writer.Visitor()) + errs = append(errs, es...) + + if err := writer.AddEntry("extra", strings.NewReader("extra")); err != nil { + errs = append(errs, err) + } + + return errs +} + +func writeArchiveAndCreateReader(t *testing.T) Reader { + buf := bytez.NewBuffer() + es := writeArchive(buf) + if len(es) > 0 { + t.Fatalf("couldn't write archive: %v", es) + } + + reader, err := NewReader(buf) + if err != nil { + t.Fatalf("couldn't create reader: %v", err) + } + + return reader +} + +func checkEntryContent(archivePath string, fi os.FileInfo, entry io.Reader) error { + name := path.Base(archivePath) + contentBytes, err := io.ReadAll(entry) + if err != nil { + return err + } + text := string(contentBytes) + if name != text { + return fmt.Errorf("path = %s; content = %s", archivePath, text) + } + return nil +} + +func checkArchivePaths(t *testing.T, got []string) { + want := []string{ + "d1/f2", "d1/f3", "d2/d3/f6", "d2/f4", "d2/f5", "extra", "f1", + } + + sort.Strings(want) + sort.Strings(got) + + if !reflect.DeepEqual(want, got) { + t.Errorf("want: %v; got: %v", want, got) + } +} + +func TestWriteThenReadContent(t *testing.T) { + reader := writeArchiveAndCreateReader(t) + if err := reader.IterateEntries(checkEntryContent); err != nil { + t.Errorf("entry content should be the same as entry name: %v", err) + } +} + 
+func TestWriteThenReadPaths(t *testing.T) { + reader := writeArchiveAndCreateReader(t) + + paths := []string{} + _ = reader.IterateEntries( + func(archivePath string, fi os.FileInfo, entry io.Reader) error { + paths = append(paths, archivePath) + return nil + }) + + checkArchivePaths(t, paths) +} diff --git a/osmops/util/tgz/unarchive.go b/osmops/util/tgz/unarchive.go new file mode 100644 index 0000000..4ea1665 --- /dev/null +++ b/osmops/util/tgz/unarchive.go @@ -0,0 +1,76 @@ +package tgz + +import ( + "io" + "io/fs" + "os" + "path/filepath" + + "github.com/martel-innovate/osmops/osmops/util/file" +) + +// ExtractTarball extracts the files in the given tarball to the specified +// directory, taking care of creating intermediate directories as needed. +// +// If you pass the empty string for destDirPath, ExtractTarball preserves +// the original archive paths, even if they're absolute. For example, if +// "/d/f" is the path of file f in the archive, ExtractTarball will try +// creating a directory "/d" if it doesn't exist and then put f in there. +// With an empty destDirPath, ExtractTarball resolves relative archive paths +// with respect to the current directory. For example, if "d/f" is the path +// of file f in the archive, ExtractTarball will try creating a directory +// "./d" if it doesn't exist and then put f in there. +// +// On the other hand, if you specify a destDirPath (either absolute or +// relative to the current directory), ExtractTarball recreates the directory +// structure of the archived files entirely in destDirPath by interpreting +// all archive paths (even absolute ones) relative to destDirPath. For example, +// if "/d/f" is the path of file f in the archive, ExtractTarball will try +// creating a directory "destDirPath/d" if it doesn't exist and then put f +// in there. The same happens to relative paths. 
For example, if "d/f" is +// the path of file f in the archive, ExtractTarball will try creating a +// directory "destDirPath/d" if it doesn't exist and then put f in there. +func ExtractTarball(tarballPath file.AbsPath, destDirPath string) error { + source, err := os.Open(tarballPath.Value()) + if err != nil { + return err + } + + reader, err := NewReader(source) + if err != nil { + return err + } + + return reader.IterateEntries(makeEntryReader(destDirPath)) +} + +func makeEntryReader(destDirPath string) EntryReader { + return func(archivePath string, fi os.FileInfo, content io.Reader) error { + if fi.IsDir() { + return nil + } + + targetPath := filepath.Join(destDirPath, archivePath) + if err := ensureDirs(fi, targetPath); err != nil { + return err + } + + fd, err := os.OpenFile(targetPath, + os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode()) + if err != nil { + return err + } + defer fd.Close() + + _, err = io.Copy(fd, content) + return err + } +} + +func ensureDirs(fi os.FileInfo, targetPath string) error { + if fi.IsDir() { + return os.MkdirAll(targetPath, fi.Mode()) + } + enclosingDir := filepath.Dir(targetPath) + return os.MkdirAll(enclosingDir, fs.ModePerm|fs.ModeDir) +} diff --git a/osmops/util/tgz/unarchive_test.go b/osmops/util/tgz/unarchive_test.go new file mode 100644 index 0000000..193aa25 --- /dev/null +++ b/osmops/util/tgz/unarchive_test.go @@ -0,0 +1,122 @@ +package tgz + +import ( + "io/fs" + "os" + "path" + "strings" + "testing" + + "github.com/martel-innovate/osmops/osmops/util/file" +) + +func makeTarballPath(t *testing.T, tempDirPath string) file.AbsPath { + tarball, err := file.ParseAbsPath(path.Join(tempDirPath, "test.tgz")) + if err != nil { + t.Fatalf("couldn't build tarball pathname: %v", err) + } + return tarball +} + +func writeBogusTarball(t *testing.T, tempDirPath string) file.AbsPath { + tarball := makeTarballPath(t, tempDirPath) + + data := []byte{1, 2, 3} + if err := os.WriteFile(tarball.Value(), data, os.ModePerm); err != nil { + 
t.Fatalf("couldn't write tarball: %v", err) + } + + return tarball +} + +func writeFlatTarball(t *testing.T, tempDirPath string) file.AbsPath { + tarball := makeTarballPath(t, tempDirPath) + + dest, err := os.OpenFile(tarball.Value(), + os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + t.Fatalf("couldn't open tarball for writing: %v", err) + } + + writer, err := NewWriter("", dest, WithBestSpeed()) + if err != nil { + t.Fatalf("couldn't create tarball writer: %v", err) + } + defer writer.Close() + + if err = writer.AddEntry("foo", strings.NewReader("bar")); err != nil { + t.Fatalf("couldn't write tarball entry: %v", err) + } + + return tarball +} + +func TestExtractTarballFileOpenErr(t *testing.T) { + withTempDir(t, func(tempDirPath string) { + tarball := writeBogusTarball(t, tempDirPath) + os.Chmod(tarball.Value(), 0200) // can't read + + err := ExtractTarball(tarball, tempDirPath) + if _, ok := err.(*fs.PathError); !ok { + t.Errorf("want: file open error; got: %v", err) + } + }) +} + +func TestExtractTarballMalformedFileErr(t *testing.T) { + withTempDir(t, func(tempDirPath string) { + tarball := writeBogusTarball(t, tempDirPath) + + err := ExtractTarball(tarball, tempDirPath) + wantMgs := "unexpected EOF" + if err == nil || err.Error() != wantMgs { // (*) + t.Errorf("want: malformed file error; got: %v", err) + } + }) + + // NOTE. Error type. The gzip pkg returns a generic errors.errorString, + // so the best we can do is check for the error message. 
+} + +func TestExtractTarballWriteEntryErr(t *testing.T) { + withTempDir(t, func(tempDirPath string) { + tarball := writeFlatTarball(t, tempDirPath) + os.Chmod(tempDirPath, 0500) // ExtractTarball can't write in here + + err := ExtractTarball(tarball, tempDirPath) + if _, ok := err.(*fs.PathError); !ok { + t.Errorf("want: file entry write error; got: %v", err) + } + }) +} + +func TestExtractTarballCreateDestDirErr(t *testing.T) { + withTempDir(t, func(tempDirPath string) { + destDirPath := path.Join(tempDirPath, "dest") + tarball := writeFlatTarball(t, tempDirPath) + os.Chmod(tempDirPath, 0500) // ExtractTarball can't write in here + + err := ExtractTarball(tarball, destDirPath) + if _, ok := err.(*fs.PathError); !ok { + t.Errorf("want: file entry write error; got: %v", err) + } + }) +} + +func TestEnsureDir(t *testing.T) { + withTempDir(t, func(tempDirPath string) { + dirPath := path.Join(tempDirPath, "foo") + if err := os.Mkdir(dirPath, fs.ModePerm|fs.ModeDir); err != nil { + t.Fatalf("couldn't create %s: %v", dirPath, err) + } + + fi, err := os.Stat(dirPath) + if err != nil { + t.Fatalf("couldn't stat %s: %v", dirPath, err) + } + + if err = ensureDirs(fi, dirPath); err != nil { + t.Errorf("want: no error; got: %v", err) + } + }) +} diff --git a/osmops/util/tgz/writer.go b/osmops/util/tgz/writer.go new file mode 100644 index 0000000..371a4b6 --- /dev/null +++ b/osmops/util/tgz/writer.go @@ -0,0 +1,149 @@ +package tgz + +import ( + "archive/tar" + "compress/gzip" + "fmt" + "io" + "os" + + "github.com/martel-innovate/osmops/osmops/util/file" +) + +// Writer writes data to a compressed tar stream. +// The tar format is PAX and the compression is gzip. +// You create a Writer with a sink stream where the compressed tar data +// gets written. +// +// Example. Archiving all the files in "some/dir" and its sub-directories. 
// tarball is the default Writer implementation: a tar stream layered on
// top of a gzip stream that compresses into the caller-supplied sink.
type tarball struct {
	contentStream    *tar.Writer     // tar layer the entries get written to
	compressedStream *gzip.Writer    // gzip layer sitting under the tar stream
	sink             io.WriteCloser  // final destination of the compressed bytes
	setHeaderFields  tarHeaderSetter // hook that fills in each entry's tar header
}
// Close finalises the archive: it flushes and closes the tar layer, then
// the gzip layer, then the underlying sink stream, in that (inner-to-outer)
// order.
// NOTE(review): the errors the three Close calls may return get dropped,
// since the Writer interface declares Close without an error result; a
// failed tar/gzip flush here would silently truncate the archive.
func (t *tarball) Close() {
	t.contentStream.Close()
	t.compressedStream.Close()
	t.sink.Close()
}
+} + +func (t *tarball) AddFile(archivePath, filePath string, fi os.FileInfo) error { + var err error + + if fi == nil || !fi.Mode().IsRegular() { + return nil + } + + header, err := tar.FileInfoHeader(fi, fi.Name()) + if err != nil { + return err + } + err = t.writeHeader(archivePath, header) + if err != nil { + return err + } + + fd, err := os.Open(filePath) + if err != nil { + return err + } + _, err = io.Copy(t.contentStream, fd) + fd.Close() + + return err +} + +func (t *tarball) Visitor() file.Visitor { + return func(node file.TreeNode) error { + return t.AddFile(node.RelPath, node.NodePath.Value(), node.FsMeta) + } +} diff --git a/osmops/util/tgz/writer_test.go b/osmops/util/tgz/writer_test.go new file mode 100644 index 0000000..4453582 --- /dev/null +++ b/osmops/util/tgz/writer_test.go @@ -0,0 +1,152 @@ +package tgz + +import ( + "compress/gzip" + "fmt" + "io/fs" + "strings" + "testing" + "time" + + "github.com/martel-innovate/osmops/osmops/util/bytez" +) + +// implements io.Reader +type contentBomb struct{} + +func (x *contentBomb) Read(p []byte) (n int, err error) { + return 1, fmt.Errorf("failed to read") +} + +// implements fs.FileInfo +type bogusFile struct { + forceRegular bool + size int64 + modeCallCount int +} + +func (x *bogusFile) Name() string { + return "" +} + +func (x *bogusFile) Size() int64 { + if x.forceRegular { + return x.size + } + return 0 +} + +func (x *bogusFile) Mode() fs.FileMode { + if x.forceRegular { + return 0 // regular file + } + if x.modeCallCount == 0 { // AddFileEntry checks if it's regular + x.modeCallCount++ + return 0 // regular file + } + return fs.ModeIrregular +} + +func (x *bogusFile) ModTime() time.Time { + return time.Now() +} + +func (x *bogusFile) IsDir() bool { + return false +} + +func (x *bogusFile) Sys() interface{} { + return nil +} + +func makeMemWriter(opts ...WriterOption) (Writer, *bytez.Buffer) { + sink := bytez.NewBuffer() + if len(opts) == 0 { + opts = []WriterOption{WithBestSpeed()} + } + writer, _ 
:= NewWriter("", sink, opts...) + + return writer, sink +} + +func TestNewWriterAcceptEmptyBaseDir(t *testing.T) { + sink := bytez.NewBuffer() + if _, err := NewWriter("", sink); err != nil { + t.Errorf("want: writer; got: %v", err) + } +} + +func TestNewWriterErrOnNilSink(t *testing.T) { + if got, err := NewWriter("", nil); err == nil { + t.Errorf("want error; got: %v", got) + } +} + +func withInvalidCompLevel() WriterOption { + return func(opts *writerOpts) { + opts.compressionLevel = gzip.HuffmanOnly - 1 + } +} + +func TestNewWriterErrOnInvalidGzipCompLevel(t *testing.T) { + sink := bytez.NewBuffer() + if got, err := NewWriter("", sink, withInvalidCompLevel()); err == nil { + t.Errorf("want error; got: %v", got) + } +} + +func TestAddEntryErrOnContentRead(t *testing.T) { + writer, _ := makeMemWriter() + if err := writer.AddEntry("foo", &contentBomb{}); err == nil { + t.Errorf("want error; got: nil") + } +} + +func TestAddFileDoNothingOnNilFileInfo(t *testing.T) { + writer, _ := makeMemWriter() + if err := writer.AddFile("", "foo", nil); err != nil { + t.Errorf("want no error; got: %v", err) + } +} + +func TestAddFileErrOnBogusFileInfo(t *testing.T) { + writer, _ := makeMemWriter() + if err := writer.AddFile("", "foo", &bogusFile{}); err == nil { + t.Errorf("want error; got: nil") + } +} + +func TestAddFileErrOnWriteHeader(t *testing.T) { + writer, _ := makeMemWriter() + fileInfo := &bogusFile{forceRegular: true, size: -1} + + err := writer.AddFile("", "", fileInfo) + if err == nil || !strings.HasPrefix(err.Error(), "archive/tar:") { // (*) + t.Errorf("want: tar header error; got: %v", err) + } + + // NOTE. Error type. We expect tar.headerError but the tar pkg doesn't + // export it, so the best we can do is check for the error message. 
+} + +func TestAddFileErrOnFileOpen(t *testing.T) { + writer, _ := makeMemWriter() + fileInfo := &bogusFile{forceRegular: true} + + err := writer.AddFile("", "", fileInfo) + if _, ok := err.(*fs.PathError); !ok { + t.Errorf("want: path error; got: %v", err) + } +} + +func TestHeaderWritingErr(t *testing.T) { + wantErr := fmt.Errorf("foo") + writer, _ := makeMemWriter(WithBestSpeed(), withErrHdrSetter(wantErr)) + content := bytez.NewBuffer() + content.Write([]byte{1}) + + got := writer.AddEntry("foo", content) + if got != wantErr { + t.Errorf("want error: %v; got: %v", wantErr, got) + } +} diff --git a/osmops/util/tgz/writeropts.go b/osmops/util/tgz/writeropts.go new file mode 100644 index 0000000..511a7e1 --- /dev/null +++ b/osmops/util/tgz/writeropts.go @@ -0,0 +1,90 @@ +package tgz + +import ( + "archive/tar" + "compress/gzip" + "path" + "time" +) + +// tarHeaderSetter is a function to tweak tar headers the Writer puts in +// the archive. For each archive entry, you can make Writer call this +// function (or a list of them) to modify or add fields to the entry header. +// Setters get chained (in a Either monad of sorts) and there's always an +// initial setter to specify the base header for each file---see below. 
type tarHeaderSetter func(archivePath string, hdr *tar.Header) error

// writerOpts collects the Writer configuration that makeWriterCfg
// assembles from the base settings and caller-supplied WriterOptions.
type writerOpts struct {
	baseDirName      string          // archive dir every entry gets filed under
	compressionLevel int             // gzip level for the tgz stream
	setHeaderFields  tarHeaderSetter // chain of header setters run per entry
}

// chainHdrSetter appends set to the setter chain. Setters run in the order
// they were chained; the chain bails out on the first error.
func (opts *writerOpts) chainHdrSetter(set tarHeaderSetter) {
	fst, snd := opts.setHeaderFields, set
	opts.setHeaderFields = func(archivePath string, hdr *tar.Header) error {
		if err := fst(archivePath, hdr); err != nil {
			return err
		}
		return snd(archivePath, hdr)
	}
}

// WriterOption customizes the Writer configuration.
type WriterOption func(opts *writerOpts)

// baseWriterOpts builds the default configuration: best gzip compression
// plus an initial header setter that files each entry as a regular file,
// in PAX format, under baseDirName.
func baseWriterOpts(baseDirName string) *writerOpts {
	return &writerOpts{
		baseDirName:      baseDirName,
		compressionLevel: gzip.BestCompression,
		setHeaderFields: func(archivePath string, hdr *tar.Header) error {
			hdr.Name = path.Join(baseDirName, archivePath)
			hdr.Typeflag = tar.TypeReg
			hdr.Format = tar.FormatPAX
			return nil
		},
	}
}

// makeWriterCfg applies the given options, in order, to the base
// configuration. Nil options are skipped.
func makeWriterCfg(baseDirName string, opts ...WriterOption) *writerOpts {
	cfg := baseWriterOpts(baseDirName)
	for _, setting := range opts {
		if setting != nil {
			setting(cfg)
		}
	}
	return cfg
}

// WithDefaultCompression makes the Writer use gzip's default compression
// level when writing the archive.
func WithDefaultCompression() WriterOption {
	return func(opts *writerOpts) {
		opts.compressionLevel = gzip.DefaultCompression
	}
}

// WithBestCompression makes the Writer use the highest gzip compression
// level when writing the archive.
func WithBestCompression() WriterOption {
	return func(opts *writerOpts) {
		opts.compressionLevel = gzip.BestCompression
	}
}

// WithBestSpeed makes the Writer use the lowest gzip compression level
// when writing the archive.
func WithBestSpeed() WriterOption {
	return func(opts *writerOpts) {
		opts.compressionLevel = gzip.BestSpeed
	}
}

// WithEntryTime sets the access, change and mod time of each tar header
// to the specified time point.
func WithEntryTime(when time.Time) WriterOption {
	return func(opts *writerOpts) {
		opts.chainHdrSetter(func(archivePath string, hdr *tar.Header) error {
			hdr.AccessTime = when
			hdr.ChangeTime = when
			hdr.ModTime = when
			return nil
		})
	}
}

// ===== osmops/util/tgz/writeropts_test.go =====
// imports: archive/tar, compress/gzip, fmt, path, testing, time

// checkCompressionLevel asserts cfg carries the wanted gzip level.
func checkCompressionLevel(t *testing.T, cfg *writerOpts, wantLevel int) {
	if cfg.compressionLevel != wantLevel {
		t.Errorf("want comp: %d; got: %d", wantLevel, cfg.compressionLevel)
	}
}

// checkBaseHdrFields asserts the base setter fills in name, type and
// format, returning the header for further checks.
func checkBaseHdrFields(t *testing.T, cfg *writerOpts) *tar.Header {
	hdr := &tar.Header{}
	cfg.setHeaderFields("some/file", hdr)

	wantName := path.Join(cfg.baseDirName, "some/file")
	if hdr.Name != wantName {
		t.Errorf("want name: %s; got: %s", wantName, hdr.Name)
	}
	if hdr.Typeflag != tar.TypeReg {
		t.Errorf("want type reg; got: %v", hdr.Typeflag)
	}
	if hdr.Format != tar.FormatPAX {
		t.Errorf("want pax format; got: %v", hdr.Format)
	}

	return hdr
}

func TestBaseWriterCfg(t *testing.T) {
	got := makeWriterCfg("baseDir")
	checkCompressionLevel(t, got, gzip.BestCompression)
	checkBaseHdrFields(t, got)
}

func TestDefaultCompressionOpt(t *testing.T) {
	got := makeWriterCfg("baseDir", WithDefaultCompression())
	checkCompressionLevel(t, got, gzip.DefaultCompression)
	checkBaseHdrFields(t, got)
}

func TestBestCompressionOpt(t *testing.T) {
	got := makeWriterCfg("baseDir", WithBestCompression())
	checkCompressionLevel(t, got, gzip.BestCompression)
	checkBaseHdrFields(t, got)
}

func TestSpeedCompressionOpt(t *testing.T) {
	got := makeWriterCfg("baseDir", WithBestSpeed())
	checkCompressionLevel(t, got, gzip.BestSpeed)
	checkBaseHdrFields(t, got)
}

func TestEntryTimeOpt(t *testing.T) {
	epochStart := time.Unix(0, 0)
	cfg := makeWriterCfg("baseDir", WithEntryTime(epochStart))
	hdr := checkBaseHdrFields(t, cfg)

	if !hdr.AccessTime.Equal(epochStart) {
		t.Errorf("want access time: %v; got: %v", epochStart, hdr.AccessTime)
	}
	if !hdr.ChangeTime.Equal(epochStart) {
		t.Errorf("want change time: %v; got: %v", epochStart, hdr.ChangeTime)
	}
	if !hdr.ModTime.Equal(epochStart) {
		t.Errorf("want mod time: %v; got: %v", epochStart, hdr.ModTime)
	}
}

func TestMakeWriterCfgIgnoreNilSettings(t *testing.T) {
	got := makeWriterCfg("baseDir", nil)
	if got == nil {
		t.Fatalf("want: config; got: nil")
	}
	checkBaseHdrFields(t, got)
}

// withErrHdrSetter chains a setter that always fails with err.
func withErrHdrSetter(err error) WriterOption {
	return func(opts *writerOpts) {
		opts.chainHdrSetter(func(archivePath string, hdr *tar.Header) error {
			return err
		})
	}
}

// withHdrSetter chains a setter that delegates to set and never fails.
func withHdrSetter(set func(hdr *tar.Header)) WriterOption {
	return func(opts *writerOpts) {
		opts.chainHdrSetter(func(archivePath string, hdr *tar.Header) error {
			set(hdr)
			return nil
		})
	}
}

func TestChainSettersBailOutOnFirstErr(t *testing.T) {
	wantErr := fmt.Errorf("foo")
	dontWantHdrName := "this setter shouldn't be called"
	nameOverride := func(hdr *tar.Header) {
		hdr.Name = dontWantHdrName
	}
	cfg := makeWriterCfg("baseDir",
		withErrHdrSetter(wantErr), withHdrSetter(nameOverride))
	hdr := &tar.Header{}

	if err := cfg.setHeaderFields("some/file", hdr); err != wantErr {
		t.Errorf("want err: %v; got: %v", wantErr, err)
	}
	if hdr.Name == dontWantHdrName {
		t.Errorf(
			"want: name setter not called b/c of previous setter err; got: called")
	}
}

// ===== osmops/util/types.go =====
// imports: errors, fmt, net, net/url, regexp, strconv, strings

// NonEmptyStr is a string guaranteed, by construction, not to be empty.
// Build instances with NewNonEmptyStr.
type NonEmptyStr struct{ data string }

// Value returns the wrapped, non-empty string.
func (d NonEmptyStr) Value() string {
	return d.data
}

// NewNonEmptyStr wraps s in a NonEmptyStr, erroring out if s is empty.
func NewNonEmptyStr(s string) (NonEmptyStr, error) {
	if len(s) == 0 {
		return NonEmptyStr{}, errors.New("nil or empty string")
	}
	return NonEmptyStr{data: s}, nil
}

// HostAndPort holds a validated hostname (or IP) and port pair.
// Build instances with ParseHostAndPort.
type HostAndPort struct {
	h string
	p int
}

// ParsePort converts p to a port number, erroring out unless p is an
// integer in the 0-65535 range. Surrounding whitespace is ignored.
func ParsePort(p string) (int, error) {
	p = strings.TrimSpace(p)
	if port, err := strconv.Atoi(p); err == nil {
		if 0 <= port && port <= 65535 {
			return port, nil
		}
	}
	return 0, fmt.Errorf("invalid port: %s", p)
}

var hostnameRx = regexp.MustCompile(
	`^(([a-zA-Z0-9_-]){1,63}\.)*([a-zA-Z0-9_-]){1,63}$`)

// This article explains quite well what makes up a valid hostname:
// - https://en.wikipedia.org/wiki/Hostname

// IsHostname returns nil if host is a valid IP address or hostname
// (per hostnameRx), an error otherwise.
func IsHostname(host string) error {
	if 0 < len(host) && len(host) < 254 {
		if net.ParseIP(host) != nil || hostnameRx.MatchString(host) {
			return nil
		}
	}
	return fmt.Errorf("invalid hostname: %s", host)
}

// ParseHostAndPort splits hp into a validated host and port pair, filling
// in the gaps net.SplitHostPort leaves open (see inline comments).
func ParseHostAndPort(hp string) (*HostAndPort, error) {
	// SplitHostPort doesn't trim space, e.g.
	//     SplitHostPort(" h:1 ") == (" h", "1 ", nil)
	hp = strings.TrimSpace(hp)
	host, portString, err := net.SplitHostPort(hp)
	if err != nil {
		return nil, err
	}
	// SplitHostPort doesn't check the host part is a valid IP4 or IP6 or
	// a valid hostname e.g.
	//     SplitHostPort(":123") == ("", "123", nil)
	//     SplitHostPort("??:123") == ("??", "123", nil)
	if err := IsHostname(host); err != nil {
		return nil, err
	}
	// SplitHostPort doesn't check the port range, e.g.
	//     SplitHostPort("h:123456789") == ("h", "123456789", nil)
	port, err := ParsePort(portString)
	if err != nil {
		return nil, err
	}
	return &HostAndPort{host, port}, nil
}

// IsHostAndPort tells if value is a string in valid "host:port" format,
// returning nil if it is, an error otherwise.
func IsHostAndPort(value interface{}) error {
	s, _ := value.(string)
	_, err := ParseHostAndPort(s)
	return err
}

// Host returns the host part.
func (d *HostAndPort) Host() string {
	return d.h
}

// Port returns the port part.
func (d *HostAndPort) Port() int {
	return d.p
}

// String formats the pair as "host:port".
func (d *HostAndPort) String() string {
	return fmt.Sprintf("%s:%d", d.h, d.p)
}

// BuildHttpUrl joins this host and port with path (which must be a valid
// request URI, e.g. "/a/b") into an http or https URL.
func (d *HostAndPort) BuildHttpUrl(secure bool, path string) (*url.URL, error) {
	u, err := url.ParseRequestURI(path)
	if err != nil {
		return nil, err
	}
	if secure {
		u.Scheme = "https"
	} else {
		u.Scheme = "http"
	}
	u.Host = d.String()
	return u, nil
}

// Http builds an "http://host:port/path" URL.
func (d *HostAndPort) Http(path string) (*url.URL, error) {
	return d.BuildHttpUrl(false, path)
}

// Https builds an "https://host:port/path" URL.
func (d *HostAndPort) Https(path string) (*url.URL, error) {
	return d.BuildHttpUrl(true, path)
}

// StrEnum is a case-insensitive enumeration of string labels.
type StrEnum struct {
	values []string
}

// NewStrEnum builds a StrEnum out of labels, keeping their order but
// lower-casing them.
func NewStrEnum(labels ...string) StrEnum {
	e := StrEnum{values: make([]string, len(labels))}
	for k, v := range labels {
		e.values[k] = strings.ToLower(v)
	}
	return e
}

// EnumIx indexes a label within a StrEnum.
type EnumIx int

// NotALabel is the index of any string that isn't an enum label.
const NotALabel EnumIx = -1

// IndexOf looks up the index of label, ignoring case; NotALabel if absent.
func (d StrEnum) IndexOf(label string) EnumIx {
	lbl := strings.ToLower(label)
	for k, v := range d.values {
		if v == lbl {
			return EnumIx(k)
		}
	}
	return NotALabel
}

// LabelOf returns the (lower-cased) label at index, "" if out of range.
func (d StrEnum) LabelOf(index EnumIx) string {
	if 0 <= index && int(index) < len(d.values) {
		return d.values[index]
	}
	return "" // better return err? what if one of the labels is ""?!
}

// Validate errors out unless label is a string matching, ignoring case,
// one of the enum labels.
func (d StrEnum) Validate(label interface{}) error {
	if v, ok := label.(string); ok {
		if d.IndexOf(v) != NotALabel {
			return nil
		}
	}
	return fmt.Errorf("not an enum label: %v", label)
}

// IntSet is a set of ints.
type IntSet map[int]bool

// the joys of boilerplate, see: https://stackoverflow.com/questions/34018908

// ToIntSet collects values into an IntSet, dropping duplicates.
func ToIntSet(values ...int) IntSet {
	set := make(IntSet, len(values))
	for _, v := range values {
		set[v] = true
	}
	return set
}

// Contains tells if value is in the set.
func (s IntSet) Contains(value int) bool {
	// Missing keys yield the zero value false, so a single lookup does it.
	return s[value]
}

// ===== osmops/util/types_test.go =====
// imports: fmt, testing

func TestEmptyString(t *testing.T) {
	if _, err := NewNonEmptyStr(""); err == nil {
		t.Errorf("instantiated a non-empty string with an empty string!")
	}
}

var nonEmptyStringFixtures = []string{" ", "\n", " wada wada "}

func TestNonEmptyString(t *testing.T) {
	for k, d := range nonEmptyStringFixtures {
		if s, err := NewNonEmptyStr(d); err != nil {
			t.Errorf("[%d] want: valid; got: %v", k, err)
		} else {
			if d != s.Value() {
				t.Errorf("[%d] want: %s; got: %s", k, d, s.Value())
			}
		}
	}
}

var invalidHostnameFixtures = []string{
	"", "\n", ":", ":80", "some.host:", "some host", "some host.com",
	"what?is.this", "em@il", "what.the.h*ll",
	"x1234567890123456789012345678901234567890123456789012345678901234.com",
}

func TestInvalidHostname(t *testing.T) {
	for k, d := range invalidHostnameFixtures {
		if err := IsHostname(d); err == nil {
			t.Errorf("[%d] want: error; got: valid", k)
		}
	}
}

var validHostnameFixtures = []string{
	"::123", "1.2.3.4", "_h.com", "a-b.some_where", "some.host",
	"x12345678901234567890123456789012345678901234567890123456789012.com",
}

func TestValidHostname(t *testing.T) {
	for k, d := range validHostnameFixtures {
		if err := IsHostname(d); err != nil {
			t.Errorf("[%d] want: valid; got: %v", k, err)
		}
	}
}

var invalidHostnameAndPortFixtures = []string{
	"", "\n", ":", ":80", "some.host:", "some host:80", "some.host:123456789",
}

func TestInvalidHostnameAndPort(t *testing.T) {
	for k, d := range invalidHostnameAndPortFixtures {
		if err := IsHostAndPort(d); err == nil {
			t.Errorf("[%d] want: error; got: valid", k)
		}
	}
}

var parseHostAndPortFixtures = []struct {
	in       string
	wantHost string
	wantPort int
}{
	{"h:0", "h", 0}, {"h:1", "h", 1}, {"h:65535", "h", 65535},
	{"[::123]:0", "::123", 0}, {"[::123]:1", "::123", 1},
	{"[::123]:65535", "::123", 65535},
	{"1.2.3.4:0", "1.2.3.4", 0}, {"1.2.3.4:1", "1.2.3.4", 1},
	{"1.2.3.4:65535", "1.2.3.4", 65535},
}

func TestParseHostAndPort(t *testing.T) {
	for k, d := range parseHostAndPortFixtures {
		if hp, err := ParseHostAndPort(d.in); err != nil {
			t.Errorf("[%d] want: valid parse; got: %v", k, err)
		} else {
			if d.wantHost != hp.Host() || d.wantPort != hp.Port() {
				t.Errorf("[%d] want: %s:%d; got: %v",
					k, d.wantHost, d.wantPort, hp)
			}

			repr := fmt.Sprintf("%s:%d", d.wantHost, d.wantPort)
			if repr != hp.String() {
				t.Errorf("[%d] want string repr: %s; got: %v", k, repr, hp)
			}
		}
	}
}

var httpUrlErrorFixtures = []string{"", "a", "a/b"}

func TestHttpUrlError(t *testing.T) {
	hp, _ := ParseHostAndPort("x:80")
	for k, d := range httpUrlErrorFixtures {
		if got, err := hp.Http(d); err == nil {
			t.Errorf("[%d] want error; got: %v", k, got)
		}
	}
}

var httpUrlFixtures = []struct {
	inPath string
	want   string
}{
	{"/", "http://x:80/"},
	{"/a", "http://x:80/a"}, {"/a/", "http://x:80/a/"},
	{"/a/b", "http://x:80/a/b"}, {"/a/b/", "http://x:80/a/b/"},
}

func TestHttpUrl(t *testing.T) {
	hp, _ := ParseHostAndPort("x:80")
	for k, d := range httpUrlFixtures {
		got, err := hp.Http(d.inPath)
		if err != nil {
			t.Fatalf("[%d] want string repr: %s; got: %v", k, d.want, err)
		}
		if got.String() != d.want {
			t.Errorf("[%d] want string repr: %s; got: %v", k, d.want, got)
		}
	}
}

var httpsUrlFixtures = []struct {
	inPath string
	want   string
}{
	{"/", "https://x:80/"},
	{"/a", "https://x:80/a"}, {"/a/", "https://x:80/a/"},
	{"/a/b", "https://x:80/a/b"}, {"/a/b/", "https://x:80/a/b/"},
}

// NOTE: renamed from the original "TestHttspUrl" (typo).
func TestHttpsUrl(t *testing.T) {
	hp, _ := ParseHostAndPort("x:80")
	for k, d := range httpsUrlFixtures {
		got, err := hp.Https(d.inPath)
		if err != nil {
			t.Fatalf("[%d] want string repr: %s; got: %v", k, d.want, err)
		}
		if got.String() != d.want {
			t.Errorf("[%d] want string repr: %s; got: %v", k, d.want, got)
		}
	}
}

func TestEmptyStrEnum(t *testing.T) {
	e := NewStrEnum()
	if e.IndexOf("") != NotALabel || e.IndexOf("x") != NotALabel {
		t.Errorf("empty enum should have no label indexes")
	}
	if e.LabelOf(0) != "" || e.LabelOf(1) != "" {
		t.Errorf("empty enum should have no labels")
	}
	if e.Validate("") == nil || e.Validate("x") == nil {
		t.Errorf("empty enum should always fail validation")
	}
}

type enumTest = struct {
	StrEnum
	A, B, C EnumIx
}

func NewEnumTest() enumTest {
	return enumTest{
		StrEnum: NewStrEnum("A", "b", "C"),
		A:       0,
		B:       1,
		C:       2,
	}
}

func TestStrEnumLookup(t *testing.T) {
	e := NewEnumTest()
	ixs := []EnumIx{e.A, e.B, e.C}
	for _, ix := range ixs {
		lbl := e.LabelOf(ix)
		if ix != e.IndexOf(lbl) {
			t.Errorf("want: %d == IndexOf(LabelOf(%d)); "+
				"got: %d != IndexOf(%s = LabelOf(%d)) == %d",
				ix, ix, ix, lbl, ix, e.IndexOf(lbl))
		}
	}
}

func TestStrEnumValidation(t *testing.T) {
	e := NewEnumTest()
	if err := e.Validate(e.LabelOf(e.A)); err != nil {
		t.Errorf("[1] want: valid; got: %v", err)
	}
	if err := e.Validate("wada wada"); err == nil {
		t.Errorf("[2] want: error; got: valid")
	}
}

func TestStrEnumCaseInsensitive(t *testing.T) {
	e := NewEnumTest()
	if err := e.Validate("B"); err != nil {
		t.Errorf("want: uppercase B is valid; got: %v", err)
	}
	if e.IndexOf("B") == NotALabel {
		t.Errorf("want: uppercase B is index of b; got: not a label")
	}
}

func TestEmptyIntSet(t *testing.T) {
	s := ToIntSet()
	if s.Contains(0) {
		t.Errorf("want empty; got: %v", s)
	}
}

func TestNonEmptyIntSet(t *testing.T) {
	s := ToIntSet(1, 2)
	if s.Contains(0) {
		t.Errorf("want: 0 not in s; got: %v", s)
	}
	if !s.Contains(1) {
		t.Errorf("want: 1 in s; got: %v", s)
	}
	if !s.Contains(2) {
		t.Errorf("want: 2 in s; got: %v", s)
	}
}

// ===== shell.nix =====
// (import ./build {}).devShell