diff --git a/Makefile b/Makefile index 8b947e298..e800aaef8 100644 --- a/Makefile +++ b/Makefile @@ -202,9 +202,6 @@ $(BINDIR)/e2e.test: .init sed "s/GO_VERSION/$(GO_VERSION)/g" < build/build-image/Dockerfile | \ docker build -t clusteroperatorbuildimage - -.apiServerBuilderImage: .clusterOperatorBuildImage build/apiserver-builder/Dockerfile - docker build -t apiserverbuilderimage ./build/apiserver-builder - # Util targets ############## .PHONY: verify verify-generated verify-client-gen verify-mocks @@ -284,7 +281,7 @@ test-unit: .init build .generate_mocks test-integration: .init build @echo Running integration tests: - $(DOCKER_CMD) go test -race $(TEST_FLAGS) \ + $(DOCKER_CMD) go test -parallel 1 -race $(TEST_FLAGS) \ $(addprefix $(CLUSTER_OPERATOR_PKG)/,$(INT_TEST_DIRS)) $(TEST_LOG_FLAGS) clean-e2e: @@ -372,6 +369,7 @@ OA_ANSIBLE_BRANCH ?= release-3.10 define build-cluster-operator-ansible-image #(repo, branch, imagename, tag, clusterapi_playbook) cp build/cluster-operator-ansible/playbooks/cluster-api-prep/$5 build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api.yaml cp bin/aws-machine-controller build/cluster-operator-ansible/playbooks/cluster-api-prep/files + cp bin/cluster-operator build/cluster-operator-ansible/playbooks/cluster-api-prep/files docker build -t "$3:$4" --build-arg=CO_ANSIBLE_URL=$1 --build-arg=CO_ANSIBLE_BRANCH=$2 build/cluster-operator-ansible endef @@ -399,15 +397,6 @@ playbook-mock-image: build/playbook-mock/Dockerfile $(BINDIR)/coutil docker tag $(PLAYBOOK_MOCK_IMAGE) $(REGISTRY)playbook-mock:$(VERSION) docker tag $(PLAYBOOK_MOCK_MUTABLE_IMAGE) $(REGISTRY)playbook-mock:$(MUTABLE_TAG) -.PHONY: $(CLUSTERAPI_BIN)/apiserver -$(CLUSTERAPI_BIN)/apiserver: .apiServerBuilderImage - mkdir -p $(PWD)/$(CLUSTERAPI_BIN) && docker run --security-opt label:disable -v $(PWD)/$(CLUSTERAPI_BIN):/output --entrypoint=/bin/bash apiserverbuilderimage -c "export GOPATH=/go && mkdir -p /go/src/sigs.k8s.io/cluster-api && cd /go/src/sigs.k8s.io/cluster-api && git clone https://github.com/kubernetes-sigs/cluster-api.git . && apiserver-boot build executables --generate=false && touch /output/controller-manager /output/apiserver && cp bin/* /output" - -.PHONY: kubernetes-cluster-api -kubernetes-cluster-api: $(CLUSTERAPI_BIN)/apiserver build/clusterapi-image/Dockerfile - cp build/clusterapi-image/Dockerfile $(CLUSTERAPI_BIN) - docker build -t clusterapi ./$(CLUSTERAPI_BIN) - .PHONY: aws-machine-controller-image aws-machine-controller-image: build/aws-machine-controller/Dockerfile $(BINDIR)/aws-machine-controller $(call build-and-tag,"aws-machine-controller",$(AWS_MACHINE_CONTROLLER_IMAGE),$(AWS_MACHINE_CONTROLLER_MUTABLE_IMAGE)) diff --git a/build/apiserver-builder/Dockerfile b/build/apiserver-builder/Dockerfile deleted file mode 100644 index 001fd018f..000000000 --- a/build/apiserver-builder/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -FROM clusteroperatorbuildimage - -RUN cd /tmp && \ - curl -L -O https://github.com/kubernetes-incubator/apiserver-builder/releases/download/v1.9-alpha.2/apiserver-builder-v1.9-alpha.2-linux-amd64.tar.gz && \ - tar -xzf apiserver-builder-v1.9-alpha.2-linux-amd64.tar.gz && \ - cp ./bin/* /usr/bin diff --git a/build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api-common.yaml b/build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api-common.yaml index 8866d9067..a7a3a5cfc 100755 --- a/build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api-common.yaml +++ b/build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api-common.yaml @@ -64,7 +64,7 @@ - name: copy main template over copy: - src: files/cluster-api-template.yaml + src: "files/{{ cluster_api_template }}" dest: "{{ temp_dir.path }}/cluster-api-template.yaml" - name: copy machine controller template over diff --git a/build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api-development.yaml b/build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api-development.yaml index f8d3eeff8..9acfc3af0 100755 --- a/build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api-development.yaml +++ b/build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api-development.yaml @@ -17,12 +17,18 @@ dest: /usr/bin/aws-machine-controller setype: container_file_t mode: 0755 - + - name: copy cluster operator binary to master + copy: + src: files/cluster-operator + dest: /usr/bin/cluster-operator + setype: container_file_t + mode: 0755 - hosts: masters[0] gather_facts: no tasks: - name: set machine controller template set_fact: machine_controller_template: machine-controller-template-dev.yaml + cluster_api_template: cluster-api-template-dev.yaml - import_playbook: deploy-cluster-api-common.yaml diff --git a/build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api-production.yaml b/build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api-production.yaml index 589bff178..d7d3994ff 100755 --- a/build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api-production.yaml +++ b/build/cluster-operator-ansible/playbooks/cluster-api-prep/deploy-cluster-api-production.yaml @@ -12,5 +12,6 @@ - name: set machine controller template set_fact: machine_controller_template: machine-controller-template.yaml + cluster_api_template: cluster-api-template.yaml - import_playbook: deploy-cluster-api-common.yaml diff --git a/build/cluster-operator-ansible/playbooks/cluster-api-prep/files/.gitignore b/build/cluster-operator-ansible/playbooks/cluster-api-prep/files/.gitignore index a057ada83..16a6f44dd 100644 --- a/build/cluster-operator-ansible/playbooks/cluster-api-prep/files/.gitignore +++ b/build/cluster-operator-ansible/playbooks/cluster-api-prep/files/.gitignore @@ -1 +1,2 @@ aws-machine-controller +cluster-operator diff --git a/build/cluster-operator-ansible/playbooks/cluster-api-prep/files/cluster-api-template-dev.yaml b/build/cluster-operator-ansible/playbooks/cluster-api-prep/files/cluster-api-template-dev.yaml new file mode 100644 index 000000000..1b294cda3 --- /dev/null +++ b/build/cluster-operator-ansible/playbooks/cluster-api-prep/files/cluster-api-template-dev.yaml @@ -0,0 +1,389 @@ +######## +# +# Template for deploying the Cluster Operator. 
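+#
+# Development variant: the apiserver and controller-manager pods host-mount the
+# master's locally built /usr/bin/cluster-operator binary (at
+# /opt/services/cluster-operator), so a rebuilt binary can be exercised without
+# rebuilding the container image.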
+# +# Parameters: +# CLUSTER_API_NAMESPACE: namespace to hold clusterapi objects/services +# SERVING_CERT: base-64-encoded, pem cert to use for ssl communication with the Cluster API Server. Required. +# SERVING_KEY: base-64-encoded, pem private key for the cert to use for ssl communication with the Cluster API Server. Required. +# SERVING_CA: base-64-encoded, pem CA cert for the ssl certs. Required. +# CLUSTER_API_IMAGE: clusterapi container image location +# CLUSTER_API_IMAGE_PULL_POLICY: control image pull policy for clusterapi containers +# +######## + +apiVersion: v1 +kind: Template +metadata: + name: cluster-operator-deploy-cluster-api-template + +objects: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cluster-api-apiserver + namespace: ${CLUSTER_API_NAMESPACE} + +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: cluster-api-controller-manager + namespace: ${CLUSTER_API_NAMESPACE} + +- apiVersion: apiregistration.k8s.io/v1beta1 + kind: APIService + metadata: + name: v1alpha1.cluster.k8s.io + labels: + api: clusterapi + spec: + version: v1alpha1 + group: cluster.k8s.io + groupPriorityMinimum: 2000 + priority: 200 + service: + name: clusterapi + namespace: ${CLUSTER_API_NAMESPACE} + versionPriority: 10 + caBundle: ${SERVING_CA} + +- apiVersion: v1 + kind: Service + metadata: + name: clusterapi + namespace: ${CLUSTER_API_NAMESPACE} + labels: + app: cluster-api-apiserver + spec: + ports: + - port: 443 + protocol: TCP + targetPort: 6443 + selector: + app: cluster-api-apiserver + +- apiVersion: apps/v1beta1 + kind: Deployment + metadata: + name: cluster-api-apiserver + namespace: ${CLUSTER_API_NAMESPACE} + labels: + app: cluster-api-apiserver + spec: + selector: + matchLabels: + app: cluster-api-apiserver + replicas: 1 + template: + metadata: + labels: + app: cluster-api-apiserver + spec: + serviceAccountName: cluster-api-apiserver + containers: + - name: apiserver + image: ${CLUSTER_API_IMAGE} + imagePullPolicy: ${CLUSTER_API_IMAGE_PULL_POLICY} + volumeMounts: + - name: cluster-apiserver-certs + mountPath: /var/run/cluster-api-apiserver + readOnly: true + - name: cluster-operator + mountPath: /opt/services/cluster-operator + readOnly: true + args: + - "cluster-api-server" + - "--etcd-servers=http://localhost:2379" + - "--tls-cert-file=/var/run/cluster-api-apiserver/tls.crt" + - "--tls-private-key-file=/var/run/cluster-api-apiserver/tls.key" + - "--secure-port=6443" + ports: + - containerPort: 6443 + protocol: TCP + terminationMessagePath: /dev/termination-log + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 6443 + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 6443 + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + resources: + requests: + cpu: 100m + memory: 40Mi + limits: + cpu: 100m + memory: 60Mi + - name: etcd + image: quay.io/coreos/etcd:latest + imagePullPolicy: Always + resources: + requests: + cpu: 100m + memory: 20Mi + limits: + cpu: 100m + memory: 30Mi + env: + - name: ETCD_DATA_DIR + value: /etcd-data-dir + command: + - /usr/local/bin/etcd + - --listen-client-urls + - http://0.0.0.0:2379 + - --advertise-client-urls + - http://localhost:2379 + ports: + - containerPort: 2379 + volumeMounts: + - name: etcd-data-dir + mountPath: /etcd-data-dir + readinessProbe: + httpGet: + port: 2379 + path: /health + failureThreshold: 1 + initialDelaySeconds: 10 + 
periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + httpGet: + port: 2379 + path: /health + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + dnsPolicy: ClusterFirst + restartPolicy: Always + securityContext: {} + terminationGracePeriodSeconds: 30 + volumes: + - name: etcd-data-dir + emptyDir: {} + - name: cluster-apiserver-certs + secret: + secretName: cluster-apiserver-certs + - name: cluster-operator + hostPath: + path: /usr/bin/cluster-operator + +- apiVersion: apps/v1beta1 + kind: Deployment + metadata: + name: cluster-api-controller-manager + namespace: ${CLUSTER_API_NAMESPACE} + labels: + app: cluster-api-controller-manager + spec: + selector: + matchLabels: + app: cluster-api-controller-manager + replicas: 1 + template: + metadata: + labels: + app: cluster-api-controller-manager + spec: + serviceAccountName: cluster-api-controller-manager + nodeSelector: + node-role.kubernetes.io/master: "true" + containers: + - name: controller-manager + image: ${CLUSTER_API_IMAGE} + imagePullPolicy: ${CLUSTER_API_IMAGE_PULL_POLICY} + args: + - "cluster-api-controller-manager" + volumeMounts: + - name: cluster-operator + mountPath: /opt/services/cluster-operator + readOnly: true + resources: + requests: + cpu: 100m + memory: 20Mi + limits: + cpu: 100m + memory: 30Mi + dnsPolicy: ClusterFirst + restartPolicy: Always + securityContext: {} + terminationGracePeriodSeconds: 30 + volumes: + - name: cluster-operator + hostPath: + path: /usr/bin/cluster-operator + +- apiVersion: v1 + kind: Secret + type: kubernetes.io/tls + metadata: + name: cluster-apiserver-certs + namespace: ${CLUSTER_API_NAMESPACE} + labels: + app: cluster-api-apiserver + data: + tls.crt: ${SERVING_CERT} + tls.key: ${SERVING_KEY} + +- apiVersion: v1 + kind: Secret + type: Opaque + metadata: + name: bootstrap-kubeconfig + namespace: ${CLUSTER_API_NAMESPACE} + data: + bootstrap.kubeconfig: "${BOOTSTRAP_KUBECONFIG}" + +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: "clusterapi.openshift.io:apiserver-auth-delegator" + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator + subjects: + - apiGroup: "" + kind: ServiceAccount + name: cluster-api-apiserver + namespace: ${CLUSTER_API_NAMESPACE} + +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: "clusterapi.openshift.io:apiserver-authentication-reader" + namespace: ${KUBE_SYSTEM_NAMESPACE} + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - apiGroup: "" + kind: ServiceAccount + name: cluster-api-apiserver + namespace: ${CLUSTER_API_NAMESPACE} + +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: "clusterapi.openshift.io:controller-manager" + rules: + # configmaps for leader election + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["create"] + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["cluster-operator-controller-manager"] + verbs: ["get", "update"] + # events for recording events + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch", "update"] + # allow all operations on all resources in our API group + - apiGroups: ["cluster.k8s.io"] + resources: ["*"] + verbs: ["create", "get", "list", "watch", "update", "patch", "delete"] + # allow operations on required resources in any namespace a cluster is created + - apiGroups: [""] + resources: 
["configmaps", "pods", "secrets", "nodes"] + verbs: ["*"] + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["*"] + +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: "clusterapi.openshift.io:controller-manager" + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "clusterapi.openshift.io:controller-manager" + subjects: + - apiGroup: "" + kind: ServiceAccount + name: cluster-api-apiserver + namespace: ${CLUSTER_API_NAMESPACE} + - apiGroup: "" + kind: ServiceAccount + name: cluster-api-controller-manager + namespace: ${CLUSTER_API_NAMESPACE} + +- allowHostDirVolumePlugin: true + allowHostIPC: false + allowHostNetwork: false + allowHostPID: false + allowHostPorts: false + allowPrivilegedContainer: false + allowedCapabilities: null + apiVersion: security.openshift.io/v1 + defaultAddCapabilities: null + fsGroup: + type: MustRunAs + groups: [] + kind: SecurityContextConstraints + metadata: + name: hostmount-restricted-clusterapi-svc + readOnlyRootFilesystem: false + requiredDropCapabilities: + - KILL + - MKNOD + - SETUID + - SETGID + runAsUser: + type: MustRunAsRange + seLinuxContext: + type: MustRunAs + supplementalGroups: + type: RunAsAny + users: + - system:serviceaccount:${CLUSTER_API_NAMESPACE}:cluster-api-controller-manager + - system:serviceaccount:${CLUSTER_API_NAMESPACE}:cluster-api-apiserver + volumes: + - configMap + - downwardAPI + - emptyDir + - persistentVolumeClaim + - projected + - secret + - hostPath + +parameters: +# namespace to install clusterapi services onto +- name: CLUSTER_API_NAMESPACE + value: kube-cluster +# Do not change +- name: KUBE_SYSTEM_NAMESPACE + value: kube-system +# pull policy (for testing) +- name: CLUSTER_API_IMAGE_PULL_POLICY + value: Always +# CA cert for API Server SSL cert +- name: SERVING_CA +# Private key for API Server SSL cert +- name: SERVING_CERT +# Public API Server SSL cert +- name: SERVING_KEY +# location of container image +- name: CLUSTER_API_IMAGE +# machine controller image +- name: BOOTSTRAP_KUBECONFIG + value: "" diff --git a/build/cluster-operator-ansible/playbooks/cluster-api-prep/files/cluster-api-template.yaml b/build/cluster-operator-ansible/playbooks/cluster-api-prep/files/cluster-api-template.yaml index aeab31a9e..8690c9fd7 100644 --- a/build/cluster-operator-ansible/playbooks/cluster-api-prep/files/cluster-api-template.yaml +++ b/build/cluster-operator-ansible/playbooks/cluster-api-prep/files/cluster-api-template.yaml @@ -88,13 +88,11 @@ objects: - name: cluster-apiserver-certs mountPath: /var/run/cluster-api-apiserver readOnly: true - command: - - "./apiserver" args: + - "cluster-api-server" - "--etcd-servers=http://localhost:2379" - "--tls-cert-file=/var/run/cluster-api-apiserver/tls.crt" - "--tls-private-key-file=/var/run/cluster-api-apiserver/tls.key" - - "--loglevel=10" - "--secure-port=6443" ports: - containerPort: 6443 @@ -204,8 +202,8 @@ objects: - name: controller-manager image: ${CLUSTER_API_IMAGE} imagePullPolicy: ${CLUSTER_API_IMAGE_PULL_POLICY} - command: - - "./controller-manager" + args: + - "cluster-api-controller-manager" resources: requests: cpu: 100m diff --git a/build/clusterapi-image/Dockerfile b/build/clusterapi-image/Dockerfile deleted file mode 100644 index 30f934e21..000000000 --- a/build/clusterapi-image/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM ubuntu:14.04
-
-RUN apt-get update
-RUN apt-get install -y ca-certificates
-
-ADD apiserver .
-ADD controller-manager .
-
-RUN chmod +x apiserver controller-manager
diff --git a/cmd/cluster-api-apiserver/testing/testserver.go b/cmd/cluster-api-apiserver/testing/testserver.go
new file mode 100644
index 000000000..ac2a1aa1f
--- /dev/null
+++ b/cmd/cluster-api-apiserver/testing/testserver.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"testing"
+	"time"
+
+	"k8s.io/apimachinery/pkg/apimachinery/announced"
+	"k8s.io/apimachinery/pkg/apimachinery/registered"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/util/wait"
+	genericapiserver "k8s.io/apiserver/pkg/server"
+	etcdtesting "k8s.io/apiserver/pkg/storage/etcd/testing"
+	"k8s.io/client-go/kubernetes"
+	restclient "k8s.io/client-go/rest"
+
+	"github.com/kubernetes-incubator/apiserver-builder/pkg/apiserver"
+	"github.com/kubernetes-incubator/apiserver-builder/pkg/builders"
+	"github.com/kubernetes-incubator/apiserver-builder/pkg/cmd/server"
+	capis "sigs.k8s.io/cluster-api/pkg/apis"
+	"sigs.k8s.io/cluster-api/pkg/openapi"
+)
+
+// TearDownFunc is to be called to tear down a test server.
+type TearDownFunc func()
+
+// StartTestServer starts an etcd server and the cluster-api apiserver. A rest client config
+// and a tear-down func are returned.
+//
+// Note: we return a tear-down func instead of a stop channel because the latter would leak
+// temporary files: Golang testing's call to os.Exit does not give a stop-channel goroutine
+// enough time to remove them.
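+//
+// Illustrative use from a test (a sketch, not part of this change; the clientset
+// shown is just one possible consumer):
+//
+//	config, tearDown, err := StartTestServer(t)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer tearDown()
+//	capiClient, err := capiclientset.NewForConfig(config)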
+func StartTestServer(t *testing.T) (result *restclient.Config, tearDownForCaller TearDownFunc, err error) {
+	var tmpDir string
+	var etcdServer *etcdtesting.EtcdTestServer
+	stopCh := make(chan struct{})
+	tearDown := func() {
+		close(stopCh)
+		if etcdServer != nil {
+			etcdServer.Terminate(t)
+		}
+		if len(tmpDir) != 0 {
+			os.RemoveAll(tmpDir)
+		}
+		builders.GroupFactoryRegistry = announced.APIGroupFactoryRegistry{}
+		builders.Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS"))
+		builders.Scheme = runtime.NewScheme()
+		builders.Codecs = serializer.NewCodecFactory(builders.Scheme)
+		builders.APIGroupBuilders = []*builders.APIGroupBuilder{}
+	}
+	defer func() {
+		if tearDownForCaller == nil {
+			tearDown()
+		}
+	}()
+
+	t.Logf("Starting etcd...")
+	etcdServer, storageConfig := etcdtesting.NewUnsecuredEtcd3TestClientServer(t)
+
+	tmpDir, err = ioutil.TempDir("", "openshift-cluster-api-apiserver")
+	if err != nil {
+		return nil, nil, fmt.Errorf("Failed to create temp dir: %v", err)
+	}
+
+	server.GetOpenApiDefinition = openapi.GetOpenAPIDefinitions
+	s := server.NewServerOptions("/registry/k8s.io", os.Stdout, os.Stderr, capis.GetAllApiBuilders())
+
+	storageConfig.Codec = builders.Codecs.LegacyCodec(capis.GetClusterAPIBuilder().GetLegacyCodec()...)
+	s.RunDelegatedAuth = false
+	s.RecommendedOptions.SecureServing.BindPort = freePort()
+	s.RecommendedOptions.SecureServing.ServerCert.CertDirectory = tmpDir
+	s.RecommendedOptions.Etcd.StorageConfig = *storageConfig
+	s.RecommendedOptions.Etcd.DefaultStorageMediaType = "application/json"
+
+	t.Logf("Starting cluster-api-apiserver...")
+	runErrCh := make(chan error, 1)
+	server, err := createServer(s, "cluster-api-apiserver", "v0")
+	if err != nil {
+		return nil, nil, fmt.Errorf("Failed to create server: %v", err)
+	}
+	go func(stopCh <-chan struct{}) {
+		if err := server.GenericAPIServer.PrepareRun().Run(stopCh); err != nil {
+			t.Logf("cluster-api-apiserver exited uncleanly: %v", err)
+			runErrCh <- err
+		}
+	}(stopCh)
+
+	t.Logf("Waiting for /healthz to be ok...")
+	client, err := kubernetes.NewForConfig(server.GenericAPIServer.LoopbackClientConfig)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Failed to create a client: %v", err)
+	}
+	err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
+		select {
+		case err := <-runErrCh:
+			return false, err
+		default:
+		}
+
+		result := client.CoreV1().RESTClient().Get().AbsPath("/healthz").Do()
+		status := 0
+		result.StatusCode(&status)
+		if status == 200 {
+			return true, nil
+		}
+		return false, nil
+	})
+	if err != nil {
+		return nil, nil, fmt.Errorf("Failed to wait for /healthz to return ok: %v", err)
+	}
+
+	// from here the caller must call tearDown
+	return server.GenericAPIServer.LoopbackClientConfig, tearDown, nil
+}
+
+// StartTestServerOrDie calls StartTestServer and dies with t.Fatal if it does
+// not succeed.
+func StartTestServerOrDie(t *testing.T) (*restclient.Config, TearDownFunc) { + config, td, err := StartTestServer(t) + if err != nil { + t.Fatalf("Failed to launch server: %v", err) + } + return config, td +} + +func freePort() int { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + panic(err) + } + + l, err := net.ListenTCP("tcp", addr) + if err != nil { + panic(err) + } + defer l.Close() + return l.Addr().(*net.TCPAddr).Port +} + +func createServer(o *server.ServerOptions, title, version string) (*apiserver.Server, error) { + config, err := o.Config() + if err != nil { + return nil, err + } + o.BearerToken = config.GenericConfig.LoopbackClientConfig.BearerToken + + for _, provider := range o.APIBuilders { + config.AddApi(provider) + } + + config.Init() + + config.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(server.GetOpenApiDefinition, builders.Scheme) + config.GenericConfig.OpenAPIConfig.Info.Title = title + config.GenericConfig.OpenAPIConfig.Info.Version = version + + server, err := config.Complete().New() + if err != nil { + return nil, err + } + + for _, h := range o.PostStartHooks { + server.GenericAPIServer.AddPostStartHook(h.Name, h.Fn) + } + return server, nil +} diff --git a/cmd/cluster-operator/clusterapi-apiserver.go b/cmd/cluster-operator/clusterapi-apiserver.go new file mode 100644 index 000000000..89e80558c --- /dev/null +++ b/cmd/cluster-operator/clusterapi-apiserver.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "os" + + "github.com/kubernetes-incubator/apiserver-builder/pkg/cmd/server" + + capis "sigs.k8s.io/cluster-api/pkg/apis" + "sigs.k8s.io/cluster-api/pkg/openapi" + + "github.com/openshift/cluster-operator/pkg/hyperkube" +) + +// NewClusterAPIServer creates a new hyperkube Server object that includes the +// description and flags. 
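+// The returned server is registered with the hyperkube command in
+// cmd/cluster-operator/main.go via hk.AddServer(NewClusterAPIServer()), which
+// is what the templates invoke through the "cluster-api-server" argument.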
+func NewClusterAPIServer() *hyperkube.Server { + + // APIServer parameters + etcdPath := "/registry/k8s.io" + apis := capis.GetAllApiBuilders() + serverStopCh := make(chan struct{}) + title := "Api" + version := "v0" + + server.GetOpenApiDefinition = openapi.GetOpenAPIDefinitions + // APIServer command + cmd, _ := server.NewCommandStartServer(etcdPath, os.Stdout, os.Stderr, apis, serverStopCh, title, version) + + hks := hyperkube.Server{ + PrimaryName: "cluster-api-server", + SimpleUsage: "cluster-api-server", + Long: cmd.Long, + Run: func(_ *hyperkube.Server, args []string, stopCh <-chan struct{}) error { + go func() { + select { + case <-stopCh: + serverStopCh <- struct{}{} + } + }() + return cmd.Execute() + }, + RespectsStopCh: true, + } + + // Set HyperKube command flags + hks.Flags().AddFlagSet(cmd.Flags()) + + return &hks +} diff --git a/cmd/cluster-operator/clusterapi-controller-manager.go b/cmd/cluster-operator/clusterapi-controller-manager.go new file mode 100644 index 000000000..3f65c4974 --- /dev/null +++ b/cmd/cluster-operator/clusterapi-controller-manager.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "path" + + "github.com/openshift/cluster-operator/pkg/hyperkube" + + controllerlib "github.com/kubernetes-incubator/apiserver-builder/pkg/controller" + "sigs.k8s.io/cluster-api/pkg/controller" + "sigs.k8s.io/cluster-api/pkg/controller/config" +) + +const ( + // program name for debug builds + debugProgramName = "debug" +) + +// NewClusterAPIControllerManager creates a new hyperkube Server object that includes the +// description and flags. +func NewClusterAPIControllerManager(programName string) *hyperkube.Server { + altName := "cluster-api-controller-manager" + + if path.Base(programName) == debugProgramName { + altName = debugProgramName + } + + hks := hyperkube.Server{ + PrimaryName: "cluster-api-controller-manager", + AlternativeName: altName, + SimpleUsage: "cluster-api-controller-manager", + Long: `The cluster API controller manager is a daemon that embeds the core control loops shipped with cluster API.`, + Run: func(_ *hyperkube.Server, args []string, stopCh <-chan struct{}) error { + config, err := controllerlib.GetConfig(config.ControllerConfig.Kubeconfig) + if err != nil { + return fmt.Errorf("Could not create Config for talking to the apiserver: %v", err) + } + controllers, _ := controller.GetAllControllers(config) + controllerlib.StartControllerManager(controllers...) 
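+			// Block until the stop channel fires; StartControllerManager is assumed
+			// to launch the controller loops in the background and return.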
+			select {
+			case <-stopCh:
+				return nil
+			}
+		},
+		RespectsStopCh: false,
+	}
+	config.ControllerConfig.AddFlags(hks.Flags())
+	return &hks
+}
diff --git a/cmd/cluster-operator/controller-manager.go b/cmd/cluster-operator/controller-manager.go
index 8c4801d9b..27084314a 100644
--- a/cmd/cluster-operator/controller-manager.go
+++ b/cmd/cluster-operator/controller-manager.go
@@ -24,11 +24,6 @@ import (
 	"github.com/openshift/cluster-operator/pkg/hyperkube"
 )
 
-const (
-	// program name for debug builds
-	debugProgramName = "debug"
-)
-
 // NewControllerManager creates a new hyperkube Server object that includes the
 // description and flags.
 func NewControllerManager(programName string) *hyperkube.Server {
diff --git a/cmd/cluster-operator/main.go b/cmd/cluster-operator/main.go
index 8812bb6b3..291d01801 100644
--- a/cmd/cluster-operator/main.go
+++ b/cmd/cluster-operator/main.go
@@ -33,6 +33,8 @@ func main() {
 
 	hk.AddServer(NewAPIServer())
 	hk.AddServer(NewControllerManager(os.Args[0]))
+	hk.AddServer(NewClusterAPIServer())
+	hk.AddServer(NewClusterAPIControllerManager(os.Args[0]))
 
 	hk.RunToExit(os.Args)
 }
diff --git a/contrib/ansible/deploy-devel-playbook.yml b/contrib/ansible/deploy-devel-playbook.yml
index 99ef5282d..35b81fe83 100755
--- a/contrib/ansible/deploy-devel-playbook.yml
+++ b/contrib/ansible/deploy-devel-playbook.yml
@@ -19,7 +19,7 @@
     cluster_version_namespace: "{{ cluster_version_namespace | default('openshift-cluster-operator') }}"
 
     # Images to deploy on target cluster
-    cluster_api_image: "{{ cluster_api_image | default('registry.svc.ci.openshift.org/openshift-cluster-operator/kubernetes-cluster-api:latest') }}"
+    cluster_api_image: "{{ cluster_api_image | default('registry.svc.ci.openshift.org/openshift-cluster-operator/cluster-operator:latest') }}"
     cluster_api_image_pull_policy: "{{ cluster_api_image_pull_policy | default('Always') }}"
     machine_controller_image: "{{ machine_controller_image | default('registry.svc.ci.openshift.org/openshift-cluster-operator/cluster-operator:latest') }}"
     machine_controller_image_pull_policy: "{{ machine_controller_image_pull_policy | default('Always') }}"
diff --git a/contrib/ansible/deploy-playbook.yml b/contrib/ansible/deploy-playbook.yml
index 0f7426eff..d184e9450 100755
--- a/contrib/ansible/deploy-playbook.yml
+++ b/contrib/ansible/deploy-playbook.yml
@@ -30,8 +30,6 @@
     # Cluster operator git repository to build:
     cluster_operator_git_repo: "https://github.com/openshift/cluster-operator"
     cluster_operator_git_ref: master
-    # Cluster API controller image to deploy on root cluster
-    cluster_api_image: "registry.svc.ci.openshift.org/openshift-cluster-operator/kubernetes-cluster-api:latest"
     cluster_api_image_pull_policy: "Always"
 
     fake_deployment: False
@@ -88,7 +86,7 @@
       certgen_ca_name: "{{ apiserver_ca_name }}"
       certgen_cert_name: "apiserver"
       certgen_cn: cluster-operator-apiserver
-      certgen_alt_names: "\"cluster-operator-apiserver.{{ cluster_operator_namespace }}\",\"cluster-operator-apiserver.{{ cluster_operator_namespace }}.svc\""
+      certgen_alt_names: "\"cluster-operator-apiserver.{{ cluster_operator_namespace }}\",\"cluster-operator-apiserver.{{ cluster_operator_namespace }}.svc\",\"cluster-api-apiserver\",\"cluster-api-apiserver.{{ cluster_operator_namespace }}\",\"cluster-api-apiserver.{{ cluster_operator_namespace }}.svc\""
 
     # These are expected locations of the files given the directory and names provided when
    # we called the role.
@@ -167,7 +165,6 @@ template_file: "{{ playbook_dir }}/../examples/cluster-api-controllers-template.yaml" parameters: CLUSTER_API_NAMESPACE: "{{ cluster_operator_namespace }}" - CLUSTER_API_IMAGE: "{{ cluster_api_image }}" MACHINE_CONTROLLER_IMAGESTREAM: "{{ machine_controller_imagestream }}" MACHINE_CONTROLLER_ENTRYPOINT: "{{ machine_controller_entrypoint }}" IMAGE_PULL_POLICY: "{{ cluster_api_image_pull_policy }}" diff --git a/contrib/examples/cluster-api-controllers-template.yaml b/contrib/examples/cluster-api-controllers-template.yaml index 118d8685d..babd42e1c 100644 --- a/contrib/examples/cluster-api-controllers-template.yaml +++ b/contrib/examples/cluster-api-controllers-template.yaml @@ -68,8 +68,8 @@ objects: secret: secretName: bootstrap-kubeconfig -- apiVersion: apps/v1beta1 - kind: Deployment +- kind: DeploymentConfig + apiVersion: apps.openshift.io/v1 metadata: name: cluster-api-controller-manager namespace: ${CLUSTER_API_NAMESPACE} @@ -77,9 +77,20 @@ objects: app: cluster-api-controller-manager spec: selector: - matchLabels: - app: cluster-api-controller-manager + app: cluster-api-controller-manager + triggers: + - type: "ConfigChange" + - type: "ImageChange" + imageChangeParams: + automatic: true + containerNames: + - "controller-manager" + from: + # Uses the same ImageStream as our main controllers: + kind: "ImageStreamTag" + name: "${MACHINE_CONTROLLER_IMAGESTREAM}:latest" replicas: 1 + revisionHistoryLimit: 4 template: metadata: labels: @@ -88,10 +99,10 @@ objects: serviceAccountName: cluster-api-controller-manager containers: - name: controller-manager - image: ${CLUSTER_API_IMAGE} + image: " " imagePullPolicy: ${IMAGE_PULL_POLICY} - command: - - "./controller-manager" + args: + - cluster-api-controller-manager resources: requests: cpu: 100m @@ -117,9 +128,6 @@ parameters: - name: CLUSTER_API_NAMESPACE value: openshift-cluster-operator # location of cluster-api container image -- name: CLUSTER_API_IMAGE - value: registry.svc.ci.openshift.org/openshift-cluster-operator/kubernetes-cluster-api:latest -# machine controller image - name: DEFAULT_AVAILABILITY_ZONE value: us-east-1c # pull policy (for testing) diff --git a/contrib/examples/cluster-operator-template.yaml b/contrib/examples/cluster-operator-template.yaml index 6f641adea..a90dd33e9 100644 --- a/contrib/examples/cluster-operator-template.yaml +++ b/contrib/examples/cluster-operator-template.yaml @@ -47,7 +47,9 @@ objects: metadata: name: cluster-operator namespace: ${CLUSTER_OPERATOR_NAMESPACE} - spec: {} + spec: + lookupPolicy: + local: true - kind: BuildConfig apiVersion: build.openshift.io/v1 @@ -102,6 +104,7 @@ objects: automatic: true containerNames: - "apiserver" + - "clusterapiserver" from: kind: "ImageStreamTag" name: "cluster-operator:latest" @@ -122,7 +125,6 @@ objects: - -v - "10" image: cluster-operator:latest - name: apiserver ports: - containerPort: 6443 protocol: TCP @@ -151,6 +153,42 @@ objects: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 2 + - name: clusterapiserver + args: + - "cluster-api-server" + - "--etcd-servers=http://localhost:2379" + - "--tls-cert-file=/var/run/cluster-api-apiserver/apiserver.crt" + - "--tls-private-key-file=/var/run/cluster-api-apiserver/apiserver.key" + - "--secure-port=7443" + image: cluster-operator:latest + ports: + - containerPort: 7443 + protocol: TCP + terminationMessagePath: /dev/termination-log + volumeMounts: + - mountPath: /var/run/cluster-api-apiserver + name: apiserver-ssl + readOnly: true + readinessProbe: + httpGet: + port: 7443 + path: /healthz + 
scheme: HTTPS + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + httpGet: + port: 7443 + path: /healthz + scheme: HTTPS + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 - name: etcd # TODO: fixed tag here? image: quay.io/coreos/etcd:latest @@ -306,6 +344,25 @@ objects: targetPort: 6443 nodePort: 30443 +# Service for the Cluster API Server +- kind: Service + apiVersion: v1 + metadata: + name: cluster-api-apiserver + namespace: ${CLUSTER_OPERATOR_NAMESPACE} + labels: + app: cluster-operator-apiserver + spec: + type: NodePort + selector: + app: cluster-operator-apiserver + ports: + - name: secure + protocol: TCP + port: 443 + targetPort: 7443 + nodePort: 30444 + # Registration of the cluster-operator API with the aggregator - apiVersion: apiregistration.k8s.io/v1beta1 kind: APIService @@ -333,7 +390,7 @@ objects: version: v1alpha1 service: namespace: ${CLUSTER_OPERATOR_NAMESPACE} - name: cluster-operator-apiserver + name: cluster-api-apiserver caBundle: ${SERVING_CA} groupPriorityMinimum: 10000 versionPriority: 20 diff --git a/glide.lock b/glide.lock index a38e12592..b514b0cdd 100644 --- a/glide.lock +++ b/glide.lock @@ -1,8 +1,15 @@ -hash: 3fbb58c4a691706e5d094889e048a77402f2a464e8366e6019396ba4ea46356c -updated: 2018-07-05T17:06:40.980911839-04:00 +hash: 1712058598cc8f0ae49b5ae4bbc8c05faa63679ebc4e66b8b0364030e8628f32 +updated: 2018-07-25T17:28:19.698712279-04:00 imports: - name: bitbucket.org/ww/goautoneg version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 +- name: cloud.google.com/go + version: 3b1ae45394a234c385be014e9a488f2bb6eef821 + subpackages: + - compute/metadata + - internal +- name: github.com/asaskevich/govalidator + version: f9ffefc3facfbe0caee3fea233cbb6e8208f4541 - name: github.com/aws/aws-sdk-go version: 7f68df2a5baf19398d17def23a514a6e617e5937 subpackages: @@ -36,6 +43,13 @@ imports: - service/elb - service/elb/elbiface - service/sts +- name: github.com/Azure/go-autorest + version: e14a70c556c8e0db173358d1a903dca345a8e75e + subpackages: + - autorest + - autorest/adal + - autorest/azure + - autorest/date - name: github.com/beorn7/perks version: 3ac7bf7a47d159a033b107610db8a1b6575507a4 subpackages: @@ -121,6 +135,8 @@ imports: version: 782f4967f2dc4564575ca782fe2d04090b5faca8 subpackages: - spew +- name: github.com/dgrijalva/jwt-go + version: 01aeca54ebda6e0fbfafd0a524d234159c05ec20 - name: github.com/elazarl/go-bindata-assetfs version: 3dcc96556217539f50599357fb481ac0dc7439b9 - name: github.com/emicklei/go-restful @@ -134,13 +150,21 @@ imports: - name: github.com/ghodss/yaml version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee - name: github.com/go-ini/ini - version: cec2bdc49009247305a260b082a18e802d0fe292 + version: d58d458bec3cb5adec4b7ddb41131855eac0b33f +- name: github.com/go-openapi/analysis + version: 8ed83f2ea9f00f945516462951a288eaa68bf0d6 +- name: github.com/go-openapi/errors + version: b2b2befaf267d082d779bcef52d682a47c779517 - name: github.com/go-openapi/jsonpointer version: 46af16f9f7b149af66e5d1bd010e3574dc06de98 - name: github.com/go-openapi/jsonreference version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272 +- name: github.com/go-openapi/loads + version: 2a2b323bab96e6b1fdee110e57d959322446e9c9 - name: github.com/go-openapi/spec version: 7abd5745472fff5eb3685386d5fb8bf38683154d +- name: github.com/go-openapi/strfmt + version: 913ee058e387ac83a67e2d9f13acecdcd5769fc6 - name: github.com/go-openapi/swag version: 
f3f9494671f93fcff853e3c6e9e948b3eb71e590 - name: github.com/gogo/protobuf @@ -178,6 +202,15 @@ imports: - OpenAPIv2 - compiler - extensions +- name: github.com/gophercloud/gophercloud + version: 8183543f90d1aef267a5ecc209f2e0715b355acb + subpackages: + - openstack + - openstack/identity/v2/tenants + - openstack/identity/v2/tokens + - openstack/identity/v3/tokens + - openstack/utils + - pagination - name: github.com/gorilla/context version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42 - name: github.com/gorilla/mux @@ -217,10 +250,13 @@ imports: - name: github.com/kubernetes-incubator/apiserver-builder version: 14201e804a58a140011a0044b15331f477535f91 subpackages: + - pkg/apiserver - pkg/builders + - pkg/cmd/server - pkg/controller + - pkg/validators - name: github.com/kubernetes/repo-infra - version: c2f9667a4c29e70a39b0e89db2d4f0cab907dbee + version: 6a161d975821e4204dffde89f86065edeb7b6db3 - name: github.com/mailru/easyjson version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d subpackages: @@ -231,6 +267,8 @@ imports: version: fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a subpackages: - pbutil +- name: github.com/mitchellh/mapstructure + version: f15292f7a699fcc1a38a80977f80a046874ba8ac - name: github.com/mxk/go-flowrate version: cca7078d478f8520f85629ad7c68962d31ed7682 subpackages: @@ -238,9 +276,15 @@ imports: - name: github.com/NYTimes/gziphandler version: 56545f4a5d46df9a6648819d1664c3a03a13ffdb - name: github.com/openshift/api - version: aaea23fe8f58729c8196c8e7f4f40d12bd0e2135 + version: fc4c02c9169b85b9a80353f11ac15ce542a0944a + subpackages: + - apps/v1 - name: github.com/openshift/client-go - version: 4688ad28de2e88110c0ea30179c51b9b205f99be + version: 5b198a036f098f34fbf0c42768382d80708cd2a9 + subpackages: + - apps/clientset/versioned + - apps/clientset/versioned/scheme + - apps/clientset/versioned/typed/apps/v1 - name: github.com/pborman/uuid version: ca53cad383cad2479bbba7f7a1a05797ec1386e4 - name: github.com/peterbourgon/diskv @@ -299,6 +343,7 @@ imports: version: 1c05540f6879653db88113bc4a2b70aec4bd491f subpackages: - context + - context/ctxhttp - html - html/atom - http2 @@ -308,6 +353,13 @@ imports: - lex/httplex - trace - websocket +- name: golang.org/x/oauth2 + version: a6bd8cefa1811bd24b86f8902872e4e8225f74c4 + subpackages: + - google + - internal + - jws + - jwt - name: golang.org/x/sys version: 95c6576299259db960f6c5b9b69ea52422860fce subpackages: @@ -331,6 +383,18 @@ imports: version: f51c12702a4d776e4c1fa9b0fabab841babae631 subpackages: - rate +- name: google.golang.org/appengine + version: b1f26356af11148e710935ed1ac8a7f5702c7612 + subpackages: + - internal + - internal/app_identity + - internal/base + - internal/datastore + - internal/log + - internal/modules + - internal/remote_api + - internal/urlfetch + - urlfetch - name: google.golang.org/genproto version: 09f6ed296fc66555a25fe4ce95173148778dfa85 subpackages: @@ -353,6 +417,11 @@ imports: - transport - name: gopkg.in/inf.v0 version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 +- name: gopkg.in/mgo.v2 + version: 9856a29383ce1c59f308dd1cf0363a79b5bef6b5 + subpackages: + - bson + - internal/json - name: gopkg.in/natefinch/lumberjack.v2 version: 20b71e5b60d756d3d2f80def009790325acc2b23 - name: gopkg.in/yaml.v2 @@ -669,9 +738,15 @@ imports: - listers/storage/v1alpha1 - listers/storage/v1beta1 - pkg/version + - plugin/pkg/client/auth + - plugin/pkg/client/auth/azure + - plugin/pkg/client/auth/gcp + - plugin/pkg/client/auth/oidc + - plugin/pkg/client/auth/openstack - rest - rest/watch - testing + - third_party/forked/golang/template 
- tools/auth - tools/cache - tools/clientcmd @@ -690,6 +765,7 @@ imports: - util/flowcontrol - util/homedir - util/integer + - util/jsonpath - util/retry - util/workqueue - name: k8s.io/code-generator @@ -705,7 +781,7 @@ imports: - pkg/util - pkg/util/proto - name: sigs.k8s.io/cluster-api - version: dfa371814c0b39a284662b897ba9b5df4749e973 + version: 0980af3dad42d3d1c4e2e605a7b96560331e8c12 subpackages: - pkg/apis - pkg/apis/cluster @@ -722,9 +798,14 @@ imports: - pkg/client/informers_generated/externalversions/cluster/v1alpha1 - pkg/client/informers_generated/externalversions/internalinterfaces - pkg/client/listers_generated/cluster/v1alpha1 + - pkg/controller - pkg/controller/config - pkg/controller/machine + - pkg/controller/machinedeployment + - pkg/controller/machinedeployment/util + - pkg/controller/machineset - pkg/controller/noderefutil - pkg/controller/sharedinformers + - pkg/openapi - pkg/util testImports: [] diff --git a/glide.yaml b/glide.yaml index 20f58ac73..4e8a4832e 100644 --- a/glide.yaml +++ b/glide.yaml @@ -27,3 +27,5 @@ import: - package: github.com/stretchr/testify - package: github.com/openshift/api - package: github.com/openshift/client-go +- package: github.com/go-openapi/analysis + version: 8ed83f2ea9f00f945516462951a288eaa68bf0d6 diff --git a/pkg/api/register.go b/pkg/api/register.go index 27d15f299..3ffe5fb26 100644 --- a/pkg/api/register.go +++ b/pkg/api/register.go @@ -17,32 +17,30 @@ limitations under the License. package api import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" - co_install "github.com/openshift/cluster-operator/pkg/apis/clusteroperator/install" - - "github.com/kubernetes-incubator/apiserver-builder/pkg/builders" - ca_install "sigs.k8s.io/cluster-api/pkg/apis/cluster/install" + "github.com/openshift/cluster-operator/pkg/apis/clusteroperator/install" ) var ( - groupFactoryRegistry = builders.GroupFactoryRegistry + groupFactoryRegistry = make(announced.APIGroupFactoryRegistry) // Registry is an instance of an API registry. - Registry = builders.Registry + Registry = registered.NewOrDie("") // Scheme for API object types - Scheme = builders.Scheme + Scheme = runtime.NewScheme() // ParameterCodec handles versioning of objects that are converted to query parameters. 
 	ParameterCodec = runtime.NewParameterCodec(Scheme)
 
 	// Codecs for creating a server config
-	Codecs = builders.Codecs
+	Codecs = serializer.NewCodecFactory(Scheme)
 )
 
 func init() {
-	co_install.Install(groupFactoryRegistry, Registry, Scheme)
-
-	ca_install.Install(groupFactoryRegistry, Registry, Scheme)
+	install.Install(groupFactoryRegistry, Registry, Scheme)
 
 	// we need to add the options to empty v1
 	// TODO fix the server code to avoid this
diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go
index 18b2681a9..b57991b3b 100644
--- a/pkg/apiserver/apiserver.go
+++ b/pkg/apiserver/apiserver.go
@@ -20,7 +20,6 @@ import (
 	cov1alpha1 "github.com/openshift/cluster-operator/pkg/apis/clusteroperator/v1alpha1"
 	genericapiserver "k8s.io/apiserver/pkg/server"
 	serverstorage "k8s.io/apiserver/pkg/server/storage"
-	cav1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
 )
 
 // ClusterOperatorAPIServer contains the base GenericAPIServer along with other
@@ -39,7 +38,6 @@ func DefaultAPIResourceConfigSource() *serverstorage.ResourceConfig {
 	ret := serverstorage.NewResourceConfig()
 	ret.EnableVersions(
 		cov1alpha1.SchemeGroupVersion,
-		cav1alpha1.SchemeGroupVersion,
 	)
 
 	return ret
diff --git a/pkg/apiserver/etcd_config.go b/pkg/apiserver/etcd_config.go
index c970686a4..18ea6af5a 100644
--- a/pkg/apiserver/etcd_config.go
+++ b/pkg/apiserver/etcd_config.go
@@ -23,10 +23,6 @@ import (
 	"k8s.io/apiserver/pkg/registry/rest"
 	genericapiserver "k8s.io/apiserver/pkg/server"
 	"k8s.io/apiserver/pkg/server/storage"
-
-	"github.com/openshift/cluster-operator/pkg/apis/clusterapi/validation"
-	ca_apis "sigs.k8s.io/cluster-api/pkg/apis"
-	ca "sigs.k8s.io/cluster-api/pkg/apis/cluster"
 )
 
 // EtcdConfig contains a generic API server Config along with config specific to
@@ -101,9 +97,6 @@ func (c completedEtcdConfig) NewServer(stopCh <-chan struct{}) (*ClusterOperator
 	}
 
 	glog.V(4).Infoln("Installing API groups")
-
-	groupInfos := make([]*genericapiserver.APIGroupInfo, 0)
-
 	// default namespace doesn't matter for etcd
 	providers := restStorageProviders("" /* default namespace */, nil)
 	for _, provider := range providers {
@@ -114,22 +107,10 @@ func (c completedEtcdConfig) NewServer(stopCh <-chan struct{}) (*ClusterOperator
 		} else if err != nil {
 			return nil, err
 		}
-		groupInfos = append(groupInfos, groupInfo)
-	}
-
-	caAPIBuilder := ca_apis.GetClusterAPIBuilder()
-	validation.ReplaceStorageBuilder(caAPIBuilder, ca.InternalMachine, validation.ReplaceMachineStorageBuilder)
-	caGroupInfo := caAPIBuilder.Build(roFactory)
-	if caGroupInfo != nil {
-		groupInfos = append(groupInfos, caGroupInfo)
-	} else {
-		glog.V(4).Infoln("Could not build cluster API groups")
-	}
 
-	for _, groupInfo := range groupInfos {
-		glog.V(4).Infof("Installing API group %v", groupInfo.GroupMeta.GroupVersion)
+		glog.V(4).Infof("Installing API group %v", provider.GroupName())
 		if err := s.GenericAPIServer.InstallAPIGroup(groupInfo); err != nil {
-			glog.Fatalf("Error installing API group %v: %v", groupInfo.GroupMeta.GroupVersion, err)
+			glog.Fatalf("Error installing API group %v: %v", provider.GroupName(), err)
 		}
 
 		// We've successfully installed, so hook the stopCh to the destroy func of all the successfully installed apigroups
diff --git a/pkg/clusterapi/aws/actuator.go b/pkg/clusterapi/aws/actuator.go
index e9ecc667f..78e461be4 100644
--- a/pkg/clusterapi/aws/actuator.go
+++ b/pkg/clusterapi/aws/actuator.go
@@ -31,7 +31,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/client-go/kubernetes"
 
-	capicommon
"sigs.k8s.io/cluster-api/pkg/apis/cluster/common" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" clusterclient "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" @@ -133,8 +132,6 @@ func (a *Actuator) removeStoppedMachine(machine *clusterv1.Machine, client Clien func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (*ec2.Instance, error) { mLog := clustoplog.WithMachine(a.logger, machine) - isMaster := controller.MachineHasRole(machine, capicommon.MasterRole) - // Extract cluster operator cluster clusterSpec, err := controller.AWSClusterProviderConfigFromCluster(cluster) if err != nil { @@ -146,6 +143,8 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1. return nil, err } + isMaster := coMachineSetSpec.NodeType == cov1.NodeTypeMaster + region := clusterSpec.Hardware.Region mLog.Debugf("Obtaining EC2 client for region %q", region) client, err := a.clientBuilder(a.kubeClient, coMachineSetSpec, machine.Namespace, region) @@ -220,7 +219,7 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1. } // Determine security groups - describeSecurityGroupsInput := buildDescribeSecurityGroupsInput(vpcID, cluster.Name, controller.MachineHasRole(machine, capicommon.MasterRole), coMachineSetSpec.Infra) + describeSecurityGroupsInput := buildDescribeSecurityGroupsInput(vpcID, cluster.Name, isMaster, coMachineSetSpec.Infra) describeSecurityGroupsOutput, err := client.DescribeSecurityGroups(describeSecurityGroupsInput) if err != nil { return nil, err @@ -300,7 +299,7 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1. // Only compute nodes should get user data, and it's quite important that masters do not as the // AWS actuator for these is running on the root CO cluster currently, and we do not want to leak // root CO cluster bootstrap kubeconfigs to the target cluster. 
- userData, err := a.userDataGenerator(controller.MachineHasRole(machine, capicommon.MasterRole), coMachineSetSpec.Infra) + userData, err := a.userDataGenerator(isMaster, coMachineSetSpec.Infra) if err != nil { return nil, err } @@ -637,7 +636,7 @@ func getBootstrapKubeconfig() (string, error) { } func iamRole(machine *clusterv1.Machine, clusterID string) string { - if controller.MachineHasRole(machine, capicommon.MasterRole) { + if isMaster, err := controller.MachineIsMaster(machine); isMaster && err == nil { return masterIAMRole + "_" + clusterID } return defaultIAMRole + "_" + clusterID diff --git a/pkg/clusterapi/aws/actuator_test.go b/pkg/clusterapi/aws/actuator_test.go index 1ab6b2f3a..0104548cf 100644 --- a/pkg/clusterapi/aws/actuator_test.go +++ b/pkg/clusterapi/aws/actuator_test.go @@ -32,7 +32,6 @@ import ( mockaws "github.com/openshift/cluster-operator/pkg/clusterapi/aws/mock" "github.com/openshift/cluster-operator/pkg/controller" - capicommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" capiv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" capiclientfake "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake" @@ -777,11 +776,6 @@ func testMachine(name, clusterName string, nodeType clustopv1.NodeType, isInfra }, }, } - if nodeType == clustopv1.NodeTypeMaster { - machine.Spec.Roles = []capicommon.MachineRole{capicommon.MasterRole} - } else { - machine.Spec.Roles = []capicommon.MachineRole{capicommon.NodeRole} - } if currentStatus != nil { /* rawStatus, _ := controller.EncodeAWSMachineProviderStatus(currentStatus) diff --git a/pkg/controller/awselb/aws_elb_controller.go b/pkg/controller/awselb/aws_elb_controller.go index 648bbb86f..04811c669 100644 --- a/pkg/controller/awselb/aws_elb_controller.go +++ b/pkg/controller/awselb/aws_elb_controller.go @@ -38,7 +38,6 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elb" - capicommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" capiv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" capiclient "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" informers "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1" @@ -130,7 +129,12 @@ type Controller struct { func (c *Controller) addMachine(obj interface{}) { machine := obj.(*capiv1.Machine) - if !controller.MachineHasRole(machine, capicommon.MasterRole) { + isMaster, err := controller.MachineIsMaster(machine) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't decode provider config from %#v: %v", machine, err)) + return + } + if !isMaster { clustoplog.WithMachine(c.logger, machine).Debug("skipping non-master machine") return } @@ -143,7 +147,12 @@ func (c *Controller) updateMachine(old, cur interface{}) { oldMachine := old.(*capiv1.Machine) curMachine := cur.(*capiv1.Machine) - if !controller.MachineHasRole(curMachine, capicommon.MasterRole) { + isMaster, err := controller.MachineIsMaster(curMachine) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't decode provider config from %#v: %v", curMachine, err)) + return + } + if !isMaster { clustoplog.WithMachine(c.logger, curMachine).Debug("skipping non-master machine") return } @@ -168,7 +177,13 @@ func (c *Controller) deleteMachine(obj interface{}) { } } - if !controller.MachineHasRole(machine, capicommon.MasterRole) { + isMaster, err := controller.MachineIsMaster(machine) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't decode provider config from %#v: %v", machine, err)) + return 
+ } + + if !isMaster { clustoplog.WithMachine(c.logger, machine).Debug("skipping non-master machine") return } diff --git a/pkg/controller/awselb/aws_elb_controller_test.go b/pkg/controller/awselb/aws_elb_controller_test.go index 14056daed..950144085 100644 --- a/pkg/controller/awselb/aws_elb_controller_test.go +++ b/pkg/controller/awselb/aws_elb_controller_test.go @@ -35,7 +35,6 @@ import ( mockaws "github.com/openshift/cluster-operator/pkg/clusterapi/aws/mock" "github.com/openshift/cluster-operator/pkg/controller" - capicommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" capiv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" capiclientfake "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake" capiinformers "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions" @@ -312,11 +311,6 @@ func testMachine(generation int64, name, clusterName string, nodeType clustopv1. }, }, } - if nodeType == clustopv1.NodeTypeMaster { - machine.Spec.Roles = []capicommon.MachineRole{capicommon.MasterRole} - } else { - machine.Spec.Roles = []capicommon.MachineRole{capicommon.NodeRole} - } if currentStatus != nil { rawStatus, _ := controller.EncodeAWSMachineProviderStatus(currentStatus) machine.Status.ProviderStatus = rawStatus diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index 9e4888748..e99531f5d 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -37,7 +37,6 @@ import ( "github.com/openshift/cluster-operator/pkg/kubernetes/pkg/util/metrics" - clustercommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" capi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" capiclientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" capiinformers "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1" @@ -716,7 +715,6 @@ func buildMasterMachineSet(clusterDeployment *clustop.ClusterDeployment, cluster machineSet.Spec.Replicas = &replicas machineSet.Spec.Template.Labels = machineSetLabels machineSet.Spec.Template.Spec.Labels = machineSetLabels - machineSet.Spec.Template.Spec.Roles = []clustercommon.MachineRole{clustercommon.MasterRole} providerConfig, err := controller.MachineProviderConfigFromMachineSetConfig(machineSetConfig, &clusterDeployment.Spec, clusterVersion) if err != nil { diff --git a/pkg/controller/clusterinstall/controller.go b/pkg/controller/clusterinstall/controller.go index dc517c92d..4e1eec60f 100644 --- a/pkg/controller/clusterinstall/controller.go +++ b/pkg/controller/clusterinstall/controller.go @@ -41,7 +41,6 @@ import ( "github.com/openshift/cluster-operator/pkg/controller" "github.com/openshift/cluster-operator/pkg/kubernetes/pkg/util/metrics" "github.com/openshift/cluster-operator/pkg/logging" - capicommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" capi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" capiclientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" capiinformers "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1" @@ -345,12 +344,12 @@ func (c *Controller) handleErr(err error, key interface{}) { } func isMasterMachineSet(machineSet *capi.MachineSet) bool { - for _, role := range machineSet.Spec.Template.Spec.Roles { - if role == capicommon.MasterRole { - return true - } + coMachineSetSpec, err := 
diff --git a/pkg/controller/clusterinstall/controller.go b/pkg/controller/clusterinstall/controller.go
index dc517c92d..4e1eec60f 100644
--- a/pkg/controller/clusterinstall/controller.go
+++ b/pkg/controller/clusterinstall/controller.go
@@ -41,7 +41,6 @@ import (
 	"github.com/openshift/cluster-operator/pkg/controller"
 	"github.com/openshift/cluster-operator/pkg/kubernetes/pkg/util/metrics"
 	"github.com/openshift/cluster-operator/pkg/logging"
-	capicommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
 	capi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
 	capiclientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset"
 	capiinformers "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1"
@@ -345,12 +344,12 @@ func (c *Controller) handleErr(err error, key interface{}) {
 }
 
 func isMasterMachineSet(machineSet *capi.MachineSet) bool {
-	for _, role := range machineSet.Spec.Template.Spec.Roles {
-		if role == capicommon.MasterRole {
-			return true
-		}
+	coMachineSetSpec, err := controller.MachineSetSpecFromClusterAPIMachineSpec(&machineSet.Spec.Template.Spec)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("Couldn't decode provider config from %#v: %v", machineSet, err))
+		return false
 	}
-	return false
+	return coMachineSetSpec.NodeType == clustop.NodeTypeMaster
 }
 
 type jobFactory func(string) (*v1batch.Job, *kapi.ConfigMap, error)
diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go
index 5bb3f35f6..6d7d34229 100644
--- a/pkg/controller/controller_utils.go
+++ b/pkg/controller/controller_utils.go
@@ -39,7 +39,6 @@ import (
 	"github.com/openshift/cluster-operator/pkg/api"
 	clusteroperator "github.com/openshift/cluster-operator/pkg/apis/clusteroperator/v1alpha1"
 	clustoplister "github.com/openshift/cluster-operator/pkg/client/listers_generated/clusteroperator/v1alpha1"
-	capicommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
 	clusterapi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
 	capilister "sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1"
 )
@@ -635,24 +634,32 @@ func GetMasterMachineSet(cluster *clusteroperator.CombinedCluster, machineSetLis
 	if err != nil {
 		return nil, err
 	}
-	for _, ms := range machineSets {
-		for _, role := range ms.Spec.Template.Spec.Roles {
-			if role == capicommon.MasterRole {
-				return ms, nil
-			}
+	// Given that compute/infra machinesets get created on the target cluster, only
+	// the master machineset is expected to exist.
+	for _, machineSet := range machineSets {
+		if machineSet.DeletionTimestamp.IsZero() {
+			return machineSet, nil
 		}
 	}
 	return nil, fmt.Errorf("no master machineset found")
 }
 
-// MachineHasRole returns true if the machine has the given cluster-api role.
-func MachineHasRole(machine *clusterapi.Machine, role capicommon.MachineRole) bool {
-	for _, r := range machine.Spec.Roles {
-		if r == role {
-			return true
-		}
+// MachineIsMaster returns true if the machine is part of a cluster's control plane
+func MachineIsMaster(machine *clusterapi.Machine) (bool, error) {
+	coMachineSetSpec, err := MachineSetSpecFromClusterAPIMachineSpec(&machine.Spec)
+	if err != nil {
+		return false, err
 	}
-	return false
+	return coMachineSetSpec.NodeType == clusteroperator.NodeTypeMaster, nil
+}
+
+// MachineIsInfra returns true if the machine is part of a cluster's infrastructure
+func MachineIsInfra(machine *clusterapi.Machine) (bool, error) {
+	coMachineSetSpec, err := MachineSetSpecFromClusterAPIMachineSpec(&machine.Spec)
+	if err != nil {
+		return false, err
+	}
+	return coMachineSetSpec.Infra, nil
 }
 
 // ELBMasterExternalName gets the name of the external master ELB for the cluster
diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go
index e6e3143c2..f6f7ec3c4 100644
--- a/pkg/controller/controller_utils_test.go
+++ b/pkg/controller/controller_utils_test.go
@@ -32,7 +32,6 @@ import (
 	clusteroperator "github.com/openshift/cluster-operator/pkg/apis/clusteroperator/v1alpha1"
 
-	clustercommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
 	clusterapi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
 )
@@ -834,9 +833,7 @@ func newClusterVersion(name string) *clusteroperator.ClusterVersion {
 }
 
 func newMachineSpec(msSpec *clusteroperator.MachineSetSpec) clusterapi.MachineSpec {
-	ms := clusterapi.MachineSpec{
-		Roles: []clustercommon.MachineRole{"Master"},
-	}
+	ms := clusterapi.MachineSpec{}
 	providerConfig, _ := MachineProviderConfigFromMachineSetSpec(msSpec)
 	ms.ProviderConfig.Value = providerConfig
 	return ms
 }
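With `Spec.Roles` removed, a machine's master/infra classification now lives entirely in its serialized provider config. A minimal round-trip sketch using the helpers in this diff; the exact nesting of `NodeType` and `Infra` inside `MachineSetSpec` (via an embedded `MachineSetConfig`) is an assumption, and error handling is elided for brevity:

```go
package main

import (
	"fmt"

	clustopv1 "github.com/openshift/cluster-operator/pkg/apis/clusteroperator/v1alpha1"
	"github.com/openshift/cluster-operator/pkg/controller"
	capiv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
)

func main() {
	// Encode a master+infra machine set spec into the cluster-api provider config.
	providerConfig, _ := controller.MachineProviderConfigFromMachineSetSpec(&clustopv1.MachineSetSpec{
		MachineSetConfig: clustopv1.MachineSetConfig{ // assumed field layout
			NodeType: clustopv1.NodeTypeMaster,
			Infra:    true,
		},
	})

	machine := &capiv1.Machine{}
	machine.Spec.ProviderConfig.Value = providerConfig

	// Both checks decode the provider config rather than consulting roles.
	isMaster, _ := controller.MachineIsMaster(machine) // true
	isInfra, _ := controller.MachineIsInfra(machine)   // true
	fmt.Println(isMaster, isInfra)
}
```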
diff --git a/test/integration/controller_test.go b/test/integration/controller_test.go
index a1366bac1..b33df98a4 100644
--- a/test/integration/controller_test.go
+++ b/test/integration/controller_test.go
@@ -37,6 +37,7 @@ import (
 	// avoid error `clusteroperator/v1alpha1 is not enabled`
 	_ "github.com/openshift/cluster-operator/pkg/apis/clusteroperator/install"
 
+	capiservertesting "github.com/openshift/cluster-operator/cmd/cluster-api-apiserver/testing"
 	servertesting "github.com/openshift/cluster-operator/cmd/cluster-operator-apiserver/app/testing"
 	clustopv1alpha1 "github.com/openshift/cluster-operator/pkg/apis/clusteroperator/v1alpha1"
 	clustopclientset "github.com/openshift/cluster-operator/pkg/client/clientset_generated/clientset"
@@ -49,7 +50,6 @@ import (
 	infracontroller "github.com/openshift/cluster-operator/pkg/controller/infra"
 	mastercontroller "github.com/openshift/cluster-operator/pkg/controller/master"
 	nodeconfigcontroller "github.com/openshift/cluster-operator/pkg/controller/nodeconfig"
-	capicommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
 	capiv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
 	capiclientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset"
 	capifakeclientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake"
@@ -62,7 +62,7 @@ const (
 	testClusterVersionName = "v3-9"
 )
 
-func testMachineSet(name string, replicas int32, roles []capicommon.MachineRole, infra bool) *capiv1alpha1.MachineSet {
+func testMachineSet(name string, replicas int32, infra bool) *capiv1alpha1.MachineSet {
 	return &capiv1alpha1.MachineSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: testNamespace,
@@ -85,7 +85,6 @@ func testMachineSet(name string, replicas int32, roles []capicommon.MachineRole,
 			},
 		},
 		Spec: capiv1alpha1.MachineSpec{
-			Roles: roles,
 			ProviderConfig: capiv1alpha1.ProviderConfig{
 				Value: func() *runtime.RawExtension {
 					r, _ := controller.MachineProviderConfigFromMachineSetSpec(&clustopv1alpha1.MachineSetSpec{
@@ -180,6 +179,7 @@ func TestClusterCreate(t *testing.T) {
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
 			kubeClient, _, clustopClient, capiClient, _, tearDown := startServerAndControllers(t)
+			defer tearDown()
 
 			clusterVersion := &clustopv1alpha1.ClusterVersion{
@@ -342,6 +342,9 @@ func startServerAndControllers(t *testing.T) (
 	// start the cluster-operator api server
 	apiServerClientConfig, shutdownServer := servertesting.StartTestServerOrDie(t)
 
+	// start the cluster-api api server
+	capiServerClientConfig, shutdownCapiServer := capiservertesting.StartTestServerOrDie(t)
+
 	// create a cluster-operator client
 	clustopClient, err := clustopclientset.NewForConfig(apiServerClientConfig)
 	if err != nil {
@@ -349,7 +352,7 @@ func startServerAndControllers(t *testing.T) (
 	}
 
 	// create a cluster-api client
-	capiClient, err := capiclientset.NewForConfig(apiServerClientConfig)
+	capiClient, err := capiclientset.NewForConfig(capiServerClientConfig)
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -475,6 +478,8 @@ func startServerAndControllers(t *testing.T) (
 		wg.Wait()
 		// Shut down api server
 		shutdownServer()
+		// Shut down capi server
+		shutdownCapiServer()
 	}
 
 	return fakeKubeClient, kubeWatch, clustopClient, capiClient, fakeCAPIClient, shutdown
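The integration tests now boot two API servers, one serving the cluster-operator types and one serving the cluster-api types, and tear both down once the controllers stop. A minimal sketch of that pairing; `StartTestServerOrDie` returning a client config plus a shutdown func is inferred from this diff, so treat the exact signatures as assumptions:

```go
package integration

import (
	"testing"

	restclient "k8s.io/client-go/rest"

	capiservertesting "github.com/openshift/cluster-operator/cmd/cluster-api-apiserver/testing"
	servertesting "github.com/openshift/cluster-operator/cmd/cluster-operator-apiserver/app/testing"
)

// startBothServers boots the cluster-operator and cluster-api apiservers and
// returns a single teardown that stops them in reverse start order.
func startBothServers(t *testing.T) (clustopCfg, capiCfg *restclient.Config, teardown func()) {
	clustopCfg, shutdownServer := servertesting.StartTestServerOrDie(t)
	capiCfg, shutdownCapiServer := capiservertesting.StartTestServerOrDie(t)
	return clustopCfg, capiCfg, func() {
		shutdownCapiServer()
		shutdownServer()
	}
}
```

Clients for each API group must then be built from the matching config, as the switch to `capiclientset.NewForConfig(capiServerClientConfig)` above shows.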
diff --git a/vendor/cloud.google.com/go/.travis.yml b/vendor/cloud.google.com/go/.travis.yml
new file mode 100644
index 000000000..76847fcc3
--- /dev/null
+++ b/vendor/cloud.google.com/go/.travis.yml
@@ -0,0 +1,11 @@
+sudo: false
+language: go
+go:
+- 1.6
+- 1.7
+install:
+- go get -v cloud.google.com/go/...
+script:
+- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
+- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
+  go test -race -v cloud.google.com/go/...
diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS
new file mode 100644
index 000000000..c364af1da
--- /dev/null
+++ b/vendor/cloud.google.com/go/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of cloud authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as:
+# Name or Organization
+# The email address is not required for organizations.
+
+Filippo Valsorda
+Google Inc.
+Ingo Oeser
+Palm Stone Games, Inc.
+Paweł Knap
+Péter Szilágyi
+Tyler Treat
diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md
new file mode 100644
index 000000000..59fb44df7
--- /dev/null
+++ b/vendor/cloud.google.com/go/CONTRIBUTING.md
@@ -0,0 +1,126 @@
+# Contributing
+
+1. Sign one of the contributor license agreements below.
+1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
+1. Get the cloud package by running `go get -d cloud.google.com/go`.
+   1. If you have already checked out the source, make sure that the remote git
+      origin is https://code.googlesource.com/gocloud:
+
+         git remote set-url origin https://code.googlesource.com/gocloud
+1. Make sure your auth is configured correctly by visiting
+   https://code.googlesource.com, clicking "Generate Password", and following
+   the directions.
+1. Make changes and create a change by running `git codereview change `,
+provide a commit message, and use `git codereview mail` to create a Gerrit CL.
+1. Keep amending to the change and mail as you receive feedback.
+
+## Integration Tests
+
+In addition to the unit tests, you may run the integration test suite.
+
+To run the integration tests, you must create and configure a project in the
+Google Developers Console.
+
+After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
+Ensure the project-level **Owner** [IAM role](console.cloud.google.com/iam-admin/iam/project)
+(or **Editor** and **Logs Configuration Writer** roles) is added to the
+service account.
+
+Once you create a project, set the following environment variables to be able to
+run against the actual APIs.
+
+- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
+- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
+ +Install the [gcloud command-line tool][gcloudcli] to your machine and use it +to create the indexes used in the datastore integration tests with indexes +found in `datastore/testdata/index.yaml`: + +From the project's root directory: + +``` sh +# Set the default project in your env +$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID + +# Authenticate the gcloud tool with your account +$ gcloud auth login + +# Create the indexes +$ gcloud preview datastore create-indexes datastore/testdata/index.yaml +``` + +The Sink integration tests in preview/logging require a Google Cloud storage +bucket with the same name as your test project, and with the Stackdriver Logging +service account as owner: +``` sh +$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID +$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID +``` + +Once you've set the environment variables, you can run the integration tests by +running: + +``` sh +$ go test -v cloud.google.com/go/... +``` + +## Contributor License Agreements + +Before we can accept your pull requests you'll need to sign a Contributor +License Agreement (CLA): + +- **If you are an individual writing original source code** and **you own the +- intellectual property**, then you'll need to sign an [individual CLA][indvcla]. +- **If you work for a company that wants to allow you to contribute your work**, +then you'll need to sign a [corporate CLA][corpcla]. + +You can sign these electronically (just scroll to the bottom). After that, +we'll be able to accept your pull requests. + +## Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. 
+ +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) + +[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ +[indvcla]: https://developers.google.com/open-source/cla/individual +[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS new file mode 100644 index 000000000..07509ccb7 --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -0,0 +1,34 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +# Keep the list alphabetically sorted. + +Andreas Litt +Andrew Gerrand +Brad Fitzpatrick +Burcu Dogan +Dave Day +David Sansome +David Symonds +Filippo Valsorda +Glenn Lewis +Ingo Oeser +Johan Euphrosine +Jonathan Amsterdam +Luna Duclos +Michael McGreevy +Omar Jarjur +Paweł Knap +Péter Szilágyi +Sarah Adams +Toby Burress +Tuo Shan +Tyler Treat diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 000000000..a4c5efd82 --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md new file mode 100644 index 000000000..5e74c46a9 --- /dev/null +++ b/vendor/cloud.google.com/go/README.md @@ -0,0 +1,245 @@ +# Google Cloud for Go + +[![Build Status](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go) +[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go) + +``` go +import "cloud.google.com/go" +``` + +Go packages for Google Cloud Platform services. + +**NOTE:** These packages are under development, and may occasionally make +backwards-incompatible changes. + +**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). + +## News + +_September 8, 2016_ + +* New clients for some of Google's Machine Learning APIs: Vision, Speech, and +Natural Language. 
+ +* Preview version of a new [Stackdriver Logging][cloud-logging] client in +[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging). +This client uses gRPC as its transport layer, and supports log reading, sinks +and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly. + +## Supported APIs + +Google API | Status | Package +-------------------------------|--------------|----------------------------------------------------------- +[Datastore][cloud-datastore] | beta | [`cloud.google.com/go/datastore`][cloud-datastore-ref] +[Storage][cloud-storage] | beta | [`cloud.google.com/go/storage`][cloud-storage-ref] +[Pub/Sub][cloud-pubsub] | experimental | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] +[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] +[BigQuery][cloud-bigquery] | experimental | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] +[Logging][cloud-logging] | experimental | [`cloud.google.com/go/logging`][cloud-logging-ref] +[Vision][cloud-vision] | experimental | [`cloud.google.com/go/vision`][cloud-vision-ref] +[Language][cloud-language] | experimental | [`cloud.google.com/go/language/apiv1beta1`][cloud-language-ref] +[Speech][cloud-speech] | experimental | [`cloud.google.com/go/speech/apiv1beta`][cloud-speech-ref] + + +> **Experimental status**: the API is still being actively developed. As a +> result, it might change in backward-incompatible ways and is not recommended +> for production use. +> +> **Beta status**: the API is largely complete, but still has outstanding +> features and bugs to be addressed. There may be minor backwards-incompatible +> changes where necessary. +> +> **Stable status**: the API is mature and ready for production use. We will +> continue addressing bugs and feature requests. + +Documentation and examples are available at +https://godoc.org/cloud.google.com/go + +Visit or join the +[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce) +for updates on these packages. + +## Go Versions Supported + +We support the two most recent major versions of Go. If Google App Engine uses +an older version, we support that as well. You can see which versions are +currently supported by looking at the lines following `go:` in +[`.travis.yml`](.travis.yml). + +## Authorization + +By default, each API will use [Google Application Default Credentials][default-creds] +for authorization credentials used in calling the API endpoints. This will allow your +application to run in many environments without requiring explicit configuration. + +Manually-configured authorization can be achieved using the +[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to +create an `oauth2.TokenSource`. This token source can be passed to the `NewClient` +function for the relevant API using a +[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource) +option. + +## Google Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore) + +[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully- +managed, schemaless database for storing non-relational data. Cloud Datastore +automatically scales with your users and supports ACID transactions, high availability +of reads and writes, strong consistency for reads and ancestor queries, and eventual +consistency for all other queries. 
+ +Follow the [activation instructions][cloud-datastore-activation] to use the Google +Cloud Datastore API with your project. + +First create a `datastore.Client` to use throughout your application: + +```go +client, err := datastore.NewClient(ctx, "my-project-id") +if err != nil { + log.Fatalln(err) +} +``` + +Then use that client to interact with the API: + +```go +type Post struct { + Title string + Body string `datastore:",noindex"` + PublishedAt time.Time +} +keys := []*datastore.Key{ + datastore.NewKey(ctx, "Post", "post1", 0, nil), + datastore.NewKey(ctx, "Post", "post2", 0, nil), +} +posts := []*Post{ + {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, + {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, +} +if _, err := client.PutMulti(ctx, keys, posts); err != nil { + log.Fatal(err) +} +``` + +## Google Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) + +[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store +data on Google infrastructure with very high reliability, performance and availability, +and can be used to distribute large data objects to users via direct download. + +https://godoc.org/cloud.google.com/go/storage + +First create a `storage.Client` to use throughout your application: + +```go +client, err := storage.NewClient(ctx) +if err != nil { + log.Fatal(err) +} +``` + +```go +// Read the object1 from bucket. +rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) +if err != nil { + log.Fatal(err) +} +defer rc.Close() +body, err := ioutil.ReadAll(rc) +if err != nil { + log.Fatal(err) +} +``` + +## Google Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) + +[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect +your services with reliable, many-to-many, asynchronous messaging hosted on Google's +infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation +for building your own robust, global services. + +First create a `pubsub.Client` to use throughout your application: + +```go +client, err := pubsub.NewClient(ctx, "project-id") +if err != nil { + log.Fatal(err) +} +``` + +```go +// Publish "hello world" on topic1. +topic := client.Topic("topic1") +msgIDs, err := topic.Publish(ctx, &pubsub.Message{ + Data: []byte("hello world"), +}) +if err != nil { + log.Fatal(err) +} + +// Create an iterator to pull messages via subscription1. +it, err := client.Subscription("subscription1").Pull(ctx) +if err != nil { + log.Println(err) +} +defer it.Stop() + +// Consume N messages from the iterator. +for i := 0; i < N; i++ { + msg, err := it.Next() + if err == pubsub.Done { + break + } + if err != nil { + log.Fatalf("Failed to retrieve message: %v", err) + } + + fmt.Printf("Message %d: %s\n", i, msg.Data) + msg.Done(true) // Acknowledge that we've consumed the message. +} +``` + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md) +document for details. We're using Gerrit for our code reviews. Please don't open pull +requests against this repo, new pull requests will be automatically closed. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. 
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. + +[cloud-datastore]: https://cloud.google.com/datastore/ +[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore +[cloud-datastore-docs]: https://cloud.google.com/datastore/docs +[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate + +[cloud-pubsub]: https://cloud.google.com/pubsub/ +[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub +[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs + +[cloud-storage]: https://cloud.google.com/storage/ +[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage +[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview +[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets + +[cloud-bigtable]: https://cloud.google.com/bigtable/ +[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable + +[cloud-bigquery]: https://cloud.google.com/bigquery/ +[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery + +[cloud-logging]: https://cloud.google.com/logging/ +[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging + +[cloud-vision]: https://cloud.google.com/vision/ +[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision + +[cloud-language]: https://cloud.google.com/natural-language +[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1beta1 + +[cloud-speech]: https://cloud.google.com/speech +[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1beta1 + +[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials diff --git a/vendor/cloud.google.com/go/appveyor.yml b/vendor/cloud.google.com/go/appveyor.yml new file mode 100644 index 000000000..f9ba77476 --- /dev/null +++ b/vendor/cloud.google.com/go/appveyor.yml @@ -0,0 +1,26 @@ +# This file configures AppVeyor (http://www.appveyor.com), +# a Windows-based CI service similar to Travis. + +# Identifier for this run +version: "{build}" + +# Clone the repo into this path, which conforms to the standard +# Go workspace structure. +clone_folder: c:\gopath\src\cloud.google.com\go + +environment: + GOPATH: c:\gopath + +install: + # Info for debugging. + - echo %PATH% + - go version + - go env + - go get -v -d -t ./... + +# Provide a build script, or AppVeyor will call msbuild. +build_script: + - go install -v ./... + +test_script: + - go test -short -v ./... diff --git a/vendor/cloud.google.com/go/authexample_test.go b/vendor/cloud.google.com/go/authexample_test.go new file mode 100644 index 000000000..528be133b --- /dev/null +++ b/vendor/cloud.google.com/go/authexample_test.go @@ -0,0 +1,60 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cloud_test + +import ( + "cloud.google.com/go/datastore" + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +func Example_applicationDefaultCredentials() { + ctx := context.Background() + // Use Google Application Default Credentials to authorize and authenticate the client. + // More information about Application Default Credentials and how to enable is at + // https://developers.google.com/identity/protocols/application-default-credentials. + // + // This is the recommended way of authorizing and authenticating. + // + // Note: The example uses the datastore client, but the same steps apply to + // the other client libraries underneath this package. + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: handle error. + } + // Use the client. + _ = client +} + +func Example_serviceAccountFile() { + // Warning: The better way to use service accounts is to set GOOGLE_APPLICATION_CREDENTIALS + // and use the Application Default Credentials. + ctx := context.Background() + // Use a JSON key file associated with a Google service account to + // authenticate and authorize. + // Go to https://console.developers.google.com/permissions/serviceaccounts to create + // and download a service account key for your project. + // + // Note: The example uses the datastore client, but the same steps apply to + // the other client libraries underneath this package. + client, err := datastore.NewClient(ctx, + "project-id", + option.WithServiceAccountFile("/path/to/service-account-key.json")) + if err != nil { + // TODO: handle error. + } + // Use the client. + _ = client +} diff --git a/vendor/cloud.google.com/go/bigquery/bigquery.go b/vendor/cloud.google.com/go/bigquery/bigquery.go new file mode 100644 index 000000000..2ab75cca0 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/bigquery.go @@ -0,0 +1,175 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +// TODO(mcgreevy): support dry-run mode when creating jobs. + +import ( + "fmt" + + "google.golang.org/api/option" + "google.golang.org/api/transport" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +const prodAddr = "https://www.googleapis.com/bigquery/v2/" + +// A Source is a source of data for the Copy function. +type Source interface { + implementsSource() +} + +// A Destination is a destination of data for the Copy function. +type Destination interface { + implementsDestination() +} + +// An Option is an optional argument to Copy. +type Option interface { + implementsOption() +} + +// A ReadSource is a source of data for the Read function. +type ReadSource interface { + implementsReadSource() +} + +// A ReadOption is an optional argument to Read. +type ReadOption interface { + customizeRead(conf *pagingConf) +} + +const Scope = "https://www.googleapis.com/auth/bigquery" +const userAgent = "gcloud-golang-bigquery/20160429" + +// Client may be used to perform BigQuery operations. 
+type Client struct { + service service + projectID string +} + +// NewClient constructs a new Client which can perform BigQuery operations. +// Operations performed via the client are billed to the specified GCP project. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithEndpoint(prodAddr), + option.WithScopes(Scope), + option.WithUserAgent(userAgent), + } + o = append(o, opts...) + httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + + s, err := newBigqueryService(httpClient, endpoint) + if err != nil { + return nil, fmt.Errorf("constructing bigquery client: %v", err) + } + + c := &Client{ + service: s, + projectID: projectID, + } + return c, nil +} + +// initJobProto creates and returns a bigquery Job proto. +// The proto is customized using any jobOptions in options. +// The list of Options is returned with the jobOptions removed. +func initJobProto(projectID string, options []Option) (*bq.Job, []Option) { + job := &bq.Job{} + + var other []Option + for _, opt := range options { + if o, ok := opt.(jobOption); ok { + o.customizeJob(job, projectID) + } else { + other = append(other, opt) + } + } + return job, other +} + +// Copy starts a BigQuery operation to copy data from a Source to a Destination. +func (c *Client) Copy(ctx context.Context, dst Destination, src Source, options ...Option) (*Job, error) { + switch dst := dst.(type) { + case *Table: + switch src := src.(type) { + case *GCSReference: + return c.load(ctx, dst, src, options) + case *Table: + return c.cp(ctx, dst, Tables{src}, options) + case Tables: + return c.cp(ctx, dst, src, options) + case *Query: + return c.query(ctx, dst, src, options) + } + case *GCSReference: + if src, ok := src.(*Table); ok { + return c.extract(ctx, dst, src, options) + } + } + return nil, fmt.Errorf("no Copy operation matches dst/src pair: dst: %T ; src: %T", dst, src) +} + +// Query creates a query with string q. You may optionally set +// DefaultProjectID and DefaultDatasetID on the returned query before using it. +func (c *Client) Query(q string) *Query { + return &Query{Q: q, client: c} +} + +// Read submits a query for execution and returns the results via an Iterator. +// +// Read uses a temporary table to hold the results of the query job. +// +// For more control over how a query is performed, don't use this method but +// instead pass the Query as a Source to Client.Copy, and call Read on the +// resulting Job. +func (q *Query) Read(ctx context.Context, options ...ReadOption) (*Iterator, error) { + dest := &Table{} + job, err := q.client.Copy(ctx, dest, q, WriteTruncate) + if err != nil { + return nil, err + } + return job.Read(ctx, options...) +} + +// executeQuery submits a query for execution and returns the results via an Iterator. +func (c *Client) executeQuery(ctx context.Context, q *Query, options ...ReadOption) (*Iterator, error) { + dest := &Table{} + job, err := c.Copy(ctx, dest, q, WriteTruncate) + if err != nil { + return nil, err + } + + return c.Read(ctx, job, options...) +} + +// Dataset creates a handle to a BigQuery dataset in the client's project. +func (c *Client) Dataset(id string) *Dataset { + return c.DatasetInProject(c.projectID, id) +} + +// DatasetInProject creates a handle to a BigQuery dataset in the specified project. 
+func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset { + return &Dataset{ + projectID: projectID, + id: datasetID, + service: c.service, + } +} diff --git a/vendor/cloud.google.com/go/bigquery/copy_op.go b/vendor/cloud.google.com/go/bigquery/copy_op.go new file mode 100644 index 000000000..00d39af42 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/copy_op.go @@ -0,0 +1,47 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +type copyOption interface { + customizeCopy(conf *bq.JobConfigurationTableCopy) +} + +func (c *Client) cp(ctx context.Context, dst *Table, src Tables, options []Option) (*Job, error) { + job, options := initJobProto(c.projectID, options) + payload := &bq.JobConfigurationTableCopy{} + + dst.customizeCopyDst(payload) + src.customizeCopySrc(payload) + + for _, opt := range options { + o, ok := opt.(copyOption) + if !ok { + return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) + } + o.customizeCopy(payload) + } + + job.Configuration = &bq.JobConfiguration{ + Copy: payload, + } + return c.service.insertJob(ctx, job, c.projectID) +} diff --git a/vendor/cloud.google.com/go/bigquery/copy_test.go b/vendor/cloud.google.com/go/bigquery/copy_test.go new file mode 100644 index 000000000..26e40ec3b --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/copy_test.go @@ -0,0 +1,104 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +func defaultCopyJob() *bq.Job { + return &bq.Job{ + Configuration: &bq.JobConfiguration{ + Copy: &bq.JobConfigurationTableCopy{ + DestinationTable: &bq.TableReference{ + ProjectId: "d-project-id", + DatasetId: "d-dataset-id", + TableId: "d-table-id", + }, + SourceTables: []*bq.TableReference{ + { + ProjectId: "s-project-id", + DatasetId: "s-dataset-id", + TableId: "s-table-id", + }, + }, + }, + }, + } +} + +func TestCopy(t *testing.T) { + testCases := []struct { + dst *Table + src Tables + options []Option + want *bq.Job + }{ + { + dst: &Table{ + ProjectID: "d-project-id", + DatasetID: "d-dataset-id", + TableID: "d-table-id", + }, + src: Tables{ + { + ProjectID: "s-project-id", + DatasetID: "s-dataset-id", + TableID: "s-table-id", + }, + }, + want: defaultCopyJob(), + }, + { + dst: &Table{ + ProjectID: "d-project-id", + DatasetID: "d-dataset-id", + TableID: "d-table-id", + }, + src: Tables{ + { + ProjectID: "s-project-id", + DatasetID: "s-dataset-id", + TableID: "s-table-id", + }, + }, + options: []Option{CreateNever, WriteTruncate}, + want: func() *bq.Job { + j := defaultCopyJob() + j.Configuration.Copy.CreateDisposition = "CREATE_NEVER" + j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE" + return j + }(), + }, + } + + for _, tc := range testCases { + s := &testService{} + c := &Client{ + service: s, + } + if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil { + t.Errorf("err calling cp: %v", err) + continue + } + if !reflect.DeepEqual(s.Job, tc.want) { + t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/create_table_test.go b/vendor/cloud.google.com/go/bigquery/create_table_test.go new file mode 100644 index 000000000..6fdfd6681 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/create_table_test.go @@ -0,0 +1,79 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "reflect" + "testing" + "time" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +type createTableRecorder struct { + conf *createTableConf + service +} + +func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error { + rec.conf = conf + return nil +} + +func TestCreateTableOptions(t *testing.T) { + s := &createTableRecorder{} + c := &Client{ + projectID: "p", + service: s, + } + ds := c.Dataset("d") + table := ds.Table("t") + exp := time.Now() + q := "query" + if err := table.Create(context.Background(), TableExpiration(exp), ViewQuery(q)); err != nil { + t.Fatalf("err calling Table.Create: %v", err) + } + want := createTableConf{ + projectID: "p", + datasetID: "d", + tableID: "t", + expiration: exp, + viewQuery: q, + } + if !reflect.DeepEqual(*s.conf, want) { + t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want) + } + + sc := Schema{fieldSchema("desc", "name", "STRING", false, true)} + if err := table.Create(context.Background(), TableExpiration(exp), sc); err != nil { + t.Fatalf("err calling Table.Create: %v", err) + } + want = createTableConf{ + projectID: "p", + datasetID: "d", + tableID: "t", + expiration: exp, + // No need for an elaborate schema, that is tested in schema_test.go. + schema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), + }, + }, + } + if !reflect.DeepEqual(*s.conf, want) { + t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/dataset.go b/vendor/cloud.google.com/go/bigquery/dataset.go new file mode 100644 index 000000000..bb114e091 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/dataset.go @@ -0,0 +1,55 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import "golang.org/x/net/context" + +// Dataset is a reference to a BigQuery dataset. +type Dataset struct { + projectID string + id string + service service +} + +// ListTables returns a list of all the tables contained in the Dataset. +func (d *Dataset) ListTables(ctx context.Context) ([]*Table, error) { + var tables []*Table + + err := getPages("", func(pageToken string) (string, error) { + ts, tok, err := d.service.listTables(ctx, d.projectID, d.id, pageToken) + if err == nil { + tables = append(tables, ts...) + } + return tok, err + }) + + if err != nil { + return nil, err + } + return tables, nil +} + +// Create creates a dataset in the BigQuery service. An error will be returned +// if the dataset already exists. +func (d *Dataset) Create(ctx context.Context) error { + return d.service.insertDataset(ctx, d.id, d.projectID) +} + +// Table creates a handle to a BigQuery table in the dataset. +// To determine if a table exists, call Table.Metadata. +// If the table does not already exist, use Table.Create to create it. 
+func (d *Dataset) Table(tableID string) *Table { + return &Table{ProjectID: d.projectID, DatasetID: d.id, TableID: tableID, service: d.service} +} diff --git a/vendor/cloud.google.com/go/bigquery/dataset_test.go b/vendor/cloud.google.com/go/bigquery/dataset_test.go new file mode 100644 index 000000000..79c9c4e04 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/dataset_test.go @@ -0,0 +1,105 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "reflect" + "testing" + + "golang.org/x/net/context" +) + +// readServiceStub services read requests by returning data from an in-memory list of values. +type listTablesServiceStub struct { + expectedProject, expectedDataset string + values [][]*Table // contains pages of tables. + pageTokens map[string]string // maps incoming page token to returned page token. + + service +} + +func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) { + if projectID != s.expectedProject { + return nil, "", errors.New("wrong project id") + } + if datasetID != s.expectedDataset { + return nil, "", errors.New("wrong dataset id") + } + + tables := s.values[0] + s.values = s.values[1:] + return tables, s.pageTokens[pageToken], nil +} + +func TestListTables(t *testing.T) { + t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"} + t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"} + t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"} + testCases := []struct { + data [][]*Table + pageTokens map[string]string + want []*Table + }{ + { + data: [][]*Table{{t1, t2}, {t3}}, + pageTokens: map[string]string{"": "a", "a": ""}, + want: []*Table{t1, t2, t3}, + }, + { + data: [][]*Table{{t1, t2}, {t3}}, + pageTokens: map[string]string{"": ""}, // no more pages after first one. + want: []*Table{t1, t2}, + }, + } + + for _, tc := range testCases { + c := &Client{ + service: &listTablesServiceStub{ + expectedProject: "x", + expectedDataset: "y", + values: tc.data, + pageTokens: tc.pageTokens, + }, + projectID: "x", + } + got, err := c.Dataset("y").ListTables(context.Background()) + if err != nil { + t.Errorf("err calling ListTables: %v", err) + continue + } + + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want) + } + } +} + +func TestListTablesError(t *testing.T) { + c := &Client{ + service: &listTablesServiceStub{ + expectedProject: "x", + expectedDataset: "y", + }, + projectID: "x", + } + // Test that service read errors are propagated back to the caller. + // Passing "not y" as the dataset id will cause the service to return an error. + _, err := c.Dataset("not y").ListTables(context.Background()) + if err == nil { + // Read should not return an error; only Err should. 
+ t.Errorf("ListTables expected: non-nil err, got: nil") + } +} diff --git a/vendor/cloud.google.com/go/bigquery/doc.go b/vendor/cloud.google.com/go/bigquery/doc.go new file mode 100644 index 000000000..2da205d57 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/doc.go @@ -0,0 +1,18 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package bigquery provides a client for the BigQuery service. +// +// Note: This package is a work-in-progress. Backwards-incompatible changes should be expected. +package bigquery // import "cloud.google.com/go/bigquery" diff --git a/vendor/cloud.google.com/go/bigquery/error.go b/vendor/cloud.google.com/go/bigquery/error.go new file mode 100644 index 000000000..b59ac6e6e --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/error.go @@ -0,0 +1,82 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + + bq "google.golang.org/api/bigquery/v2" +) + +// An Error contains detailed information about a failed bigquery operation. +type Error struct { + // Mirrors bq.ErrorProto, but drops DebugInfo + Location, Message, Reason string +} + +func (e Error) Error() string { + return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason) +} + +func errorFromErrorProto(ep *bq.ErrorProto) *Error { + if ep == nil { + return nil + } + return &Error{ + Location: ep.Location, + Message: ep.Message, + Reason: ep.Reason, + } +} + +// A MultiError contains multiple related errors. +type MultiError []error + +func (m MultiError) Error() string { + switch len(m) { + case 0: + return "(0 errors)" + case 1: + return m[0].Error() + case 2: + return m[0].Error() + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1) +} + +// RowInsertionError contains all errors that occurred when attempting to insert a row. +type RowInsertionError struct { + InsertID string // The InsertID associated with the affected row. + RowIndex int // The 0-based index of the affected row in the batch of rows being inserted. + Errors MultiError +} + +func (e *RowInsertionError) Error() string { + errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s" + return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error()) +} + +// PutMultiError contains an error for each row which was not successfully inserted +// into a BigQuery table. 
+type PutMultiError []RowInsertionError + +func (pme PutMultiError) Error() string { + plural := "s" + if len(pme) == 1 { + plural = "" + } + + return fmt.Sprintf("%v row insertion%s failed", len(pme), plural) +} diff --git a/vendor/cloud.google.com/go/bigquery/error_test.go b/vendor/cloud.google.com/go/bigquery/error_test.go new file mode 100644 index 000000000..c0f40bae6 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/error_test.go @@ -0,0 +1,109 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "reflect" + "strings" + "testing" + + bq "google.golang.org/api/bigquery/v2" +) + +func rowInsertionError(msg string) RowInsertionError { + return RowInsertionError{Errors: []error{errors.New(msg)}} +} + +func TestPutMultiErrorString(t *testing.T) { + testCases := []struct { + errs PutMultiError + want string + }{ + { + errs: PutMultiError{}, + want: "0 row insertions failed", + }, + { + errs: PutMultiError{rowInsertionError("a")}, + want: "1 row insertion failed", + }, + { + errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")}, + want: "2 row insertions failed", + }, + } + + for _, tc := range testCases { + if tc.errs.Error() != tc.want { + t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) + } + } +} + +func TestMultiErrorString(t *testing.T) { + testCases := []struct { + errs MultiError + want string + }{ + { + errs: MultiError{}, + want: "(0 errors)", + }, + { + errs: MultiError{errors.New("a")}, + want: "a", + }, + { + errs: MultiError{errors.New("a"), errors.New("b")}, + want: "a (and 1 other error)", + }, + { + errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")}, + want: "a (and 2 other errors)", + }, + } + + for _, tc := range testCases { + if tc.errs.Error() != tc.want { + t.Errorf("MultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) + } + } +} + +func TestErrorFromErrorProto(t *testing.T) { + for _, test := range []struct { + in *bq.ErrorProto + want *Error + }{ + {nil, nil}, + { + in: &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"}, + want: &Error{Location: "L", Message: "M", Reason: "R"}, + }, + } { + if got := errorFromErrorProto(test.in); !reflect.DeepEqual(got, test.want) { + t.Errorf("%v: got %v, want %v", test.in, got, test.want) + } + } +} + +func TestErrorString(t *testing.T) { + e := &Error{Location: "<L>", Message: "<M>", Reason: "<R>"} + got := e.Error() + if !strings.Contains(got, "<L>") || !strings.Contains(got, "<M>") || !strings.Contains(got, "<R>") { + t.Errorf(`got %q, expected to see "<L>", "<M>" and "<R>"`, got) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/extract_op.go b/vendor/cloud.google.com/go/bigquery/extract_op.go new file mode 100644 index 000000000..93ea9c8d4 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/extract_op.go @@ -0,0 +1,59 @@ +// Copyright 2015 Google Inc. All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +type extractOption interface { + customizeExtract(conf *bq.JobConfigurationExtract) +} + +// DisableHeader returns an Option that disables the printing of a header row in exported data. +func DisableHeader() Option { return disableHeader{} } + +type disableHeader struct{} + +func (opt disableHeader) implementsOption() {} + +func (opt disableHeader) customizeExtract(conf *bq.JobConfigurationExtract) { + f := false + conf.PrintHeader = &f +} + +func (c *Client) extract(ctx context.Context, dst *GCSReference, src *Table, options []Option) (*Job, error) { + job, options := initJobProto(c.projectID, options) + payload := &bq.JobConfigurationExtract{} + + dst.customizeExtractDst(payload) + src.customizeExtractSrc(payload) + + for _, opt := range options { + o, ok := opt.(extractOption) + if !ok { + return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) + } + o.customizeExtract(payload) + } + + job.Configuration = &bq.JobConfiguration{ + Extract: payload, + } + return c.service.insertJob(ctx, job, c.projectID) +} diff --git a/vendor/cloud.google.com/go/bigquery/extract_test.go b/vendor/cloud.google.com/go/bigquery/extract_test.go new file mode 100644 index 000000000..b97c39e04 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/extract_test.go @@ -0,0 +1,97 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
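A minimal usage sketch of the extract path defined in extract_op.go above, using only APIs visible in this diff (Client.Copy dispatching on a *GCSReference destination and a *Table source, NewGCSReference, the Gzip constant, and the DisableHeader option); the bucket, dataset, and table names are placeholder assumptions, not part of the vendored code.

package main

import (
	"golang.org/x/net/context"

	"cloud.google.com/go/bigquery"
)

// exportTable extracts a table to Cloud Storage as gzipped CSV with no
// header row. Copy selects the extract job configuration because dst is a
// *GCSReference and src is a *Table; DisableHeader is an extractOption.
func exportTable(ctx context.Context, c *bigquery.Client) (*bigquery.Job, error) {
	dst := c.NewGCSReference("gs://my-bucket/export-*.csv") // placeholder URI
	dst.Compression = bigquery.Gzip
	src := c.Dataset("my-dataset").Table("my-table") // placeholder names
	return c.Copy(ctx, dst, src, bigquery.DisableHeader())
}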
+ +package bigquery + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultExtractJob() *bq.Job { + return &bq.Job{ + Configuration: &bq.JobConfiguration{ + Extract: &bq.JobConfigurationExtract{ + SourceTable: &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + DestinationUris: []string{"uri"}, + }, + }, + } +} + +func TestExtract(t *testing.T) { + testCases := []struct { + dst *GCSReference + src *Table + options []Option + want *bq.Job + }{ + { + dst: defaultGCS, + src: defaultTable(nil), + want: defaultExtractJob(), + }, + { + dst: defaultGCS, + src: defaultTable(nil), + options: []Option{ + DisableHeader(), + }, + want: func() *bq.Job { + j := defaultExtractJob() + f := false + j.Configuration.Extract.PrintHeader = &f + return j + }(), + }, + { + dst: &GCSReference{ + uris: []string{"uri"}, + Compression: Gzip, + DestinationFormat: JSON, + FieldDelimiter: "\t", + }, + src: defaultTable(nil), + want: func() *bq.Job { + j := defaultExtractJob() + j.Configuration.Extract.Compression = "GZIP" + j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON" + j.Configuration.Extract.FieldDelimiter = "\t" + return j + }(), + }, + } + + for _, tc := range testCases { + s := &testService{} + c := &Client{ + service: s, + } + if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil { + t.Errorf("err calling extract: %v", err) + continue + } + if !reflect.DeepEqual(s.Job, tc.want) { + t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/gcs.go b/vendor/cloud.google.com/go/bigquery/gcs.go new file mode 100644 index 000000000..923aeb480 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/gcs.go @@ -0,0 +1,112 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import bq "google.golang.org/api/bigquery/v2" + +// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute +// an input or output to a BigQuery operation. +type GCSReference struct { + uris []string + + // FieldDelimiter is the separator for fields in a CSV file, used when loading or exporting data. + // The default is ",". + FieldDelimiter string + + // The number of rows at the top of a CSV file that BigQuery will skip when loading the data. + SkipLeadingRows int64 + + // SourceFormat is the format of the GCS data to be loaded into BigQuery. + // Allowed values are: CSV, JSON, DatastoreBackup. The default is CSV. + SourceFormat DataFormat + // Only used when loading data. + Encoding Encoding + + // Quote is the value used to quote data sections in a CSV file. + // The default quotation character is the double quote ("), which is used if both Quote and ForceZeroQuote are unset. + // To specify that no character should be interpreted as a quotation character, set ForceZeroQuote to true. 
+ // Only used when loading data. + Quote string + ForceZeroQuote bool + + // DestinationFormat is the format to use when writing exported files. + // Allowed values are: CSV, Avro, JSON. The default is CSV. + // CSV is not supported for tables with nested or repeated fields. + DestinationFormat DataFormat + // Only used when writing data. Default is None. + Compression Compression +} + +func (gcs *GCSReference) implementsSource() {} +func (gcs *GCSReference) implementsDestination() {} + +// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination. +// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object. +// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided. +// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name. +// For more information about the treatment of wildcards and multiple URIs, +// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple +func (c *Client) NewGCSReference(uri ...string) *GCSReference { + return &GCSReference{uris: uri} +} + +type DataFormat string + +const ( + CSV DataFormat = "CSV" + Avro DataFormat = "AVRO" + JSON DataFormat = "NEWLINE_DELIMITED_JSON" + DatastoreBackup DataFormat = "DATASTORE_BACKUP" +) + +// Encoding specifies the character encoding of data to be loaded into BigQuery. +// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding +// for more details about how this is used. +type Encoding string + +const ( + UTF_8 Encoding = "UTF-8" + ISO_8859_1 Encoding = "ISO-8859-1" +) + +// Compression is the type of compression to apply when writing data to Google Cloud Storage. +type Compression string + +const ( + None Compression = "NONE" + Gzip Compression = "GZIP" +) + +func (gcs *GCSReference) customizeLoadSrc(conf *bq.JobConfigurationLoad) { + conf.SourceUris = gcs.uris + conf.SkipLeadingRows = gcs.SkipLeadingRows + conf.SourceFormat = string(gcs.SourceFormat) + conf.Encoding = string(gcs.Encoding) + conf.FieldDelimiter = gcs.FieldDelimiter + + if gcs.ForceZeroQuote { + quote := "" + conf.Quote = &quote + } else if gcs.Quote != "" { + conf.Quote = &gcs.Quote + } +} + +func (gcs *GCSReference) customizeExtractDst(conf *bq.JobConfigurationExtract) { + conf.DestinationUris = gcs.uris + conf.Compression = string(gcs.Compression) + conf.DestinationFormat = string(gcs.DestinationFormat) + conf.FieldDelimiter = gcs.FieldDelimiter +} diff --git a/vendor/cloud.google.com/go/bigquery/integration_test.go b/vendor/cloud.google.com/go/bigquery/integration_test.go new file mode 100644 index 000000000..e48226672 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/integration_test.go @@ -0,0 +1,154 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
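Because the quoting rules in customizeLoadSrc above are easy to misread, here is a sketch of the three states a GCSReference can express; the URIs are placeholders, and the comments restate the mapping to conf.Quote implemented above.

package main

import "cloud.google.com/go/bigquery"

// quoteModes builds one GCSReference per quoting mode.
func quoteModes(c *bigquery.Client) []*bigquery.GCSReference {
	// Quote unset, ForceZeroQuote false: conf.Quote stays nil, so the
	// service applies its default quote character, the double quote.
	byDefault := c.NewGCSReference("gs://my-bucket/a.csv")

	// Non-empty Quote: passed through to the service as a *string.
	custom := c.NewGCSReference("gs://my-bucket/b.csv")
	custom.Quote = "'"

	// ForceZeroQuote: an explicit empty string is sent, disabling quoting.
	none := c.NewGCSReference("gs://my-bucket/c.csv")
	none.ForceZeroQuote = true

	return []*bigquery.GCSReference{byDefault, custom, none}
}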
+ +package bigquery + +import ( + "fmt" + "net/http" + "reflect" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + "google.golang.org/api/option" +) + +func TestIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + ts := testutil.TokenSource(ctx, Scope) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projID := testutil.ProjID() + c, err := NewClient(ctx, projID, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + ds := c.Dataset("bigquery_integration_test") + if err := ds.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409 + t.Fatal(err) + } + schema := Schema([]*FieldSchema{ + {Name: "name", Type: StringFieldType}, + {Name: "num", Type: IntegerFieldType}, + }) + table := ds.Table("t1") + // Delete the table in case it already exists. (Ignore errors.) + table.Delete(ctx) + // Create the table. + err = table.Create(ctx, schema, TableExpiration(time.Now().Add(5*time.Minute))) + if err != nil { + t.Fatal(err) + } + // Check table metadata. + md, err := table.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + // TODO(jba): check md more thoroughly. + if got, want := md.ID, fmt.Sprintf("%s:%s.%s", projID, ds.id, table.TableID); got != want { + t.Errorf("metadata.ID: got %q, want %q", got, want) + } + if got, want := md.Type, RegularTable; got != want { + t.Errorf("metadata.Type: got %v, want %v", got, want) + } + + // List tables in the dataset. + tables, err := ds.ListTables(ctx) + if err != nil { + t.Fatal(err) + } + if got, want := len(tables), 1; got != want { + t.Fatalf("ListTables: got %d, want %d", got, want) + } + want := *table + if got := tables[0]; !reflect.DeepEqual(got, &want) { + t.Errorf("ListTables: got %v, want %v", got, &want) + } + + // Populate the table. + upl := table.NewUploader() + var rows []*ValuesSaver + for i, name := range []string{"a", "b", "c"} { + rows = append(rows, &ValuesSaver{ + Schema: schema, + InsertID: name, + Row: []Value{name, i}, + }) + } + if err := upl.Put(ctx, rows); err != nil { + t.Fatal(err) + } + + checkRead := func(src ReadSource) { + it, err := c.Read(ctx, src) + if err != nil { + t.Fatal(err) + } + for i := 0; it.Next(ctx); i++ { + var vals ValueList + if err := it.Get(&vals); err != nil { + t.Fatal(err) + } + if got, want := vals, rows[i].Row; !reflect.DeepEqual([]Value(got), want) { + t.Errorf("got %v, want %v", got, want) + } + } + } + // Read the table. + checkRead(table) + + // Query the table. + q := &Query{ + Q: "select name, num from t1", + DefaultProjectID: projID, + DefaultDatasetID: ds.id, + } + checkRead(q) + + // Query the long way.
+ dest := &Table{} + job1, err := c.Copy(ctx, dest, q, WriteTruncate) + if err != nil { + t.Fatal(err) + } + job2, err := c.JobFromID(ctx, job1.ID()) + if err != nil { + t.Fatal(err) + } + // TODO(jba): poll status until job is done + _, err = job2.Status(ctx) + if err != nil { + t.Fatal(err) + } + + checkRead(job2) + + // TODO(jba): patch the table +} + +func hasStatusCode(err error, code int) bool { + if e, ok := err.(*googleapi.Error); ok && e.Code == code { + return true + } + return false +} diff --git a/vendor/cloud.google.com/go/bigquery/iterator.go b/vendor/cloud.google.com/go/bigquery/iterator.go new file mode 100644 index 000000000..9f518d20a --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/iterator.go @@ -0,0 +1,186 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + + "golang.org/x/net/context" +) + +// A pageFetcher returns a page of rows, starting from the row specified by token. +type pageFetcher interface { + fetch(ctx context.Context, s service, token string) (*readDataResult, error) +} + +// Iterator provides access to the result of a BigQuery lookup. +// Next must be called before the first call to Get. +type Iterator struct { + service service + + err error // contains any error encountered during calls to Next. + + // Once Next has been called at least once, schema has the result schema, rs contains the current + // page of data, and nextToken contains the token for fetching the next + // page (empty if there is no more data to be fetched). + schema Schema + rs [][]Value + nextToken string + + // The remaining fields contain enough information to fetch the current + // page of data, and determine which row of data from this page is the + // current row. + + pf pageFetcher + pageToken string + + // The offset from the start of the current page to the current row. + // For a new iterator, this is -1. + offset int64 +} + +func newIterator(s service, pf pageFetcher) *Iterator { + return &Iterator{ + service: s, + pf: pf, + offset: -1, + } +} + +// fetchPage loads the current page of data from the server. +// The contents of rs and nextToken are replaced with the loaded data. +// If there is an error while fetching, the error is stored in it.err and false is returned. +func (it *Iterator) fetchPage(ctx context.Context) bool { + var res *readDataResult + var err error + for { + res, err = it.pf.fetch(ctx, it.service, it.pageToken) + if err != errIncompleteJob { + break + } + } + + if err != nil { + it.err = err + return false + } + + it.schema = res.schema + it.rs = res.rows + it.nextToken = res.pageToken + return true +} + +// getEnoughData loads new data into rs until offset no longer points beyond the end of rs. +func (it *Iterator) getEnoughData(ctx context.Context) bool { + if len(it.rs) == 0 { + // Either we have not yet fetched any pages, or we are iterating over an empty dataset. 
+ // In the former case, we should fetch a page of data, so that we can depend on the resultant nextToken. + // In the latter case, it is harmless to fetch a page of data. + if !it.fetchPage(ctx) { + return false + } + } + + for it.offset >= int64(len(it.rs)) { + // If offset is still outside the bounds of the loaded data, + // but there are no more pages of data to fetch, then we have + // failed to satisfy the offset. + if it.nextToken == "" { + return false + } + + // offset cannot be satisfied with the currently loaded data, + // so we fetch the next page. We no longer need the existing + // cached rows, so we remove them and update the offset to be + // relative to the new page that we're about to fetch. + // NOTE: we can't just set offset to 0, because after + // marshalling/unmarshalling, it's possible for the offset to + // point arbitrarily far beyond the end of rs. + // This can happen if the server returns a different size + // results page before and after marshalling. + it.offset -= int64(len(it.rs)) + it.pageToken = it.nextToken + if !it.fetchPage(ctx) { + return false + } + } + return true +} + +// Next advances the Iterator to the next row, making that row available +// via the Get method. +// Next must be called before the first call to Get or Schema, and blocks until data is available. +// Next returns false when there are no more rows available, either because +// the end of the output was reached, or because there was an error (consult +// the Err method to determine which). +func (it *Iterator) Next(ctx context.Context) bool { + if it.err != nil { + return false + } + + // Advance offset to where we want it to be for the next call to Get. + it.offset++ + + // offset may now point beyond the end of rs, so we fetch data + // until offset is within its bounds again. If there are no more + // results available, offset will be left pointing beyond the bounds + // of rs. + // At the end of this method, rs will contain at least one element + // unless the dataset we are iterating over is empty. + return it.getEnoughData(ctx) +} + +// Err returns the last error encountered by Next, or nil for no error. +func (it *Iterator) Err() error { + return it.err +} + +// verifyState checks that the iterator is pointing to a valid row. +func (it *Iterator) verifyState() error { + if it.err != nil { + return fmt.Errorf("called on iterator in error state: %v", it.err) + } + + // If Next has been called, then offset should always index into a + // valid row in rs, as long as there is still data available. + if it.offset >= int64(len(it.rs)) || it.offset < 0 { + return errors.New("called without preceding successful call to Next") + } + + return nil +} + +// Get loads the current row into dst, which must implement ValueLoader. +func (it *Iterator) Get(dst interface{}) error { + if err := it.verifyState(); err != nil { + return fmt.Errorf("Get %v", err) + } + + if dst, ok := dst.(ValueLoader); ok { + return dst.Load(it.rs[it.offset]) + } + return errors.New("Get called with unsupported argument type") +} + +// Schema returns the schema of the result rows. 
+func (it *Iterator) Schema() (Schema, error) { + if err := it.verifyState(); err != nil { + return nil, fmt.Errorf("Schema %v", err) + } + + return it.schema, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/iterator_test.go b/vendor/cloud.google.com/go/bigquery/iterator_test.go new file mode 100644 index 000000000..4774904de --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/iterator_test.go @@ -0,0 +1,538 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "golang.org/x/net/context" +) + +type fetchResponse struct { + result *readDataResult // The result to return. + err error // The error to return. +} + +// pageFetcherStub services fetch requests by returning data from an in-memory list of values. +type pageFetcherStub struct { + fetchResponses map[string]fetchResponse + + err error +} + +func (pf *pageFetcherStub) fetch(ctx context.Context, s service, token string) (*readDataResult, error) { + call, ok := pf.fetchResponses[token] + if !ok { + pf.err = fmt.Errorf("Unexpected page token: %q", token) + } + return call.result, call.err +} + +func TestIterator(t *testing.T) { + fetchFailure := errors.New("fetch failure") + + testCases := []struct { + desc string + alreadyConsumed int64 // amount to advance offset before commencing reading. 
+ fetchResponses map[string]fetchResponse + want []ValueList + wantErr error + wantSchema Schema + }{ + { + desc: "Iteration over single empty page", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{}, + schema: Schema{}, + }, + }, + }, + want: []ValueList{}, + wantSchema: Schema{}, + }, + { + desc: "Iteration over single page", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + }, + want: []ValueList{{1, 2}, {11, 12}}, + wantSchema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + { + desc: "Iteration over single page with different schema", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{"1", 2}, {"11", 12}}, + schema: Schema{ + {Type: StringFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + }, + want: []ValueList{{"1", 2}, {"11", 12}}, + wantSchema: Schema{ + {Type: StringFieldType}, + {Type: IntegerFieldType}, + }, + }, + { + desc: "Iteration over two pages", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + }, + want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}}, + wantSchema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + { + desc: "Server response includes empty page", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "b", + rows: [][]Value{}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + "b": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + }, + want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}}, + wantSchema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + { + desc: "Fetch error", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + "a": { + // The fetch returns some data, but also an error, + // so the end result should include only data from the previous fetch.
+ err: fetchFailure, + result: &readDataResult{ + pageToken: "b", + rows: [][]Value{{101, 102}, {111, 112}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + }, + want: []ValueList{{1, 2}, {11, 12}}, + wantErr: fetchFailure, + wantSchema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + { + desc: "Skip over a single element", + alreadyConsumed: 1, + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + }, + want: []ValueList{{11, 12}, {101, 102}, {111, 112}}, + wantSchema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + { + desc: "Skip over an entire page", + alreadyConsumed: 2, + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + }, + want: []ValueList{{101, 102}, {111, 112}}, + wantSchema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + { + desc: "Skip beyond start of second page", + alreadyConsumed: 3, + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + }, + want: []ValueList{{111, 112}}, + wantSchema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + { + desc: "Skip beyond all data", + alreadyConsumed: 4, + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + schema: Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + }, + }, + }, + }, + // In this test case, Next will return false on its first call, + // so we won't even attempt to call Get. 
+ want: []ValueList{}, + wantSchema: Schema{}, + }, + } + + for _, tc := range testCases { + pf := &pageFetcherStub{ + fetchResponses: tc.fetchResponses, + } + it := newIterator(nil, pf) + it.offset += tc.alreadyConsumed + + values, schema, err := consumeIterator(it) + if err != nil { + t.Fatalf("%s: %v", tc.desc, err) + } + + if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) { + t.Errorf("%s: values:\ngot: %v\nwant: %v", tc.desc, values, tc.want) + } + if it.Err() != tc.wantErr { + t.Errorf("%s: iterator.Err:\ngot: %v\nwant: %v", tc.desc, it.Err(), tc.wantErr) + } + if (len(schema) != 0 || len(tc.wantSchema) != 0) && !reflect.DeepEqual(schema, tc.wantSchema) { + t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema) + } + } +} + +// consumeIterator reads the schema and all values from an iterator and returns them. +func consumeIterator(it *Iterator) ([]ValueList, Schema, error) { + var got []ValueList + var schema Schema + for it.Next(context.Background()) { + var vals ValueList + var err error + if err = it.Get(&vals); err != nil { + return nil, Schema{}, fmt.Errorf("err calling Get: %v", err) + } + got = append(got, vals) + if schema, err = it.Schema(); err != nil { + return nil, Schema{}, fmt.Errorf("err calling Schema: %v", err) + } + } + + return got, schema, nil +} + +func TestGetBeforeNext(t *testing.T) { + // TODO: once marshalling/unmarshalling of iterators is implemented, do a similar test for unmarshalled iterators. + pf := &pageFetcherStub{ + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + }, + } + it := newIterator(nil, pf) + var vals ValueList + if err := it.Get(&vals); err == nil { + t.Errorf("Expected error calling Get before Next") + } +} + +type delayedPageFetcher struct { + pageFetcherStub + delayCount int +} + +func (pf *delayedPageFetcher) fetch(ctx context.Context, s service, token string) (*readDataResult, error) { + if pf.delayCount > 0 { + pf.delayCount-- + return nil, errIncompleteJob + } + return pf.pageFetcherStub.fetch(ctx, s, token) +} + +func TestIterateIncompleteJob(t *testing.T) { + want := []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}} + pf := pageFetcherStub{ + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + }, + }, + }, + } + dpf := &delayedPageFetcher{ + pageFetcherStub: pf, + delayCount: 1, + } + it := newIterator(nil, dpf) + + values, _, err := consumeIterator(it) + if err != nil { + t.Fatal(err) + } + + if (len(values) != 0 || len(want) != 0) && !reflect.DeepEqual(values, want) { + t.Errorf("values: got:\n%v\nwant:\n%v", values, want) + } + if it.Err() != nil { + t.Fatalf("iterator.Err: got:\n%v", it.Err()) + } + if dpf.delayCount != 0 { + t.Errorf("delayCount: got: %v, want: 0", dpf.delayCount) + } +} + +func TestGetDuringErrorState(t *testing.T) { + pf := &pageFetcherStub{ + fetchResponses: map[string]fetchResponse{ + "": {err: errors.New("bang")}, + }, + } + it := newIterator(nil, pf) + var vals ValueList + it.Next(context.Background()) + if it.Err() == nil { + t.Errorf("Expected error after calling Next") + } + if err := it.Get(&vals); err == nil { + t.Errorf("Expected error calling Get when iterator has a non-nil error.") + } +} + +func TestGetAfterFinished(t *testing.T) { + testCases := []struct {
+ alreadyConsumed int64 // amount to advance offset before commencing reading. + fetchResponses map[string]fetchResponse + want []ValueList + }{ + { + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + }, + want: []ValueList{{1, 2}, {11, 12}}, + }, + { + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{}, + }, + }, + }, + want: []ValueList{}, + }, + { + alreadyConsumed: 100, + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + }, + want: []ValueList{}, + }, + } + + for _, tc := range testCases { + pf := &pageFetcherStub{ + fetchResponses: tc.fetchResponses, + } + it := newIterator(nil, pf) + it.offset += tc.alreadyConsumed + + values, _, err := consumeIterator(it) + if err != nil { + t.Fatal(err) + } + + if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) { + t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want) + } + if it.Err() != nil { + t.Fatalf("iterator.Err: got:\n%v\nwant:\nnil", it.Err()) + } + // Try calling Get again. + var vals ValueList + if err := it.Get(&vals); err == nil { + t.Errorf("Expected error calling Get when there are no more values") + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/job.go b/vendor/cloud.google.com/go/bigquery/job.go new file mode 100644 index 000000000..1f1467f0d --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/job.go @@ -0,0 +1,131 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// A Job represents an operation which has been submitted to BigQuery for processing. +type Job struct { + service service + projectID string + jobID string + + isQuery bool +} + +// JobFromID creates a Job which refers to an existing BigQuery job. The job +// need not have been created by this package. For example, the job may have +// been created in the BigQuery console. +func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) { + jobType, err := c.service.getJobType(ctx, c.projectID, id) + if err != nil { + return nil, err + } + + return &Job{ + service: c.service, + projectID: c.projectID, + jobID: id, + isQuery: jobType == queryJobType, + }, nil +} + +func (j *Job) ID() string { + return j.jobID +} + +// State is one of a sequence of states that a Job progresses through as it is processed. +type State int + +const ( + Pending State = iota + Running + Done +) + +// JobStatus contains the current State of a job, and errors encountered while processing that job. +type JobStatus struct { + State State + + err error + + // All errors encountered during the running of the job.
+ // Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful. + Errors []*Error +} + +// jobOption is an Option which modifies a bq.Job proto. +// This is used for configuring values that apply to all operations, such as setting a jobReference. +type jobOption interface { + customizeJob(job *bq.Job, projectID string) +} + +type jobID string + +// JobID returns an Option that sets the job ID of a BigQuery job. +// If this Option is not used, a job ID is generated automatically. +func JobID(ID string) Option { + return jobID(ID) +} + +func (opt jobID) implementsOption() {} + +func (opt jobID) customizeJob(job *bq.Job, projectID string) { + job.JobReference = &bq.JobReference{ + JobId: string(opt), + ProjectId: projectID, + } +} + +// Done reports whether the job has completed. +// After Done returns true, the Err method will return an error if the job completed unsuccessfully. +func (s *JobStatus) Done() bool { + return s.State == Done +} + +// Err returns the error that caused the job to complete unsuccessfully (if any). +func (s *JobStatus) Err() error { + return s.err +} + +// Status returns the current status of the job. It fails if the Status could not be determined. +func (j *Job) Status(ctx context.Context) (*JobStatus, error) { + return j.service.jobStatus(ctx, j.projectID, j.jobID) +} + +// Cancel requests that a job be cancelled. This method returns without waiting for +// cancellation to take effect. To check whether the job has terminated, use Job.Status. +// Cancelled jobs may still incur costs. +func (j *Job) Cancel(ctx context.Context) error { + return j.service.jobCancel(ctx, j.projectID, j.jobID) +} + +func (j *Job) implementsReadSource() {} + +func (j *Job) customizeReadQuery(cursor *readQueryConf) error { + // There are multiple kinds of jobs, but only a query job is suitable for reading. + if !j.isQuery { + return errors.New("Cannot read from a non-query job") + } + + cursor.projectID = j.projectID + cursor.jobID = j.jobID + return nil +} diff --git a/vendor/cloud.google.com/go/bigquery/legacy.go b/vendor/cloud.google.com/go/bigquery/legacy.go new file mode 100644 index 000000000..32cc42e2e --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/legacy.go @@ -0,0 +1,70 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + + "golang.org/x/net/context" +) + +// OpenTable creates a handle to an existing BigQuery table. If the table does +// not already exist, subsequent uses of the *Table will fail. +// +// Deprecated: use Client.DatasetInProject.Table instead. +func (c *Client) OpenTable(projectID, datasetID, tableID string) *Table { + return c.Table(projectID, datasetID, tableID) +} + +// Table creates a handle to a BigQuery table. +// +// Use this method to reference a table in a project other than that of the +// Client. +// +// Deprecated: use Client.DatasetInProject.Table instead.
+func (c *Client) Table(projectID, datasetID, tableID string) *Table { + return &Table{ProjectID: projectID, DatasetID: datasetID, TableID: tableID, service: c.service} +} + +// CreateTable creates a table in the BigQuery service and returns a handle to it. +// +// Deprecated: use Table.Create instead. +func (c *Client) CreateTable(ctx context.Context, projectID, datasetID, tableID string, options ...CreateTableOption) (*Table, error) { + t := c.Table(projectID, datasetID, tableID) + if err := t.Create(ctx, options...); err != nil { + return nil, err + } + return t, nil +} + +// Read fetches data from a ReadSource and returns the data via an Iterator. +// +// Deprecated: use Query.Read, Job.Read or Table.Read instead. +func (c *Client) Read(ctx context.Context, src ReadSource, options ...ReadOption) (*Iterator, error) { + switch src := src.(type) { + case *Job: + return src.Read(ctx, options...) + case *Query: + // For compatibility, support Query values created by literal, rather + // than Client.Query. + if src.client == nil { + src.client = c + } + return src.Read(ctx, options...) + case *Table: + return src.Read(ctx, options...) + } + return nil, fmt.Errorf("src (%T) does not support the Read operation", src) +} diff --git a/vendor/cloud.google.com/go/bigquery/load_op.go b/vendor/cloud.google.com/go/bigquery/load_op.go new file mode 100644 index 000000000..5d01e67d5 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/load_op.go @@ -0,0 +1,112 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +type loadOption interface { + customizeLoad(conf *bq.JobConfigurationLoad) +} + +// DestinationSchema returns an Option that specifies the schema to use when loading data into a new table. +// A DestinationSchema Option must be supplied when loading data from Google Cloud Storage into a non-existent table. +// Caveat: DestinationSchema is not required if the data being loaded is a datastore backup. +// schema must not be nil. +func DestinationSchema(schema Schema) Option { return destSchema{Schema: schema} } + +type destSchema struct { + Schema +} + +func (opt destSchema) implementsOption() {} + +func (opt destSchema) customizeLoad(conf *bq.JobConfigurationLoad) { + conf.Schema = opt.asTableSchema() +} + +// MaxBadRecords returns an Option that sets the maximum number of bad records that will be ignored. +// If this maximum is exceeded, the operation will be unsuccessful. +func MaxBadRecords(n int64) Option { return maxBadRecords(n) } + +type maxBadRecords int64 + +func (opt maxBadRecords) implementsOption() {} + +func (opt maxBadRecords) customizeLoad(conf *bq.JobConfigurationLoad) { + conf.MaxBadRecords = int64(opt) +} + +// AllowJaggedRows returns an Option that causes missing trailing optional columns to be tolerated in CSV data. Missing values are treated as nulls. 
+func AllowJaggedRows() Option { return allowJaggedRows{} } + +type allowJaggedRows struct{} + +func (opt allowJaggedRows) implementsOption() {} + +func (opt allowJaggedRows) customizeLoad(conf *bq.JobConfigurationLoad) { + conf.AllowJaggedRows = true +} + +// AllowQuotedNewlines returns an Option that allows quoted data sections containing newlines in CSV data. +func AllowQuotedNewlines() Option { return allowQuotedNewlines{} } + +type allowQuotedNewlines struct{} + +func (opt allowQuotedNewlines) implementsOption() {} + +func (opt allowQuotedNewlines) customizeLoad(conf *bq.JobConfigurationLoad) { + conf.AllowQuotedNewlines = true +} + +// IgnoreUnknownValues returns an Option that causes values not matching the schema to be tolerated. +// Unknown values are ignored. For CSV this ignores extra values at the end of a line. +// For JSON this ignores named values that do not match any column name. +// If this Option is not used, records containing unknown values are treated as bad records. +// The MaxBadRecords Option can be used to customize how bad records are handled. +func IgnoreUnknownValues() Option { return ignoreUnknownValues{} } + +type ignoreUnknownValues struct{} + +func (opt ignoreUnknownValues) implementsOption() {} + +func (opt ignoreUnknownValues) customizeLoad(conf *bq.JobConfigurationLoad) { + conf.IgnoreUnknownValues = true +} + +func (c *Client) load(ctx context.Context, dst *Table, src *GCSReference, options []Option) (*Job, error) { + job, options := initJobProto(c.projectID, options) + payload := &bq.JobConfigurationLoad{} + + dst.customizeLoadDst(payload) + src.customizeLoadSrc(payload) + + for _, opt := range options { + o, ok := opt.(loadOption) + if !ok { + return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) + } + o.customizeLoad(payload) + } + + job.Configuration = &bq.JobConfiguration{ + Load: payload, + } + return c.service.insertJob(ctx, job, c.projectID) +} diff --git a/vendor/cloud.google.com/go/bigquery/load_test.go b/vendor/cloud.google.com/go/bigquery/load_test.go new file mode 100644 index 000000000..19482b93a --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/load_test.go @@ -0,0 +1,198 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
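To show how the loadOption values above compose, here is a hedged sketch of a CSV load driven through Client.Copy, the same entry point load_test.go exercises below; the schema, bucket, and table names are illustrative assumptions.

package main

import (
	"golang.org/x/net/context"

	"cloud.google.com/go/bigquery"
)

// loadCSV loads a CSV file from Cloud Storage into a table, tolerating up
// to ten bad records and ignoring values that do not match the schema.
func loadCSV(ctx context.Context, c *bigquery.Client) (*bigquery.Job, error) {
	src := c.NewGCSReference("gs://my-bucket/input.csv") // placeholder URI
	src.SkipLeadingRows = 1
	dst := c.Dataset("my-dataset").Table("my-table") // placeholder names
	schema := bigquery.Schema{
		{Name: "name", Type: bigquery.StringFieldType},
		{Name: "num", Type: bigquery.IntegerFieldType},
	}
	return c.Copy(ctx, dst, src,
		bigquery.DestinationSchema(schema), // required if the table does not exist yet
		bigquery.MaxBadRecords(10),
		bigquery.IgnoreUnknownValues(),
	)
}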
+ +package bigquery + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultLoadJob() *bq.Job { + return &bq.Job{ + Configuration: &bq.JobConfiguration{ + Load: &bq.JobConfigurationLoad{ + DestinationTable: &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + SourceUris: []string{"uri"}, + }, + }, + } +} + +func stringFieldSchema() *FieldSchema { + return &FieldSchema{Name: "fieldname", Type: StringFieldType} +} + +func nestedFieldSchema() *FieldSchema { + return &FieldSchema{ + Name: "nested", + Type: RecordFieldType, + Schema: Schema{stringFieldSchema()}, + } +} + +func bqStringFieldSchema() *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Name: "fieldname", + Type: "STRING", + } +} + +func bqNestedFieldSchema() *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Name: "nested", + Type: "RECORD", + Fields: []*bq.TableFieldSchema{bqStringFieldSchema()}, + } +} + +func TestLoad(t *testing.T) { + testCases := []struct { + dst *Table + src *GCSReference + options []Option + want *bq.Job + }{ + { + dst: defaultTable(nil), + src: defaultGCS, + want: defaultLoadJob(), + }, + { + dst: defaultTable(nil), + src: defaultGCS, + options: []Option{ + MaxBadRecords(1), + AllowJaggedRows(), + AllowQuotedNewlines(), + IgnoreUnknownValues(), + }, + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.MaxBadRecords = 1 + j.Configuration.Load.AllowJaggedRows = true + j.Configuration.Load.AllowQuotedNewlines = true + j.Configuration.Load.IgnoreUnknownValues = true + return j + }(), + }, + { + dst: &Table{ + ProjectID: "project-id", + DatasetID: "dataset-id", + TableID: "table-id", + }, + options: []Option{CreateNever, WriteTruncate}, + src: defaultGCS, + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.CreateDisposition = "CREATE_NEVER" + j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE" + return j + }(), + }, + { + dst: &Table{ + ProjectID: "project-id", + DatasetID: "dataset-id", + TableID: "table-id", + }, + src: defaultGCS, + options: []Option{ + DestinationSchema(Schema{ + stringFieldSchema(), + nestedFieldSchema(), + }), + }, + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.Schema = &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqStringFieldSchema(), + bqNestedFieldSchema(), + }} + return j + }(), + }, + { + dst: defaultTable(nil), + src: &GCSReference{ + uris: []string{"uri"}, + SkipLeadingRows: 1, + SourceFormat: JSON, + Encoding: UTF_8, + FieldDelimiter: "\t", + Quote: "-", + }, + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.SkipLeadingRows = 1 + j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" + j.Configuration.Load.Encoding = "UTF-8" + j.Configuration.Load.FieldDelimiter = "\t" + hyphen := "-" + j.Configuration.Load.Quote = &hyphen + return j + }(), + }, + { + dst: defaultTable(nil), + src: &GCSReference{ + uris: []string{"uri"}, + Quote: "", + }, + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.Quote = nil + return j + }(), + }, + { + dst: defaultTable(nil), + src: &GCSReference{ + uris: []string{"uri"}, + Quote: "", + ForceZeroQuote: true, + }, + want: func() *bq.Job { + j := defaultLoadJob() + empty := "" + j.Configuration.Load.Quote = &empty + return j + }(), + }, + } + + for _, tc := range testCases { + s := &testService{} + c := &Client{ + service: s, + } + if _, err := c.Copy(context.Background(), tc.dst, tc.src, 
tc.options...); err != nil { + t.Errorf("err calling load: %v", err) + continue + } + if !reflect.DeepEqual(s.Job, tc.want) { + t.Errorf("loading: got:\n%v\nwant:\n%v", s.Job, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/query.go b/vendor/cloud.google.com/go/bigquery/query.go new file mode 100644 index 000000000..55cdfe621 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/query.go @@ -0,0 +1,44 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import bq "google.golang.org/api/bigquery/v2" + +// Query represents a query to be executed. Use Client.Query to create a query. +type Query struct { + // The query to execute. See https://cloud.google.com/bigquery/query-reference for details. + Q string + + // DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query. + // If DefaultProjectID is set, DefaultDatasetID must also be set. + DefaultProjectID string + DefaultDatasetID string + + client *Client +} + +func (q *Query) implementsSource() {} + +func (q *Query) implementsReadSource() {} + +func (q *Query) customizeQuerySrc(conf *bq.JobConfigurationQuery) { + conf.Query = q.Q + if q.DefaultProjectID != "" || q.DefaultDatasetID != "" { + conf.DefaultDataset = &bq.DatasetReference{ + DatasetId: q.DefaultDatasetID, + ProjectId: q.DefaultProjectID, + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/query_op.go b/vendor/cloud.google.com/go/bigquery/query_op.go new file mode 100644 index 000000000..7f83ec5a5 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/query_op.go @@ -0,0 +1,148 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +type queryOption interface { + customizeQuery(conf *bq.JobConfigurationQuery) +} + +// DisableQueryCache returns an Option that prevents results being fetched from the query cache. +// If this Option is not used, results are fetched from the cache if they are available. +// The query cache is a best-effort cache that is flushed whenever tables in the query are modified. +// Cached results are only available when TableID is unspecified in the query's destination Table. 
+// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching +func DisableQueryCache() Option { return disableQueryCache{} } + +type disableQueryCache struct{} + +func (opt disableQueryCache) implementsOption() {} + +func (opt disableQueryCache) customizeQuery(conf *bq.JobConfigurationQuery) { + f := false + conf.UseQueryCache = &f +} + +// DisableFlattenedResults returns an Option that prevents results being flattened. +// If this Option is not used, results from nested and repeated fields are flattened. +// DisableFlattenedResults implies AllowLargeResults. +// For more information, see https://cloud.google.com/bigquery/docs/data#nested +func DisableFlattenedResults() Option { return disableFlattenedResults{} } + +type disableFlattenedResults struct{} + +func (opt disableFlattenedResults) implementsOption() {} + +func (opt disableFlattenedResults) customizeQuery(conf *bq.JobConfigurationQuery) { + f := false + conf.FlattenResults = &f + // DisableFlattenedResults implies AllowLargeResults. + allowLargeResults{}.customizeQuery(conf) +} + +// AllowLargeResults returns an Option that allows the query to produce arbitrarily large result tables. +// The destination must be a table. +// When using this option, queries will take longer to execute, even if the result set is small. +// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults +func AllowLargeResults() Option { return allowLargeResults{} } + +type allowLargeResults struct{} + +func (opt allowLargeResults) implementsOption() {} + +func (opt allowLargeResults) customizeQuery(conf *bq.JobConfigurationQuery) { + conf.AllowLargeResults = true +} + +// JobPriority returns an Option that causes a query to be scheduled with the specified priority. +// The default priority is InteractivePriority. +// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries +func JobPriority(priority string) Option { return jobPriority(priority) } + +type jobPriority string + +func (opt jobPriority) implementsOption() {} + +func (opt jobPriority) customizeQuery(conf *bq.JobConfigurationQuery) { + conf.Priority = string(opt) +} + +const ( + BatchPriority = "BATCH" + InteractivePriority = "INTERACTIVE" +) + +// MaxBillingTier returns an Option that sets the maximum billing tier for a Query. +// Queries that have resource usage beyond this tier will fail (without +// incurring a charge). If this Option is not used, the project default will be used. +func MaxBillingTier(tier int) Option { return maxBillingTier(tier) } + +type maxBillingTier int + +func (opt maxBillingTier) implementsOption() {} + +func (opt maxBillingTier) customizeQuery(conf *bq.JobConfigurationQuery) { + tier := int64(opt) + conf.MaximumBillingTier = &tier +} + +// MaxBytesBilled returns an Option that limits the number of bytes billed for +// this job. Queries that would exceed this limit will fail (without incurring +// a charge). +// If this Option is not used, or bytes is < 1, the project default will be +// used.
+func MaxBytesBilled(bytes int64) Option { return maxBytesBilled(bytes) } + +type maxBytesBilled int64 + +func (opt maxBytesBilled) implementsOption() {} + +func (opt maxBytesBilled) customizeQuery(conf *bq.JobConfigurationQuery) { + if opt >= 1 { + conf.MaximumBytesBilled = int64(opt) + } +} + +func (c *Client) query(ctx context.Context, dst *Table, src *Query, options []Option) (*Job, error) { + job, options := initJobProto(c.projectID, options) + payload := &bq.JobConfigurationQuery{} + + dst.customizeQueryDst(payload) + src.customizeQuerySrc(payload) + + for _, opt := range options { + o, ok := opt.(queryOption) + if !ok { + return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) + } + o.customizeQuery(payload) + } + + job.Configuration = &bq.JobConfiguration{ + Query: payload, + } + j, err := c.service.insertJob(ctx, job, c.projectID) + if err != nil { + return nil, err + } + j.isQuery = true + return j, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/query_test.go b/vendor/cloud.google.com/go/bigquery/query_test.go new file mode 100644 index 000000000..bb93d539a --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/query_test.go @@ -0,0 +1,168 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
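As an orientation aside for readers of this vendored snapshot: at this vintage the package executes queries through the generic Client.Copy entry point, which dispatches on the destination/source pair, exactly as the tests below exercise. A minimal sketch of that flow, assuming a NewClient constructor from the non-vendored parts of the package; the project, dataset, and SQL strings are placeholders:

package main

import (
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	// NewClient is assumed here; it is not part of this hunk.
	c, err := bigquery.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatal(err)
	}
	q := &bigquery.Query{
		Q:                "SELECT word FROM publicdata:samples.shakespeare", // placeholder
		DefaultProjectID: "my-project",
		DefaultDatasetID: "mydataset",
	}
	// An empty Table destination stores results in an ephemeral table,
	// per customizeQueryDst in table.go below.
	dst := &bigquery.Table{}
	// Options are matched against the queryOption interface defined above.
	job, err := c.Copy(ctx, dst, q, bigquery.DisableQueryCache(), bigquery.JobPriority(bigquery.BatchPriority))
	if err != nil {
		log.Fatal(err)
	}
	_ = job // results can then be fetched via job.Read(ctx); see read_op.go below
}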
+ +package bigquery + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultQueryJob() *bq.Job { + return &bq.Job{ + Configuration: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{ + DestinationTable: &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + Query: "query string", + DefaultDataset: &bq.DatasetReference{ + ProjectId: "def-project-id", + DatasetId: "def-dataset-id", + }, + }, + }, + } +} + +func TestQuery(t *testing.T) { + testCases := []struct { + dst *Table + src *Query + options []Option + want *bq.Job + }{ + { + dst: defaultTable(nil), + src: defaultQuery, + want: defaultQueryJob(), + }, + { + dst: defaultTable(nil), + src: &Query{ + Q: "query string", + }, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.DefaultDataset = nil + return j + }(), + }, + { + dst: &Table{}, + src: defaultQuery, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.DestinationTable = nil + return j + }(), + }, + { + dst: &Table{ + ProjectID: "project-id", + DatasetID: "dataset-id", + TableID: "table-id", + }, + src: defaultQuery, + options: []Option{CreateNever, WriteTruncate}, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE" + j.Configuration.Query.CreateDisposition = "CREATE_NEVER" + return j + }(), + }, + { + dst: defaultTable(nil), + src: defaultQuery, + options: []Option{DisableQueryCache()}, + want: func() *bq.Job { + j := defaultQueryJob() + f := false + j.Configuration.Query.UseQueryCache = &f + return j + }(), + }, + { + dst: defaultTable(nil), + src: defaultQuery, + options: []Option{AllowLargeResults()}, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.AllowLargeResults = true + return j + }(), + }, + { + dst: defaultTable(nil), + src: defaultQuery, + options: []Option{DisableFlattenedResults()}, + want: func() *bq.Job { + j := defaultQueryJob() + f := false + j.Configuration.Query.FlattenResults = &f + j.Configuration.Query.AllowLargeResults = true + return j + }(), + }, + { + dst: defaultTable(nil), + src: defaultQuery, + options: []Option{JobPriority("low")}, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.Priority = "low" + return j + }(), + }, + { + dst: defaultTable(nil), + src: defaultQuery, + options: []Option{MaxBillingTier(3), MaxBytesBilled(5)}, + want: func() *bq.Job { + j := defaultQueryJob() + tier := int64(3) + j.Configuration.Query.MaximumBillingTier = &tier + j.Configuration.Query.MaximumBytesBilled = 5 + return j + }(), + }, + { + dst: defaultTable(nil), + src: defaultQuery, + options: []Option{MaxBytesBilled(-1)}, + want: defaultQueryJob(), + }, + } + + for _, tc := range testCases { + s := &testService{} + c := &Client{ + service: s, + } + if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil { + t.Errorf("err calling query: %v", err) + continue + } + if !reflect.DeepEqual(s.Job, tc.want) { + t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/read_op.go b/vendor/cloud.google.com/go/bigquery/read_op.go new file mode 100644 index 000000000..93089080b --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/read_op.go @@ -0,0 +1,70 @@ +// Copyright 2015 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import "golang.org/x/net/context" + +// RecordsPerRequest returns a ReadOption that sets the number of records to fetch per request when streaming data from BigQuery. +func RecordsPerRequest(n int64) ReadOption { return recordsPerRequest(n) } + +type recordsPerRequest int64 + +func (opt recordsPerRequest) customizeRead(conf *pagingConf) { + conf.recordsPerRequest = int64(opt) + conf.setRecordsPerRequest = true +} + +// StartIndex returns a ReadOption that sets the zero-based index of the row to start reading from. +func StartIndex(i uint64) ReadOption { return startIndex(i) } + +type startIndex uint64 + +func (opt startIndex) customizeRead(conf *pagingConf) { + conf.startIndex = uint64(opt) +} + +func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) { + return s.readTabledata(ctx, conf, token) +} + +// Read fetches the contents of the table. +func (t *Table) Read(_ context.Context, options ...ReadOption) (*Iterator, error) { + conf := &readTableConf{} + t.customizeReadSrc(conf) + + for _, o := range options { + o.customizeRead(&conf.paging) + } + + return newIterator(t.service, conf), nil +} + +func (conf *readQueryConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) { + return s.readQuery(ctx, conf, token) +} + +// Read fetches the results of a query job. +func (j *Job) Read(_ context.Context, options ...ReadOption) (*Iterator, error) { + conf := &readQueryConf{} + if err := j.customizeReadQuery(conf); err != nil { + return nil, err + } + + for _, o := range options { + o.customizeRead(&conf.paging) + } + + return newIterator(j.service, conf), nil +} diff --git a/vendor/cloud.google.com/go/bigquery/read_test.go b/vendor/cloud.google.com/go/bigquery/read_test.go new file mode 100644 index 000000000..81c0941c5 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/read_test.go @@ -0,0 +1,308 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "reflect" + "testing" + + "golang.org/x/net/context" +) + +type readTabledataArgs struct { + conf *readTableConf + tok string +} + +type readQueryArgs struct { + conf *readQueryConf + tok string +} + +// readServiceStub services read requests by returning data from an in-memory list of values. 
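Before the test scaffolding continues, a sketch of how the two Read entry points above are consumed. The Iterator's Next/Get/Err methods are assumed from iterator.go, which is not part of this hunk, though the tests below exercise the same surface:

package main

import (
	"fmt"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

// dumpTable prints every row of a table, page by page.
func dumpTable(ctx context.Context, t *bigquery.Table) error {
	// RecordsPerRequest tunes the page size; StartIndex could skip leading rows.
	it, err := t.Read(ctx, bigquery.RecordsPerRequest(500))
	if err != nil {
		return err
	}
	for it.Next(ctx) {
		var row bigquery.ValueList
		if err := it.Get(&row); err != nil {
			return err
		}
		fmt.Println(row)
	}
	return it.Err() // surfaces read errors, as TestReadError below verifies
}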
+type readServiceStub struct { + // values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery. + values [][][]Value // contains pages / rows / columns. + pageTokens map[string]string // maps incoming page token to returned page token. + + // arguments are recorded for later inspection. + readTabledataCalls []readTabledataArgs + readQueryCalls []readQueryArgs + + service +} + +func (s *readServiceStub) readValues(tok string) *readDataResult { + result := &readDataResult{ + pageToken: s.pageTokens[tok], + rows: s.values[0], + } + s.values = s.values[1:] + + return result +} +func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) { + s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token}) + return s.readValues(token), nil +} + +func (s *readServiceStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) { + s.readQueryCalls = append(s.readQueryCalls, readQueryArgs{conf, token}) + return s.readValues(token), nil +} + +func TestRead(t *testing.T) { + // The data for the service stub to return is populated for each test case in the testCases for loop. + service := &readServiceStub{} + c := &Client{ + service: service, + } + + queryJob := &Job{ + projectID: "project-id", + jobID: "job-id", + service: service, + isQuery: true, + } + + for _, src := range []ReadSource{defaultTable(service), queryJob} { + testCases := []struct { + data [][][]Value + pageTokens map[string]string + want []ValueList + }{ + { + data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}}, + pageTokens: map[string]string{"": "a", "a": ""}, + want: []ValueList{{1, 2}, {11, 12}, {30, 40}, {31, 41}}, + }, + { + data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}}, + pageTokens: map[string]string{"": ""}, // no more pages after first one. + want: []ValueList{{1, 2}, {11, 12}}, + }, + } + + for _, tc := range testCases { + service.values = tc.data + service.pageTokens = tc.pageTokens + if got, ok := doRead(t, c, src); ok { + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want) + } + } + } + } +} + +// doRead calls Read with a ReadSource. Get is repeatedly called on the Iterator returned by Read and the results are returned. +func doRead(t *testing.T, c *Client, src ReadSource) ([]ValueList, bool) { + it, err := c.Read(context.Background(), src) + if err != nil { + t.Errorf("err calling Read: %v", err) + return nil, false + } + var got []ValueList + for it.Next(context.Background()) { + var vals ValueList + if err := it.Get(&vals); err != nil { + t.Errorf("err calling Get: %v", err) + return nil, false + } else { + got = append(got, vals) + } + } + + return got, true +} + +func TestNoMoreValues(t *testing.T) { + c := &Client{ + service: &readServiceStub{ + values: [][][]Value{{{1, 2}, {11, 12}}}, + }, + } + it, err := c.Read(context.Background(), defaultTable(c.service)) + if err != nil { + t.Fatalf("err calling Read: %v", err) + } + var vals ValueList + // We expect to retrieve two values and then fail on the next attempt. 
+ if !it.Next(context.Background()) { + t.Fatalf("Next: got: false: want: true") + } + if !it.Next(context.Background()) { + t.Fatalf("Next: got: false: want: true") + } + if err := it.Get(&vals); err != nil { + t.Fatalf("Get: got: %v: want: nil", err) + } + if it.Next(context.Background()) { + t.Fatalf("Next: got: true: want: false") + } + if err := it.Get(&vals); err == nil { + t.Fatalf("Get: got: %v: want: non-nil", err) + } +} + +// delayedReadStub simulates reading results from a query that has not yet +// completed. Its readQuery method initially reports that the query job is not +// yet complete. Subsequently, it proxies the request through to another +// service stub. +type delayedReadStub struct { + numDelays int + + readServiceStub +} + +func (s *delayedReadStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) { + if s.numDelays > 0 { + s.numDelays-- + return nil, errIncompleteJob + } + return s.readServiceStub.readQuery(ctx, conf, token) +} + +// TestIncompleteJob tests that an Iterator which reads from a query job will block until the job is complete. +func TestIncompleteJob(t *testing.T) { + service := &delayedReadStub{ + numDelays: 2, + readServiceStub: readServiceStub{ + values: [][][]Value{{{1, 2}}}, + }, + } + c := &Client{service: service} + queryJob := &Job{ + projectID: "project-id", + jobID: "job-id", + service: service, + isQuery: true, + } + it, err := c.Read(context.Background(), queryJob) + if err != nil { + t.Fatalf("err calling Read: %v", err) + } + var got ValueList + want := ValueList{1, 2} + if !it.Next(context.Background()) { + t.Fatalf("Next: got: false: want: true") + } + if err := it.Get(&got); err != nil { + t.Fatalf("Error calling Get: %v", err) + } + if service.numDelays != 0 { + t.Errorf("remaining numDelays : got: %v want:0", service.numDelays) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("reading: got:\n%v\nwant:\n%v", got, want) + } +} + +type errorReadService struct { + service +} + +func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) { + return nil, errors.New("bang!") +} + +func TestReadError(t *testing.T) { + // test that service read errors are propagated back to the caller. + c := &Client{service: &errorReadService{}} + it, err := c.Read(context.Background(), defaultTable(c.service)) + if err != nil { + // Read should not return an error; only Err should. + t.Fatalf("err calling Read: %v", err) + } + if it.Next(context.Background()) { + t.Fatalf("Next: got: true: want: false") + } + if err := it.Err(); err.Error() != "bang!" { + t.Fatalf("Get: got: %v: want: bang!", err) + } +} + +func TestReadTabledataOptions(t *testing.T) { + // test that read options are propagated. 
+ s := &readServiceStub{ + values: [][][]Value{{{1, 2}}}, + } + c := &Client{service: s} + it, err := c.Read(context.Background(), defaultTable(s), RecordsPerRequest(5)) + + if err != nil { + t.Fatalf("err calling Read: %v", err) + } + if !it.Next(context.Background()) { + t.Fatalf("Next: got: false: want: true") + } + + want := []readTabledataArgs{{ + conf: &readTableConf{ + projectID: "project-id", + datasetID: "dataset-id", + tableID: "table-id", + paging: pagingConf{ + recordsPerRequest: 5, + setRecordsPerRequest: true, + }, + }, + tok: "", + }} + + if !reflect.DeepEqual(s.readTabledataCalls, want) { + t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want) + } +} + +func TestReadQueryOptions(t *testing.T) { + // test that read options are propagated. + s := &readServiceStub{ + values: [][][]Value{{{1, 2}}}, + } + c := &Client{service: s} + + queryJob := &Job{ + projectID: "project-id", + jobID: "job-id", + service: s, + isQuery: true, + } + it, err := c.Read(context.Background(), queryJob, RecordsPerRequest(5)) + + if err != nil { + t.Fatalf("err calling Read: %v", err) + } + if !it.Next(context.Background()) { + t.Fatalf("Next: got: false: want: true") + } + + want := []readQueryArgs{{ + conf: &readQueryConf{ + projectID: "project-id", + jobID: "job-id", + paging: pagingConf{ + recordsPerRequest: 5, + setRecordsPerRequest: true, + }, + }, + tok: "", + }} + + if !reflect.DeepEqual(s.readQueryCalls, want) { + t.Errorf("reading: got:\n%v\nwant:\n%v", s.readQueryCalls, want) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/schema.go b/vendor/cloud.google.com/go/bigquery/schema.go new file mode 100644 index 000000000..0733a9938 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/schema.go @@ -0,0 +1,233 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "reflect" + + bq "google.golang.org/api/bigquery/v2" +) + +// Schema describes the fields in a table or query result. +type Schema []*FieldSchema + +type FieldSchema struct { + // The field name. + // Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), + // and must start with a letter or underscore. + // The maximum length is 128 characters. + Name string + + // A description of the field. The maximum length is 16,384 characters. + Description string + + // Whether the field may contain multiple values. + Repeated bool + // Whether the field is required. Ignored if Repeated is true. + Required bool + + // The field data type. If Type is Record, then this field contains a nested schema, + // which is described by Schema. + Type FieldType + // Describes the nested schema if Type is set to Record. 
+ Schema Schema +} + +func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema { + tfs := &bq.TableFieldSchema{ + Description: fs.Description, + Name: fs.Name, + Type: string(fs.Type), + } + + if fs.Repeated { + tfs.Mode = "REPEATED" + } else if fs.Required { + tfs.Mode = "REQUIRED" + } // else leave as default, which is interpreted as NULLABLE. + + for _, f := range fs.Schema { + tfs.Fields = append(tfs.Fields, f.asTableFieldSchema()) + } + + return tfs +} + +func (s Schema) asTableSchema() *bq.TableSchema { + var fields []*bq.TableFieldSchema + for _, f := range s { + fields = append(fields, f.asTableFieldSchema()) + } + return &bq.TableSchema{Fields: fields} +} + +// customizeCreateTable allows a Schema to be used directly as an option to CreateTable. +func (s Schema) customizeCreateTable(conf *createTableConf) { + conf.schema = s.asTableSchema() +} + +func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema { + fs := &FieldSchema{ + Description: tfs.Description, + Name: tfs.Name, + Repeated: tfs.Mode == "REPEATED", + Required: tfs.Mode == "REQUIRED", + Type: FieldType(tfs.Type), + } + + for _, f := range tfs.Fields { + fs.Schema = append(fs.Schema, convertTableFieldSchema(f)) + } + return fs +} + +func convertTableSchema(ts *bq.TableSchema) Schema { + var s Schema + for _, f := range ts.Fields { + s = append(s, convertTableFieldSchema(f)) + } + return s +} + +type FieldType string + +const ( + StringFieldType FieldType = "STRING" + IntegerFieldType FieldType = "INTEGER" + FloatFieldType FieldType = "FLOAT" + BooleanFieldType FieldType = "BOOLEAN" + TimestampFieldType FieldType = "TIMESTAMP" + RecordFieldType FieldType = "RECORD" +) + +var errNoStruct = errors.New("bigquery: can only infer schema from struct or pointer to struct") +var errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct") + +// InferSchema tries to derive a BigQuery schema from the supplied struct value. +// NOTE: All fields in the returned Schema are configured to be required, +// unless the corresponding field in the supplied struct is a slice or array. +// It is considered an error if the struct (including nested structs) contains +// any exported fields that are pointers or one of the following types: +// map, interface, complex64, complex128, func, chan. +// In these cases, an error will be returned. +// Future versions may handle these cases without error. 
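To make the inference rules described above concrete, a small sketch with an invented struct; the resulting Schema can also be passed directly to Table.Create, since Schema satisfies CreateTableOption via customizeCreateTable above:

package main

import (
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/bigquery"
)

type visit struct {
	URL      string    // -> STRING, REQUIRED
	Referrer []byte    // -> STRING, REQUIRED (byte slices map to STRING)
	Count    int       // -> INTEGER, REQUIRED
	When     time.Time // -> TIMESTAMP, REQUIRED
	Tags     []string  // -> STRING, REPEATED (repeated fields are never Required)
}

func main() {
	schema, err := bigquery.InferSchema(visit{})
	if err != nil {
		log.Fatal(err) // pointer, map, chan, or interface fields would land here
	}
	for _, f := range schema {
		fmt.Printf("%s: %s repeated=%t required=%t\n", f.Name, f.Type, f.Repeated, f.Required)
	}
}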
+func InferSchema(st interface{}) (Schema, error) { + return inferStruct(reflect.TypeOf(st)) +} + +func inferStruct(rt reflect.Type) (Schema, error) { + switch rt.Kind() { + case reflect.Struct: + return inferFields(rt) + default: + return nil, errNoStruct + } + +} + +// inferFieldSchema infers the FieldSchema for a Go type +func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) { + switch { + case isByteSlice(rt): + return &FieldSchema{Required: true, Type: StringFieldType}, nil + case isTimeTime(rt): + return &FieldSchema{Required: true, Type: TimestampFieldType}, nil + case isRepeated(rt): + et := rt.Elem() + + if isRepeated(et) && !isByteSlice(et) { + // Multi dimensional slices/arrays are not supported by BigQuery + return nil, errUnsupportedFieldType + } + + f, err := inferFieldSchema(et) + if err != nil { + return nil, err + } + f.Repeated = true + f.Required = false + return f, nil + case isStruct(rt): + nested, err := inferFields(rt) + if err != nil { + return nil, err + } + return &FieldSchema{Required: true, Type: RecordFieldType, Schema: nested}, nil + } + + switch rt.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int, + reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + return &FieldSchema{Required: true, Type: IntegerFieldType}, nil + case reflect.String: + return &FieldSchema{Required: true, Type: StringFieldType}, nil + case reflect.Bool: + return &FieldSchema{Required: true, Type: BooleanFieldType}, nil + case reflect.Float32, reflect.Float64: + return &FieldSchema{Required: true, Type: FloatFieldType}, nil + default: + return nil, errUnsupportedFieldType + } +} + +// inferFields extracts all exported field types from struct type. +func inferFields(rt reflect.Type) (Schema, error) { + var s Schema + + for i := 0; i < rt.NumField(); i++ { + field := rt.Field(i) + if field.PkgPath != "" { + // field is unexported. + continue + } + + if field.Anonymous { + // TODO(nightlyone) support embedded (see https://github.com/GoogleCloudPlatform/google-cloud-go/issues/238) + return nil, errUnsupportedFieldType + } + + f, err := inferFieldSchema(field.Type) + if err != nil { + return nil, err + } + f.Name = field.Name + + s = append(s, f) + } + + return s, nil +} + +func isByteSlice(rt reflect.Type) bool { + return rt.Kind() == reflect.Slice && rt.Elem().Kind() == reflect.Uint8 +} + +func isTimeTime(rt reflect.Type) bool { + return rt.PkgPath() == "time" && rt.Name() == "Time" +} + +func isStruct(rt reflect.Type) bool { + return rt.Kind() == reflect.Struct +} + +func isRepeated(rt reflect.Type) bool { + switch rt.Kind() { + case reflect.Slice, reflect.Array: + return true + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/bigquery/schema_test.go b/vendor/cloud.google.com/go/bigquery/schema_test.go new file mode 100644 index 000000000..f0320aaba --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/schema_test.go @@ -0,0 +1,495 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + "reflect" + "testing" + "time" + + bq "google.golang.org/api/bigquery/v2" +) + +func (fs *FieldSchema) GoString() string { + if fs == nil { + return "" + } + + return fmt.Sprintf("{Name:%s Description:%s Repeated:%t Required:%t Type:%s Schema:%s}", + fs.Name, + fs.Description, + fs.Repeated, + fs.Required, + fs.Type, + fmt.Sprintf("%#v", fs.Schema), + ) +} + +func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Description: desc, + Name: name, + Mode: mode, + Type: typ, + } +} + +func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema { + return &FieldSchema{ + Description: desc, + Name: name, + Repeated: repeated, + Required: required, + Type: FieldType(typ), + } +} + +func TestSchemaConversion(t *testing.T) { + testCases := []struct { + schema Schema + bqSchema *bq.TableSchema + }{ + { + // required + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", false, true), + }, + }, + { + // repeated + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REPEATED"), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", true, false), + }, + }, + { + // nullable, string + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", false, false), + }, + }, + { + // integer + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "INTEGER", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "INTEGER", false, false), + }, + }, + { + // float + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "FLOAT", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "FLOAT", false, false), + }, + }, + { + // boolean + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "BOOLEAN", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "BOOLEAN", false, false), + }, + }, + { + // timestamp + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "TIMESTAMP", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "TIMESTAMP", false, false), + }, + }, + { + // nested + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + { + Description: "An outer schema wrapping a nested schema", + Name: "outer", + Mode: "REQUIRED", + Type: "RECORD", + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("inner field", "inner", "STRING", ""), + }, + }, + }, + }, + schema: Schema{ + &FieldSchema{ + Description: "An outer schema wrapping a nested schema", + Name: "outer", + Required: true, + Type: "RECORD", + Schema: []*FieldSchema{ + { + Description: "inner field", + Name: "inner", + Type: "STRING", + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + bqSchema := tc.schema.asTableSchema() + if !reflect.DeepEqual(bqSchema, tc.bqSchema) { + t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v", bqSchema, tc.bqSchema) + } + schema := convertTableSchema(tc.bqSchema) + if !reflect.DeepEqual(schema, tc.schema) { + t.Errorf("converting to Schema: 
got:\n%v\nwant:\n%v", schema, tc.schema) + } + } +} + +type allStrings struct { + String string + ByteSlice []byte +} + +type allSignedIntegers struct { + Int64 int64 + Int32 int32 + Int16 int16 + Int8 int8 + Int int +} + +type allUnsignedIntegers struct { + Uint64 uint64 + Uint32 uint32 + Uint16 uint16 + Uint8 uint8 + Uintptr uintptr + Uint uint +} + +type allFloat struct { + Float64 float64 + Float32 float32 + // NOTE: Complex32 and Complex64 are unsupported by BigQuery +} + +type allBoolean struct { + Bool bool +} + +type allTime struct { + Time time.Time +} + +func TestSimpleInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: allSignedIntegers{}, + want: Schema{ + fieldSchema("", "Int64", "INTEGER", false, true), + fieldSchema("", "Int32", "INTEGER", false, true), + fieldSchema("", "Int16", "INTEGER", false, true), + fieldSchema("", "Int8", "INTEGER", false, true), + fieldSchema("", "Int", "INTEGER", false, true), + }, + }, + { + in: allUnsignedIntegers{}, + want: Schema{ + fieldSchema("", "Uint64", "INTEGER", false, true), + fieldSchema("", "Uint32", "INTEGER", false, true), + fieldSchema("", "Uint16", "INTEGER", false, true), + fieldSchema("", "Uint8", "INTEGER", false, true), + fieldSchema("", "Uintptr", "INTEGER", false, true), + fieldSchema("", "Uint", "INTEGER", false, true), + }, + }, + { + in: allFloat{}, + want: Schema{ + fieldSchema("", "Float64", "FLOAT", false, true), + fieldSchema("", "Float32", "FLOAT", false, true), + }, + }, + { + in: allBoolean{}, + want: Schema{ + fieldSchema("", "Bool", "BOOLEAN", false, true), + }, + }, + { + in: allTime{}, + want: Schema{ + fieldSchema("", "Time", "TIMESTAMP", false, true), + }, + }, + { + in: allStrings{}, + want: Schema{ + fieldSchema("", "String", "STRING", false, true), + fieldSchema("", "ByteSlice", "STRING", false, true), + }, + }, + } + for i, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%d: error inferring TableSchema: %v", i, err) + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, tc.want) + } + } +} + +type containsNested struct { + hidden string + NotNested int + Nested struct { + Inside int + } +} + +type containsDoubleNested struct { + NotNested int + Nested struct { + InsideNested struct { + Inside int + } + } +} + +func TestNestedInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: containsNested{}, + want: Schema{ + fieldSchema("", "NotNested", "INTEGER", false, true), + &FieldSchema{ + Name: "Nested", + Required: true, + Type: "RECORD", + Schema: []*FieldSchema{ + { + Name: "Inside", + Type: "INTEGER", + Required: true, + }, + }, + }, + }, + }, + { + in: containsDoubleNested{}, + want: Schema{ + fieldSchema("", "NotNested", "INTEGER", false, true), + &FieldSchema{ + Name: "Nested", + Required: true, + Type: "RECORD", + Schema: []*FieldSchema{ + { + Name: "InsideNested", + Required: true, + Type: "RECORD", + Schema: []*FieldSchema{ + { + Name: "Inside", + Type: "INTEGER", + Required: true, + }, + }, + }, + }, + }, + }, + }, + } + + for i, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%d: error inferring TableSchema: %v", i, err) + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, tc.want) + } + } +} + +type simpleRepeated struct { + NotRepeated []byte + RepeatedByteSlice [][]byte + Repeated []int +} + +type 
simpleNestedRepeated struct { + NotRepeated int + Repeated []struct { + Inside int + } +} + +func TestRepeatedInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: simpleRepeated{}, + want: Schema{ + fieldSchema("", "NotRepeated", "STRING", false, true), + fieldSchema("", "RepeatedByteSlice", "STRING", true, false), + fieldSchema("", "Repeated", "INTEGER", true, false), + }, + }, + { + in: simpleNestedRepeated{}, + want: Schema{ + fieldSchema("", "NotRepeated", "INTEGER", false, true), + &FieldSchema{ + Name: "Repeated", + Repeated: true, + Type: "RECORD", + Schema: []*FieldSchema{ + { + Name: "Inside", + Type: "INTEGER", + Required: true, + }, + }, + }, + }, + }, + } + + for i, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%d: error inferring TableSchema: %v", i, err) + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, tc.want) + } + } +} + +type Embedded struct { + Embedded int +} + +type nestedEmbedded struct { + Embedded +} + +func TestSchemaErrors(t *testing.T) { + testCases := []struct { + in interface{} + err error + }{ + { + in: []byte{}, + err: errNoStruct, + }, + { + in: new(int), + err: errNoStruct, + }, + { + in: new(allStrings), + err: errNoStruct, + }, + { + in: struct{ Complex complex64 }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Map map[string]int }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Chan chan bool }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Ptr *int }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Interface interface{} }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ MultiDimensional [][]int }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ MultiDimensional [][][]byte }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ ChanSlice []chan bool }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ NestedChan struct{ Chan []chan bool } }{}, + err: errUnsupportedFieldType, + }, + { + in: nestedEmbedded{}, + err: errUnsupportedFieldType, + }, + } + for i, tc := range testCases { + want := tc.err + _, got := InferSchema(tc.in) + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/service.go b/vendor/cloud.google.com/go/bigquery/service.go new file mode 100644 index 000000000..d83f05273 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/service.go @@ -0,0 +1,484 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + "net/http" + "sync" + "time" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// service provides an internal abstraction to isolate the generated +// BigQuery API; most of this package uses this interface instead. 
+// The single implementation, *bigqueryService, contains all the knowledge +// of the generated BigQuery API. +type service interface { + // Jobs + insertJob(ctx context.Context, job *bq.Job, projectId string) (*Job, error) + getJobType(ctx context.Context, projectId, jobID string) (jobType, error) + jobCancel(ctx context.Context, projectId, jobID string) error + jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error) + + // Tables + createTable(ctx context.Context, conf *createTableConf) error + getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) + deleteTable(ctx context.Context, projectID, datasetID, tableID string) error + listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) + patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) + + // Table data + readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) + insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error + + // Datasets + insertDataset(ctx context.Context, datasetID, projectID string) error + + // Misc + + // readQuery reads data resulting from a query job. If the job is + // incomplete, an errIncompleteJob is returned. readQuery may be called + // repeatedly to poll for job completion. + readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) +} + +type bigqueryService struct { + s *bq.Service +} + +func newBigqueryService(client *http.Client, endpoint string) (*bigqueryService, error) { + s, err := bq.New(client) + if err != nil { + return nil, fmt.Errorf("constructing bigquery client: %v", err) + } + s.BasePath = endpoint + + return &bigqueryService{s: s}, nil +} + +// getPages calls the supplied getPage function repeatedly until there are no pages left to get. +// token is the token of the initial page to start from. Use an empty string to start from the beginning. +func getPages(token string, getPage func(token string) (nextToken string, err error)) error { + for { + var err error + token, err = getPage(token) + if err != nil { + return err + } + if token == "" { + return nil + } + } +} + +func (s *bigqueryService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) { + res, err := s.s.Jobs.Insert(projectID, job).Context(ctx).Do() + if err != nil { + return nil, err + } + return &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil +} + +type pagingConf struct { + recordsPerRequest int64 + setRecordsPerRequest bool + + startIndex uint64 +} + +type readTableConf struct { + projectID, datasetID, tableID string + paging pagingConf + schema Schema // lazily initialized when the first page of data is fetched. +} + +type readDataResult struct { + pageToken string + rows [][]Value + totalRows uint64 + schema Schema +} + +type readQueryConf struct { + projectID, jobID string + paging pagingConf +} + +func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) { + // Prepare request to fetch one page of table data. 
+ req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID) + + if pageToken != "" { + req.PageToken(pageToken) + } else { + req.StartIndex(conf.paging.startIndex) + } + + if conf.paging.setRecordsPerRequest { + req.MaxResults(conf.paging.recordsPerRequest) + } + + // Fetch the table schema in the background, if necessary. + var schemaErr error + var schemaFetch sync.WaitGroup + if conf.schema == nil { + schemaFetch.Add(1) + go func() { + defer schemaFetch.Done() + var t *bq.Table + t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID). + Fields("schema"). + Context(ctx). + Do() + if schemaErr == nil && t.Schema != nil { + conf.schema = convertTableSchema(t.Schema) + } + }() + } + + res, err := req.Context(ctx).Do() + if err != nil { + return nil, err + } + + schemaFetch.Wait() + if schemaErr != nil { + return nil, schemaErr + } + + result := &readDataResult{ + pageToken: res.PageToken, + totalRows: uint64(res.TotalRows), + schema: conf.schema, + } + result.rows, err = convertRows(res.Rows, conf.schema) + if err != nil { + return nil, err + } + return result, nil +} + +var errIncompleteJob = errors.New("internal error: query results not available because job is not complete") + +// getQueryResultsTimeout controls the maximum duration of a request to the +// BigQuery GetQueryResults endpoint. Setting a long timeout here does not +// cause increased overall latency, as results are returned as soon as they are +// available. +const getQueryResultsTimeout = time.Minute + +func (s *bigqueryService) readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) { + req := s.s.Jobs.GetQueryResults(conf.projectID, conf.jobID). + TimeoutMs(getQueryResultsTimeout.Nanoseconds() / 1e6) + + if pageToken != "" { + req.PageToken(pageToken) + } else { + req.StartIndex(conf.paging.startIndex) + } + + if conf.paging.setRecordsPerRequest { + req.MaxResults(conf.paging.recordsPerRequest) + } + + res, err := req.Context(ctx).Do() + if err != nil { + return nil, err + } + + if !res.JobComplete { + return nil, errIncompleteJob + } + schema := convertTableSchema(res.Schema) + result := &readDataResult{ + pageToken: res.PageToken, + totalRows: res.TotalRows, + schema: schema, + } + result.rows, err = convertRows(res.Rows, schema) + if err != nil { + return nil, err + } + return result, nil +} + +type insertRowsConf struct { + templateSuffix string + ignoreUnknownValues bool + skipInvalidRows bool +} + +func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error { + req := &bq.TableDataInsertAllRequest{ + TemplateSuffix: conf.templateSuffix, + IgnoreUnknownValues: conf.ignoreUnknownValues, + SkipInvalidRows: conf.skipInvalidRows, + } + for _, row := range rows { + m := make(map[string]bq.JsonValue) + for k, v := range row.Row { + m[k] = bq.JsonValue(v) + } + req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{ + InsertId: row.InsertID, + Json: m, + }) + } + res, err := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx).Do() + if err != nil { + return err + } + if len(res.InsertErrors) == 0 { + return nil + } + + var errs PutMultiError + for _, e := range res.InsertErrors { + if int(e.Index) > len(rows) { + return fmt.Errorf("internal error: unexpected row index: %v", e.Index) + } + rie := RowInsertionError{ + InsertID: rows[e.Index].InsertID, + RowIndex: int(e.Index), + } + for _, errp := range e.Errors { + rie.Errors = 
append(rie.Errors, errorFromErrorProto(errp)) + } + errs = append(errs, rie) + } + return errs +} + +type jobType int + +const ( + copyJobType jobType = iota + extractJobType + loadJobType + queryJobType +) + +func (s *bigqueryService) getJobType(ctx context.Context, projectID, jobID string) (jobType, error) { + res, err := s.s.Jobs.Get(projectID, jobID). + Fields("configuration"). + Context(ctx). + Do() + + if err != nil { + return 0, err + } + + switch { + case res.Configuration.Copy != nil: + return copyJobType, nil + case res.Configuration.Extract != nil: + return extractJobType, nil + case res.Configuration.Load != nil: + return loadJobType, nil + case res.Configuration.Query != nil: + return queryJobType, nil + default: + return 0, errors.New("unknown job type") + } +} + +func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error { + // Jobs.Cancel returns a job entity, but the only relevant piece of + // data it may contain (the status of the job) is unreliable. From the + // docs: "This call will return immediately, and the client will need + // to poll for the job status to see if the cancel completed + // successfully". So it would be misleading to return a status. + _, err := s.s.Jobs.Cancel(projectID, jobID). + Fields(). // We don't need any of the response data. + Context(ctx). + Do() + return err +} + +func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) { + res, err := s.s.Jobs.Get(projectID, jobID). + Fields("status"). // Only fetch what we need. + Context(ctx). + Do() + if err != nil { + return nil, err + } + return jobStatusFromProto(res.Status) +} + +var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done} + +func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) { + state, ok := stateMap[status.State] + if !ok { + return nil, fmt.Errorf("unexpected job state: %v", status.State) + } + + newStatus := &JobStatus{ + State: state, + err: nil, + } + if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil { + newStatus.err = err + } + + for _, ep := range status.Errors { + newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep)) + } + return newStatus, nil +} + +// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset. +func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) { + var tables []*Table + res, err := s.s.Tables.List(projectID, datasetID). + PageToken(pageToken). + Context(ctx). + Do() + if err != nil { + return nil, "", err + } + for _, t := range res.Tables { + tables = append(tables, s.convertListedTable(t)) + } + return tables, res.NextPageToken, nil +} + +type createTableConf struct { + projectID, datasetID, tableID string + expiration time.Time + viewQuery string + schema *bq.TableSchema +} + +// createTable creates a table in the BigQuery service. +// expiration is an optional time after which the table will be deleted and its storage reclaimed. +// If viewQuery is non-empty, the created table will be of type VIEW. +// Note: expiration can only be set during table creation. +// Note: after table creation, a view can be modified only if its table was initially created with a view. 
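One unit-related note before the implementation below: BigQuery expresses table expiration, creation, and last-modified times as milliseconds since the Unix epoch, which is why bqTableToMetadata multiplies by 1e6 to obtain nanoseconds. A self-contained sketch of the round-trip:

package main

import (
	"fmt"
	"time"
)

func main() {
	exp := time.Now().Add(24 * time.Hour)

	// Go time -> BigQuery expirationTime (milliseconds since the epoch).
	ms := exp.UnixNano() / 1e6

	// BigQuery expirationTime -> Go time (milliseconds -> nanoseconds).
	back := time.Unix(0, ms*1e6)

	fmt.Println(exp.Sub(back) < time.Millisecond) // true: only sub-millisecond precision is lost
}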
+func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error {
+	table := &bq.Table{
+		TableReference: &bq.TableReference{
+			ProjectId: conf.projectID,
+			DatasetId: conf.datasetID,
+			TableId:   conf.tableID,
+		},
+	}
+	if !conf.expiration.IsZero() {
+		// BigQuery expects milliseconds since the epoch, matching the
+		// *1e6 nanosecond conversions in bqTableToMetadata below.
+		table.ExpirationTime = conf.expiration.UnixNano() / 1e6
+	}
+	// TODO(jba): make it impossible to provide both a view query and a schema.
+	if conf.viewQuery != "" {
+		table.View = &bq.ViewDefinition{
+			Query: conf.viewQuery,
+		}
+	}
+	if conf.schema != nil {
+		table.Schema = conf.schema
+	}
+
+	_, err := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx).Do()
+	return err
+}
+
+func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
+	table, err := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx).Do()
+	if err != nil {
+		return nil, err
+	}
+	return bqTableToMetadata(table), nil
+}
+
+func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
+	return s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx).Do()
+}
+
+func bqTableToMetadata(t *bq.Table) *TableMetadata {
+	md := &TableMetadata{
+		Description: t.Description,
+		Name:        t.FriendlyName,
+		Type:        TableType(t.Type),
+		ID:          t.Id,
+		NumBytes:    t.NumBytes,
+		NumRows:     t.NumRows,
+	}
+	if t.ExpirationTime != 0 {
+		md.ExpirationTime = time.Unix(0, t.ExpirationTime*1e6)
+	}
+	if t.CreationTime != 0 {
+		md.CreationTime = time.Unix(0, t.CreationTime*1e6)
+	}
+	if t.LastModifiedTime != 0 {
+		md.LastModifiedTime = time.Unix(0, int64(t.LastModifiedTime*1e6))
+	}
+	if t.Schema != nil {
+		md.Schema = convertTableSchema(t.Schema)
+	}
+	if t.View != nil {
+		md.View = t.View.Query
+	}
+
+	return md
+}
+
+func (s *bigqueryService) convertListedTable(t *bq.TableListTables) *Table {
+	return &Table{
+		ProjectID: t.TableReference.ProjectId,
+		DatasetID: t.TableReference.DatasetId,
+		TableID:   t.TableReference.TableId,
+		service:   s,
+	}
+}
+
+// patchTableConf contains fields to be patched.
+type patchTableConf struct {
+	// These fields are omitted from the patch operation if nil.
+	Description *string
+	Name        *string
+}
+
+func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) {
+	t := &bq.Table{}
+	forceSend := func(field string) {
+		t.ForceSendFields = append(t.ForceSendFields, field)
+	}
+
+	if conf.Description != nil {
+		t.Description = *conf.Description
+		forceSend("Description")
+	}
+	if conf.Name != nil {
+		t.FriendlyName = *conf.Name
+		forceSend("FriendlyName")
+	}
+	table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t).
+		Context(ctx).
+		Do()
+	if err != nil {
+		return nil, err
+	}
+	return bqTableToMetadata(table), nil
+}
+
+func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string) error {
+	ds := &bq.Dataset{
+		DatasetReference: &bq.DatasetReference{DatasetId: datasetID},
+	}
+	_, err := s.s.Datasets.Insert(projectID, ds).Context(ctx).Do()
+	return err
+}
diff --git a/vendor/cloud.google.com/go/bigquery/table.go b/vendor/cloud.google.com/go/bigquery/table.go
new file mode 100644
index 000000000..e3cf5576e
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/table.go
@@ -0,0 +1,281 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + "time" + + "golang.org/x/net/context" + + bq "google.golang.org/api/bigquery/v2" +) + +// A Table is a reference to a BigQuery table. +type Table struct { + // ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query. + // In this case the result will be stored in an ephemeral table. + ProjectID string + DatasetID string + // TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + // The maximum length is 1,024 characters. + TableID string + + service service +} + +// TableMetadata contains information about a BigQuery table. +type TableMetadata struct { + Description string // The user-friendly description of this table. + Name string // The user-friendly name for this table. + Schema Schema + View string + + ID string // An opaque ID uniquely identifying the table. + Type TableType + + // The time when this table expires. If not set, the table will persist + // indefinitely. Expired tables will be deleted and their storage reclaimed. + ExpirationTime time.Time + + CreationTime time.Time + LastModifiedTime time.Time + + // The size of the table in bytes. + // This does not include data that is being buffered during a streaming insert. + NumBytes int64 + + // The number of rows of data in this table. + // This does not include data that is being buffered during a streaming insert. + NumRows uint64 +} + +// Tables is a group of tables. The tables may belong to differing projects or datasets. +type Tables []*Table + +// CreateDisposition specifies the circumstances under which destination table will be created. +// Default is CreateIfNeeded. +type TableCreateDisposition string + +const ( + // The table will be created if it does not already exist. Tables are created atomically on successful completion of a job. + CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED" + + // The table must already exist and will not be automatically created. + CreateNever TableCreateDisposition = "CREATE_NEVER" +) + +func CreateDisposition(disp TableCreateDisposition) Option { return disp } + +func (opt TableCreateDisposition) implementsOption() {} + +func (opt TableCreateDisposition) customizeLoad(conf *bq.JobConfigurationLoad) { + conf.CreateDisposition = string(opt) +} + +func (opt TableCreateDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy) { + conf.CreateDisposition = string(opt) +} + +func (opt TableCreateDisposition) customizeQuery(conf *bq.JobConfigurationQuery) { + conf.CreateDisposition = string(opt) +} + +// TableWriteDisposition specifies how existing data in a destination table is treated. +// Default is WriteAppend. +type TableWriteDisposition string + +const ( + // Data will be appended to any existing data in the destination table. + // Data is appended atomically on successful completion of a job. + WriteAppend TableWriteDisposition = "WRITE_APPEND" + + // Existing data in the destination table will be overwritten. + // Data is overwritten atomically on successful completion of a job. 
+ WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE" + + // Writes will fail if the destination table already contains data. + WriteEmpty TableWriteDisposition = "WRITE_EMPTY" +) + +func WriteDisposition(disp TableWriteDisposition) Option { return disp } + +func (opt TableWriteDisposition) implementsOption() {} + +func (opt TableWriteDisposition) customizeLoad(conf *bq.JobConfigurationLoad) { + conf.WriteDisposition = string(opt) +} + +func (opt TableWriteDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy) { + conf.WriteDisposition = string(opt) +} + +func (opt TableWriteDisposition) customizeQuery(conf *bq.JobConfigurationQuery) { + conf.WriteDisposition = string(opt) +} + +// TableType is the type of table. +type TableType string + +const ( + RegularTable TableType = "TABLE" + ViewTable TableType = "VIEW" +) + +func (t *Table) implementsSource() {} +func (t *Table) implementsReadSource() {} +func (t *Table) implementsDestination() {} +func (ts Tables) implementsSource() {} + +func (t *Table) tableRefProto() *bq.TableReference { + return &bq.TableReference{ + ProjectId: t.ProjectID, + DatasetId: t.DatasetID, + TableId: t.TableID, + } +} + +// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format. +func (t *Table) FullyQualifiedName() string { + return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID) +} + +// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID. +func (t *Table) implicitTable() bool { + return t.ProjectID == "" && t.DatasetID == "" && t.TableID == "" +} + +func (t *Table) customizeLoadDst(conf *bq.JobConfigurationLoad) { + conf.DestinationTable = t.tableRefProto() +} + +func (t *Table) customizeExtractSrc(conf *bq.JobConfigurationExtract) { + conf.SourceTable = t.tableRefProto() +} + +func (t *Table) customizeCopyDst(conf *bq.JobConfigurationTableCopy) { + conf.DestinationTable = t.tableRefProto() +} + +func (ts Tables) customizeCopySrc(conf *bq.JobConfigurationTableCopy) { + for _, t := range ts { + conf.SourceTables = append(conf.SourceTables, t.tableRefProto()) + } +} + +func (t *Table) customizeQueryDst(conf *bq.JobConfigurationQuery) { + if !t.implicitTable() { + conf.DestinationTable = t.tableRefProto() + } +} + +func (t *Table) customizeReadSrc(cursor *readTableConf) { + cursor.projectID = t.ProjectID + cursor.datasetID = t.DatasetID + cursor.tableID = t.TableID +} + +// Create creates a table in the BigQuery service. +func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error { + conf := &createTableConf{ + projectID: t.ProjectID, + datasetID: t.DatasetID, + tableID: t.TableID, + } + for _, o := range options { + o.customizeCreateTable(conf) + } + return t.service.createTable(ctx, conf) +} + +// Metadata fetches the metadata for the table. +func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) { + return t.service.getTableMetadata(ctx, t.ProjectID, t.DatasetID, t.TableID) +} + +// Delete deletes the table. +func (t *Table) Delete(ctx context.Context) error { + return t.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID) +} + +// A CreateTableOption is an optional argument to CreateTable. +type CreateTableOption interface { + customizeCreateTable(*createTableConf) +} + +type tableExpiration time.Time + +// TableExpiration returns a CreateTableOption that will cause the created table to be deleted after the expiration time. 
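A sketch of table creation with the options defined here, combining an expiration with the schema-inference example given earlier. The *bigquery.Table handle is assumed to come from the client (the accessor is outside this hunk):

package example

import (
	"time"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

// createDated creates a table that BigQuery deletes automatically after 30 days.
// schema would typically be an InferSchema result; Schema is itself a
// CreateTableOption via customizeCreateTable in schema.go.
func createDated(ctx context.Context, t *bigquery.Table, schema bigquery.Schema) error {
	return t.Create(ctx,
		schema,
		bigquery.TableExpiration(time.Now().Add(30*24*time.Hour)))
}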
+func TableExpiration(exp time.Time) CreateTableOption { return tableExpiration(exp) } + +func (opt tableExpiration) customizeCreateTable(conf *createTableConf) { + conf.expiration = time.Time(opt) +} + +type viewQuery string + +// ViewQuery returns a CreateTableOption that causes the created table to be a virtual table defined by the supplied query. +// For more information see: https://cloud.google.com/bigquery/querying-data#views +func ViewQuery(query string) CreateTableOption { return viewQuery(query) } + +func (opt viewQuery) customizeCreateTable(conf *createTableConf) { + conf.viewQuery = string(opt) +} + +// TableMetadataPatch represents a set of changes to a table's metadata. +type TableMetadataPatch struct { + s service + projectID, datasetID, tableID string + conf patchTableConf +} + +// Patch returns a *TableMetadataPatch, which can be used to modify specific Table metadata fields. +// In order to apply the changes, the TableMetadataPatch's Apply method must be called. +func (t *Table) Patch() *TableMetadataPatch { + return &TableMetadataPatch{ + s: t.service, + projectID: t.ProjectID, + datasetID: t.DatasetID, + tableID: t.TableID, + } +} + +// Description sets the table description. +func (p *TableMetadataPatch) Description(desc string) { + p.conf.Description = &desc +} + +// Name sets the table name. +func (p *TableMetadataPatch) Name(name string) { + p.conf.Name = &name +} + +// TODO(mcgreevy): support patching the schema. + +// Apply applies the patch operation. +func (p *TableMetadataPatch) Apply(ctx context.Context) (*TableMetadata, error) { + return p.s.patchTable(ctx, p.projectID, p.datasetID, p.tableID, &p.conf) +} + +// NewUploader returns an *Uploader that can be used to append rows to t. +func (t *Table) NewUploader(opts ...UploadOption) *Uploader { + uploader := &Uploader{t: t} + + for _, o := range opts { + o.customizeInsertRows(&uploader.conf) + } + + return uploader +} diff --git a/vendor/cloud.google.com/go/bigquery/uploader.go b/vendor/cloud.google.com/go/bigquery/uploader.go new file mode 100644 index 000000000..b09240288 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/uploader.go @@ -0,0 +1,121 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + "reflect" + + "golang.org/x/net/context" +) + +// An UploadOption is an optional argument to NewUploader. +type UploadOption interface { + customizeInsertRows(conf *insertRowsConf) +} + +// An Uploader does streaming inserts into a BigQuery table. +// It is safe for concurrent use. +type Uploader struct { + conf insertRowsConf + t *Table +} + +// SkipInvalidRows returns an UploadOption that causes rows containing invalid data to be silently ignored. +// The default value is false, which causes the entire request to fail, if there is an attempt to insert an invalid row. 
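Pulling uploader.go together before the remaining option definitions, a streaming-insert sketch; the event type and its ValueSaver implementation are invented for illustration (compare testSaver in the tests further below):

package example

import (
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

// event implements ValueSaver so it can be passed to Uploader.Put.
type event struct {
	ID   string
	Kind string
}

func (e event) Save() (row map[string]bigquery.Value, insertID string, err error) {
	// Returning e.ID as the insert ID lets BigQuery de-duplicate retried inserts.
	return map[string]bigquery.Value{"kind": e.Kind}, e.ID, nil
}

func upload(ctx context.Context, t *bigquery.Table) error {
	u := t.NewUploader(bigquery.SkipInvalidRows())
	err := u.Put(ctx, []event{{"a", "click"}, {"b", "view"}})
	if err != nil {
		// A PutMultiError reports a RowInsertionError per failed row.
		log.Printf("some rows failed: %v", err)
	}
	return err
}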
+func SkipInvalidRows() UploadOption { return skipInvalidRows{} }
+
+type skipInvalidRows struct{}
+
+func (opt skipInvalidRows) customizeInsertRows(conf *insertRowsConf) {
+	conf.skipInvalidRows = true
+}
+
+// UploadIgnoreUnknownValues returns an UploadOption that causes values not matching the schema to be ignored.
+// If this option is not used, records containing such values are treated as invalid records.
+func UploadIgnoreUnknownValues() UploadOption { return uploadIgnoreUnknownValues{} }
+
+type uploadIgnoreUnknownValues struct{}
+
+func (opt uploadIgnoreUnknownValues) customizeInsertRows(conf *insertRowsConf) {
+	conf.ignoreUnknownValues = true
+}
+
+// A TableTemplateSuffix allows Uploaders to create tables automatically.
+//
+// Experimental: this option is experimental and may be modified or removed in future versions,
+// regardless of any other documented package stability guarantees.
+//
+// When you specify a suffix, the table you upload data to
+// will be used as a template for creating a new table, with the same schema,
+// called <table> + <suffix>.
+//
+// More information is available at
+// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
+func TableTemplateSuffix(suffix string) UploadOption { return tableTemplateSuffix(suffix) }
+
+type tableTemplateSuffix string
+
+func (opt tableTemplateSuffix) customizeInsertRows(conf *insertRowsConf) {
+	conf.templateSuffix = string(opt)
+}
+
+// Put uploads one or more rows to the BigQuery service. src must implement ValueSaver or be a slice of ValueSavers.
+// Put returns a PutMultiError if one or more rows failed to be uploaded.
+// The PutMultiError contains a RowInsertionError for each failed row.
+func (u *Uploader) Put(ctx context.Context, src interface{}) error {
+	// TODO(mcgreevy): Support structs which do not implement ValueSaver as src, a la Datastore.
+
+	if saver, ok := src.(ValueSaver); ok {
+		return u.putMulti(ctx, []ValueSaver{saver})
+	}
+
+	srcVal := reflect.ValueOf(src)
+	if srcVal.Kind() != reflect.Slice {
+		return fmt.Errorf("%T is not a ValueSaver or slice of ValueSavers", src)
+	}
+
+	var savers []ValueSaver
+	for i := 0; i < srcVal.Len(); i++ {
+		s := srcVal.Index(i).Interface()
+		saver, ok := s.(ValueSaver)
+		if !ok {
+			return fmt.Errorf("element %d of src is of type %T, which is not a ValueSaver", i, s)
+		}
+		savers = append(savers, saver)
+	}
+	return u.putMulti(ctx, savers)
+}
+
+func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
+	var rows []*insertionRow
+	for _, saver := range src {
+		row, insertID, err := saver.Save()
+		if err != nil {
+			return err
+		}
+		rows = append(rows, &insertionRow{InsertID: insertID, Row: row})
+	}
+	return u.t.service.insertRows(ctx, u.t.ProjectID, u.t.DatasetID, u.t.TableID, rows, &u.conf)
+}
+
+// An insertionRow represents a row of data to be inserted into a table.
+type insertionRow struct {
+	// If InsertID is non-empty, BigQuery will use it to de-duplicate insertions of
+	// this row on a best-effort basis.
+	InsertID string
+	// The data to be inserted, represented as a map from field name to Value.
+	Row map[string]Value
+}
diff --git a/vendor/cloud.google.com/go/bigquery/uploader_test.go b/vendor/cloud.google.com/go/bigquery/uploader_test.go
new file mode 100644
index 000000000..8747ee6c3
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/uploader_test.go
@@ -0,0 +1,234 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" +) + +type testSaver struct { + ir *insertionRow + err error +} + +func (ts testSaver) Save() (map[string]Value, string, error) { + return ts.ir.Row, ts.ir.InsertID, ts.err +} + +func TestRejectsNonValueSavers(t *testing.T) { + u := Uploader{t: defaultTable(nil)} + + testCases := []struct { + src interface{} + }{ + { + src: 1, + }, + { + src: []int{1, 2}, + }, + { + src: []interface{}{ + testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}}, + 1, + }, + }, + } + + for _, tc := range testCases { + if err := u.Put(context.Background(), tc.src); err == nil { + t.Errorf("put value: %v; got err: %v; want nil", tc.src, err) + } + } +} + +type insertRowsRecorder struct { + rowBatches [][]*insertionRow + service +} + +func (irr *insertRowsRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error { + irr.rowBatches = append(irr.rowBatches, rows) + return nil +} + +func TestInsertsData(t *testing.T) { + table := &Table{ + ProjectID: "project-id", + DatasetID: "dataset-id", + TableID: "table-id", + } + + testCases := []struct { + data [][]*insertionRow + }{ + { + data: [][]*insertionRow{ + { + &insertionRow{"a", map[string]Value{"one": 1}}, + }, + }, + }, + { + + data: [][]*insertionRow{ + { + &insertionRow{"a", map[string]Value{"one": 1}}, + &insertionRow{"b", map[string]Value{"two": 2}}, + }, + }, + }, + { + + data: [][]*insertionRow{ + { + &insertionRow{"a", map[string]Value{"one": 1}}, + }, + { + &insertionRow{"b", map[string]Value{"two": 2}}, + }, + }, + }, + { + + data: [][]*insertionRow{ + { + &insertionRow{"a", map[string]Value{"one": 1}}, + &insertionRow{"b", map[string]Value{"two": 2}}, + }, + { + &insertionRow{"c", map[string]Value{"three": 3}}, + &insertionRow{"d", map[string]Value{"four": 4}}, + }, + }, + }, + } + for _, tc := range testCases { + irr := &insertRowsRecorder{} + table.service = irr + u := Uploader{t: table} + for _, batch := range tc.data { + if len(batch) == 0 { + continue + } + var toUpload interface{} + if len(batch) == 1 { + toUpload = testSaver{ir: batch[0]} + } else { + savers := []testSaver{} + for _, row := range batch { + savers = append(savers, testSaver{ir: row}) + } + toUpload = savers + } + + err := u.Put(context.Background(), toUpload) + if err != nil { + t.Errorf("expected successful Put of ValueSaver; got: %v", err) + } + } + if got, want := irr.rowBatches, tc.data; !reflect.DeepEqual(got, want) { + t.Errorf("got: %v, want: %v", got, want) + } + } +} + +type uploadOptionRecorder struct { + received *insertRowsConf + service +} + +func (u *uploadOptionRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error { + u.received = conf + return nil +} + +func TestUploadOptionsPropagate(t *testing.T) { + // we don't care for the data in this testcase. 
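+	// uploadOptionRecorder (above) records the insertRowsConf passed to
+	// insertRows, so each option's effect on the conf can be asserted.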
+ dummyData := testSaver{ir: &insertionRow{}} + + tests := [...]struct { + opts []UploadOption + conf insertRowsConf + }{ + { // test zero options lead to zero value for insertRowsConf + }, + { + opts: []UploadOption{ + TableTemplateSuffix("suffix"), + }, + conf: insertRowsConf{ + templateSuffix: "suffix", + }, + }, + { + opts: []UploadOption{ + UploadIgnoreUnknownValues(), + }, + conf: insertRowsConf{ + ignoreUnknownValues: true, + }, + }, + { + opts: []UploadOption{ + SkipInvalidRows(), + }, + conf: insertRowsConf{ + skipInvalidRows: true, + }, + }, + { // multiple upload options combine + opts: []UploadOption{ + TableTemplateSuffix("suffix"), + SkipInvalidRows(), + UploadIgnoreUnknownValues(), + }, + conf: insertRowsConf{ + templateSuffix: "suffix", + skipInvalidRows: true, + ignoreUnknownValues: true, + }, + }, + } + + for i, tc := range tests { + recorder := new(uploadOptionRecorder) + table := &Table{ + ProjectID: "project-id", + DatasetID: "dataset-id", + TableID: "table-id", + service: recorder, + } + + u := table.NewUploader(tc.opts...) + err := u.Put(context.Background(), dummyData) + if err != nil { + t.Fatalf("%d: expected successful Put of ValueSaver; got: %v", i, err) + } + + if recorder.received == nil { + t.Fatalf("%d: received no options at all!", i) + } + + want := tc.conf + got := *recorder.received + if got != want { + t.Errorf("%d: got %#v, want %#v, opts=%#v", i, got, want, tc.opts) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/utils_test.go b/vendor/cloud.google.com/go/bigquery/utils_test.go new file mode 100644 index 000000000..6411c5d63 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/utils_test.go @@ -0,0 +1,54 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +var defaultGCS = &GCSReference{ + uris: []string{"uri"}, +} + +var defaultQuery = &Query{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", +} + +func defaultTable(s service) *Table { + return &Table{ + ProjectID: "project-id", + DatasetID: "dataset-id", + TableID: "table-id", + service: s, + } +} + +type testService struct { + *bq.Job + + service +} + +func (s *testService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) { + s.Job = job + return &Job{}, nil +} + +func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) { + return &JobStatus{State: Done}, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/value.go b/vendor/cloud.google.com/go/bigquery/value.go new file mode 100644 index 000000000..2433ad929 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/value.go @@ -0,0 +1,195 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + "strconv" + "time" + + bq "google.golang.org/api/bigquery/v2" +) + +// Value stores the contents of a single cell from a BigQuery result. +type Value interface{} + +// ValueLoader stores a slice of Values representing a result row from a Read operation. +// See Iterator.Get for more information. +type ValueLoader interface { + Load(v []Value) error +} + +// ValueList converts a []Value to implement ValueLoader. +type ValueList []Value + +// Load stores a sequence of values in a ValueList. +func (vs *ValueList) Load(v []Value) error { + *vs = append(*vs, v...) + return nil +} + +// A ValueSaver returns a row of data to be inserted into a table. +type ValueSaver interface { + // Save returns a row to be inserted into a BigQuery table, represented + // as a map from field name to Value. + // If insertID is non-empty, BigQuery will use it to de-duplicate + // insertions of this row on a best-effort basis. + Save() (row map[string]Value, insertID string, err error) +} + +// ValuesSaver implements ValueSaver for a slice of Values. +type ValuesSaver struct { + Schema Schema + + // If non-empty, BigQuery will use InsertID to de-duplicate insertions + // of this row on a best-effort basis. + InsertID string + + Row []Value +} + +// Save implements ValueSaver +func (vls *ValuesSaver) Save() (map[string]Value, string, error) { + m, err := valuesToMap(vls.Row, vls.Schema) + return m, vls.InsertID, err +} + +func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) { + if len(vs) != len(schema) { + return nil, errors.New("Schema does not match length of row to be inserted") + } + + m := make(map[string]Value) + for i, fieldSchema := range schema { + if fieldSchema.Type == RecordFieldType { + nested, ok := vs[i].([]Value) + if !ok { + return nil, errors.New("Nested record is not a []Value") + } + value, err := valuesToMap(nested, fieldSchema.Schema) + if err != nil { + return nil, err + } + m[fieldSchema.Name] = value + } else { + m[fieldSchema.Name] = vs[i] + } + } + return m, nil +} + +// convertRows converts a series of TableRows into a series of Value slices. +// schema is used to interpret the data from rows; its length must match the +// length of each row. 
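+// (Each bq.TableRow carries its cells in r.F; convertRow pairs r.F[i] with
+// schema[i] and converts the cell via convertValue.)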
+func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) { + var rs [][]Value + for _, r := range rows { + row, err := convertRow(r, schema) + if err != nil { + return nil, err + } + rs = append(rs, row) + } + return rs, nil +} + +func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) { + if len(schema) != len(r.F) { + return nil, errors.New("schema length does not match row length") + } + var values []Value + for i, cell := range r.F { + fs := schema[i] + v, err := convertValue(cell.V, fs.Type, fs.Schema) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil +} + +func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) { + switch val := val.(type) { + case nil: + return nil, nil + case []interface{}: + return convertRepeatedRecord(val, typ, schema) + case map[string]interface{}: + return convertNestedRecord(val, schema) + case string: + return convertBasicType(val, typ) + default: + return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ) + } +} + +func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) { + var values []Value + for _, cell := range vals { + // each cell contains a single entry, keyed by "v" + val := cell.(map[string]interface{})["v"] + v, err := convertValue(val, typ, schema) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil +} + +func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) { + // convertNestedRecord is similar to convertRow, as a record has the same structure as a row. + + // Nested records are wrapped in a map with a single key, "f". + record := val["f"].([]interface{}) + if len(record) != len(schema) { + return nil, errors.New("schema length does not match record length") + } + + var values []Value + for i, cell := range record { + // each cell contains a single entry, keyed by "v" + val := cell.(map[string]interface{})["v"] + + fs := schema[i] + v, err := convertValue(val, fs.Type, fs.Schema) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil +} + +// convertBasicType returns val as an interface with a concrete type specified by typ. +func convertBasicType(val string, typ FieldType) (Value, error) { + switch typ { + case StringFieldType: + return val, nil + case IntegerFieldType: + return strconv.Atoi(val) + case FloatFieldType: + return strconv.ParseFloat(val, 64) + case BooleanFieldType: + return strconv.ParseBool(val) + case TimestampFieldType: + f, err := strconv.ParseFloat(val, 64) + return Value(time.Unix(0, int64(f*1e9))), err + default: + return nil, errors.New("unrecognized type") + } +} diff --git a/vendor/cloud.google.com/go/bigquery/value_test.go b/vendor/cloud.google.com/go/bigquery/value_test.go new file mode 100644 index 000000000..a1edaebb2 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/value_test.go @@ -0,0 +1,416 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + "reflect" + "testing" + "time" + + bq "google.golang.org/api/bigquery/v2" +) + +func TestConvertBasicValues(t *testing.T) { + schema := []*FieldSchema{ + {Type: StringFieldType}, + {Type: IntegerFieldType}, + {Type: FloatFieldType}, + {Type: BooleanFieldType}, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + {V: "a"}, + {V: "1"}, + {V: "1.2"}, + {V: "true"}, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{"a", 1, 1.2, true} + if !reflect.DeepEqual(got, want) { + t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestConvertTime(t *testing.T) { + schema := []*FieldSchema{ + {Type: TimestampFieldType}, + } + thyme := time.Date(1970, 1, 1, 10, 0, 0, 10, time.UTC) + row := &bq.TableRow{ + F: []*bq.TableCell{ + {V: fmt.Sprintf("%.10f", float64(thyme.UnixNano())/1e9)}, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + if !got[0].(time.Time).Equal(thyme) { + t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme) + } +} + +func TestConvertNullValues(t *testing.T) { + schema := []*FieldSchema{ + {Type: StringFieldType}, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + {V: nil}, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{nil} + if !reflect.DeepEqual(got, want) { + t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestBasicRepetition(t *testing.T) { + schema := []*FieldSchema{ + {Type: IntegerFieldType, Repeated: true}, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + { + V: []interface{}{ + map[string]interface{}{ + "v": "1", + }, + map[string]interface{}{ + "v": "2", + }, + map[string]interface{}{ + "v": "3", + }, + }, + }, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{[]Value{1, 2, 3}} + if !reflect.DeepEqual(got, want) { + t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestNestedRecordContainingRepetition(t *testing.T) { + schema := []*FieldSchema{ + { + Type: RecordFieldType, + Schema: Schema{ + {Type: IntegerFieldType, Repeated: true}, + }, + }, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + { + V: map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": []interface{}{ + map[string]interface{}{"v": "1"}, + map[string]interface{}{"v": "2"}, + map[string]interface{}{"v": "3"}, + }, + }, + }, + }, + }, + }, + } + + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{[]Value{[]Value{1, 2, 3}}} + if !reflect.DeepEqual(got, want) { + t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestRepeatedRecordContainingRepetition(t *testing.T) { + schema := []*FieldSchema{ + { + Type: RecordFieldType, + Repeated: true, + Schema: Schema{ + {Type: IntegerFieldType, Repeated: true}, + }, + }, + } + row := &bq.TableRow{F: []*bq.TableCell{ + { + V: []interface{}{ // repeated records. + map[string]interface{}{ // first record. + "v": map[string]interface{}{ // pointless single-key-map wrapper. + "f": []interface{}{ // list of record fields. 
+ map[string]interface{}{ // only record (repeated ints) + "v": []interface{}{ // pointless wrapper. + map[string]interface{}{ + "v": "1", + }, + map[string]interface{}{ + "v": "2", + }, + map[string]interface{}{ + "v": "3", + }, + }, + }, + }, + }, + }, + map[string]interface{}{ // second record. + "v": map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": []interface{}{ + map[string]interface{}{ + "v": "4", + }, + map[string]interface{}{ + "v": "5", + }, + map[string]interface{}{ + "v": "6", + }, + }, + }, + }, + }, + }, + }, + }, + }} + + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{ // the row is a list of length 1, containing an entry for the repeated record. + []Value{ // the repeated record is a list of length 2, containing an entry for each repetition. + []Value{ // the record is a list of length 1, containing an entry for the repeated integer field. + []Value{1, 2, 3}, // the repeated integer field is a list of length 3. + }, + []Value{ // second record + []Value{4, 5, 6}, + }, + }, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestRepeatedRecordContainingRecord(t *testing.T) { + schema := []*FieldSchema{ + { + Type: RecordFieldType, + Repeated: true, + Schema: Schema{ + { + Type: StringFieldType, + }, + { + Type: RecordFieldType, + Schema: Schema{ + {Type: IntegerFieldType}, + {Type: StringFieldType}, + }, + }, + }, + }, + } + row := &bq.TableRow{F: []*bq.TableCell{ + { + V: []interface{}{ // repeated records. + map[string]interface{}{ // first record. + "v": map[string]interface{}{ // pointless single-key-map wrapper. + "f": []interface{}{ // list of record fields. + map[string]interface{}{ // first record field (name) + "v": "first repeated record", + }, + map[string]interface{}{ // second record field (nested record). + "v": map[string]interface{}{ // pointless single-key-map wrapper. + "f": []interface{}{ // nested record fields + map[string]interface{}{ + "v": "1", + }, + map[string]interface{}{ + "v": "two", + }, + }, + }, + }, + }, + }, + }, + map[string]interface{}{ // second record. + "v": map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": "second repeated record", + }, + map[string]interface{}{ + "v": map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": "3", + }, + map[string]interface{}{ + "v": "four", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }} + + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + // TODO: test with flattenresults. + want := []Value{ // the row is a list of length 1, containing an entry for the repeated record. + []Value{ // the repeated record is a list of length 2, containing an entry for each repetition. + []Value{ // record contains a string followed by a nested record. + "first repeated record", + []Value{ + 1, + "two", + }, + }, + []Value{ // second record. 
+ "second repeated record", + []Value{ + 3, + "four", + }, + }, + }, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want) + } +} + +func TestValuesSaverConvertsToMap(t *testing.T) { + testCases := []struct { + vs ValuesSaver + want *insertionRow + }{ + { + vs: ValuesSaver{ + Schema: []*FieldSchema{ + {Name: "intField", Type: IntegerFieldType}, + {Name: "strField", Type: StringFieldType}, + }, + InsertID: "iid", + Row: []Value{1, "a"}, + }, + want: &insertionRow{ + InsertID: "iid", + Row: map[string]Value{"intField": 1, "strField": "a"}, + }, + }, + { + vs: ValuesSaver{ + Schema: []*FieldSchema{ + {Name: "intField", Type: IntegerFieldType}, + { + Name: "recordField", + Type: RecordFieldType, + Schema: []*FieldSchema{ + {Name: "nestedInt", Type: IntegerFieldType, Repeated: true}, + }, + }, + }, + InsertID: "iid", + Row: []Value{1, []Value{[]Value{2, 3}}}, + }, + want: &insertionRow{ + InsertID: "iid", + Row: map[string]Value{ + "intField": 1, + "recordField": map[string]Value{ + "nestedInt": []Value{2, 3}, + }, + }, + }, + }, + } + for _, tc := range testCases { + data, insertID, err := tc.vs.Save() + if err != nil { + t.Errorf("Expected successful save; got: %v", err) + } + got := &insertionRow{insertID, data} + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("saving ValuesSaver: got:\n%v\nwant:\n%v", got, tc.want) + } + } +} + +func TestConvertRows(t *testing.T) { + schema := []*FieldSchema{ + {Type: StringFieldType}, + {Type: IntegerFieldType}, + {Type: FloatFieldType}, + {Type: BooleanFieldType}, + } + rows := []*bq.TableRow{ + {F: []*bq.TableCell{ + {V: "a"}, + {V: "1"}, + {V: "1.2"}, + {V: "true"}, + }}, + {F: []*bq.TableCell{ + {V: "b"}, + {V: "2"}, + {V: "2.2"}, + {V: "false"}, + }}, + } + want := [][]Value{ + {"a", 1, 1.2, true}, + {"b", 2, 2.2, false}, + } + got, err := convertRows(rows, schema) + if err != nil { + t.Fatalf("got %v, want nil", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("\ngot %v\nwant %v", got, want) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/admin.go b/vendor/cloud.google.com/go/bigtable/admin.go new file mode 100644 index 000000000..3c29d4791 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/admin.go @@ -0,0 +1,267 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "regexp" + "strings" + + btopt "cloud.google.com/go/bigtable/internal/option" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +const adminAddr = "bigtableadmin.googleapis.com:443" + +// AdminClient is a client type for performing admin operations within a specific instance. 
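+//
+// A minimal sketch (project and instance names are illustrative):
+//
+//	ac, err := NewAdminClient(ctx, "my-project", "my-instance")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer ac.Close()
+//	err = ac.CreateTable(ctx, "mytable")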
+type AdminClient struct { + conn *grpc.ClientConn + tClient btapb.BigtableTableAdminClient + + project, instance string + + // Metadata to be sent with each request. + md metadata.MD +} + +// NewAdminClient creates a new AdminClient for a given project and instance. +func NewAdminClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*AdminClient, error) { + o, err := btopt.DefaultClientOptions(adminAddr, AdminScope, clientUserAgent) + if err != nil { + return nil, err + } + o = append(o, opts...) + conn, err := transport.DialGRPC(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &AdminClient{ + conn: conn, + tClient: btapb.NewBigtableTableAdminClient(conn), + project: project, + instance: instance, + md: metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)), + }, nil +} + +// Close closes the AdminClient. +func (ac *AdminClient) Close() error { + return ac.conn.Close() +} + +func (ac *AdminClient) instancePrefix() string { + return fmt.Sprintf("projects/%s/instances/%s", ac.project, ac.instance) +} + +// Tables returns a list of the tables in the instance. +func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) { + ctx = metadata.NewContext(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ListTablesRequest{ + Parent: prefix, + } + res, err := ac.tClient.ListTables(ctx, req) + if err != nil { + return nil, err + } + names := make([]string, 0, len(res.Tables)) + for _, tbl := range res.Tables { + names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/")) + } + return names, nil +} + +// CreateTable creates a new table in the instance. +// This method may return before the table's creation is complete. +func (ac *AdminClient) CreateTable(ctx context.Context, table string) error { + ctx = metadata.NewContext(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.CreateTableRequest{ + Parent: prefix, + TableId: table, + } + _, err := ac.tClient.CreateTable(ctx, req) + if err != nil { + return err + } + return nil +} + +// CreateColumnFamily creates a new column family in a table. +func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error { + // TODO(dsymonds): Permit specifying gcexpr and any other family settings. + ctx = metadata.NewContext(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ModifyColumnFamiliesRequest{ + Name: prefix + "/tables/" + table, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{ + { + Id: family, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}}, + }, + }, + } + _, err := ac.tClient.ModifyColumnFamilies(ctx, req) + return err +} + +// DeleteTable deletes a table and all of its data. +func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error { + ctx = metadata.NewContext(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.DeleteTableRequest{ + Name: prefix + "/tables/" + table, + } + _, err := ac.tClient.DeleteTable(ctx, req) + return err +} + +// DeleteColumnFamily deletes a column family in a table and all of its data. 
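+// For example, ac.DeleteColumnFamily(ctx, "mytable", "follows") drops the
+// whole "follows" family and all of its cells.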
+func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error { + ctx = metadata.NewContext(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ModifyColumnFamiliesRequest{ + Name: prefix + "/tables/" + table, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{ + { + Id: family, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{Drop: true}, + }, + }, + } + _, err := ac.tClient.ModifyColumnFamilies(ctx, req) + return err +} + +// TableInfo represents information about a table. +type TableInfo struct { + Families []string +} + +// TableInfo retrieves information about a table. +func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) { + ctx = metadata.NewContext(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.GetTableRequest{ + Name: prefix + "/tables/" + table, + } + res, err := ac.tClient.GetTable(ctx, req) + if err != nil { + return nil, err + } + ti := &TableInfo{} + for fam := range res.ColumnFamilies { + ti.Families = append(ti.Families, fam) + } + return ti, nil +} + +// SetGCPolicy specifies which cells in a column family should be garbage collected. +// GC executes opportunistically in the background; table reads may return data +// matching the GC policy. +func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error { + ctx = metadata.NewContext(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ModifyColumnFamiliesRequest{ + Name: prefix + "/tables/" + table, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{ + { + Id: family, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{GcRule: policy.proto()}}, + }, + }, + } + _, err := ac.tClient.ModifyColumnFamilies(ctx, req) + return err +} + +const instanceAdminAddr = "bigtableadmin.googleapis.com:443" + +// InstanceAdminClient is a client type for performing admin operations on instances. +// These operations can be substantially more dangerous than those provided by AdminClient. +type InstanceAdminClient struct { + conn *grpc.ClientConn + iClient btapb.BigtableInstanceAdminClient + + project string + + // Metadata to be sent with each request. + md metadata.MD +} + +// NewInstanceAdminClient creates a new InstanceAdminClient for a given project. +func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.ClientOption) (*InstanceAdminClient, error) { + o, err := btopt.DefaultClientOptions(instanceAdminAddr, InstanceAdminScope, clientUserAgent) + if err != nil { + return nil, err + } + o = append(o, opts...) + conn, err := transport.DialGRPC(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &InstanceAdminClient{ + conn: conn, + iClient: btapb.NewBigtableInstanceAdminClient(conn), + + project: project, + md: metadata.Pairs(resourcePrefixHeader, "projects/"+project), + }, nil +} + +// Close closes the InstanceAdminClient. +func (iac *InstanceAdminClient) Close() error { + return iac.conn.Close() +} + +// InstanceInfo represents information about an instance +type InstanceInfo struct { + Name string // name of the instance + DisplayName string // display name for UIs +} + +var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`) + +// Instances returns a list of instances in the project. 
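+// A short sketch:
+//
+//	iac, err := NewInstanceAdminClient(ctx, "my-project")
+//	if err != nil {
+//		// handle error
+//	}
+//	infos, err := iac.Instances(ctx)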
+func (cac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo, error) { + ctx = metadata.NewContext(ctx, cac.md) + req := &btapb.ListInstancesRequest{ + Parent: "projects/" + cac.project, + } + res, err := cac.iClient.ListInstances(ctx, req) + if err != nil { + return nil, err + } + + var is []*InstanceInfo + for _, i := range res.Instances { + m := instanceNameRegexp.FindStringSubmatch(i.Name) + if m == nil { + return nil, fmt.Errorf("malformed instance name %q", i.Name) + } + is = append(is, &InstanceInfo{ + Name: m[2], + DisplayName: i.DisplayName, + }) + } + return is, nil +} diff --git a/vendor/cloud.google.com/go/bigtable/admin_test.go b/vendor/cloud.google.com/go/bigtable/admin_test.go new file mode 100644 index 000000000..62941d8af --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/admin_test.go @@ -0,0 +1,73 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigtable + +import ( + "reflect" + "sort" + "testing" + "time" + + "cloud.google.com/go/bigtable/bttest" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func TestAdminIntegration(t *testing.T) { + srv, err := bttest.NewServer("127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer srv.Close() + t.Logf("bttest.Server running on %s", srv.Addr) + + ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) + + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatalf("grpc.Dial: %v", err) + } + + adminClient, err := NewAdminClient(ctx, "proj", "instance", option.WithGRPCConn(conn)) + if err != nil { + t.Fatalf("NewAdminClient: %v", err) + } + defer adminClient.Close() + + list := func() []string { + tbls, err := adminClient.Tables(ctx) + if err != nil { + t.Fatalf("Fetching list of tables: %v", err) + } + sort.Strings(tbls) + return tbls + } + if err := adminClient.CreateTable(ctx, "mytable"); err != nil { + t.Fatalf("Creating table: %v", err) + } + if err := adminClient.CreateTable(ctx, "myothertable"); err != nil { + t.Fatalf("Creating table: %v", err) + } + if got, want := list(), []string{"myothertable", "mytable"}; !reflect.DeepEqual(got, want) { + t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) + } + if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil { + t.Fatalf("Deleting table: %v", err) + } + if got, want := list(), []string{"mytable"}; !reflect.DeepEqual(got, want) { + t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/bigtable.go b/vendor/cloud.google.com/go/bigtable/bigtable.go new file mode 100644 index 000000000..891a6a752 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/bigtable.go @@ -0,0 +1,717 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable // import "cloud.google.com/go/bigtable" + +import ( + "errors" + "fmt" + "io" + "strconv" + "time" + + "cloud.google.com/go/bigtable/internal/gax" + btopt "cloud.google.com/go/bigtable/internal/option" + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +const prodAddr = "bigtable.googleapis.com:443" + +// Client is a client for reading and writing data to tables in an instance. +// +// A Client is safe to use concurrently, except for its Close method. +type Client struct { + conn *grpc.ClientConn + client btpb.BigtableClient + project, instance string +} + +// NewClient creates a new Client for a given project and instance. +func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) { + o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent) + if err != nil { + return nil, err + } + o = append(o, opts...) + conn, err := transport.DialGRPC(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &Client{ + conn: conn, + client: btpb.NewBigtableClient(conn), + project: project, + instance: instance, + }, nil +} + +// Close closes the Client. +func (c *Client) Close() error { + return c.conn.Close() +} + +var ( + idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted} + isIdempotentRetryCode = make(map[codes.Code]bool) + retryOptions = []gax.CallOption{ + gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2), + gax.WithRetryCodes(idempotentRetryCodes), + } +) + +func init() { + for _, code := range idempotentRetryCodes { + isIdempotentRetryCode[code] = true + } +} + +func (c *Client) fullTableName(table string) string { + return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table) +} + +// A Table refers to a table. +// +// A Table is safe to use concurrently. +type Table struct { + c *Client + table string + + // Metadata to be sent with each request. + md metadata.MD +} + +// Open opens a table. +func (c *Client) Open(table string) *Table { + return &Table{ + c: c, + table: table, + md: metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)), + } +} + +// TODO(dsymonds): Read method that returns a sequence of ReadItems. + +// ReadRows reads rows from a table. f is called for each row. +// If f returns false, the stream is shut down and ReadRows returns. +// f owns its argument, and f is called serially in order by row key. +// +// By default, the yielded rows will contain all values in all cells. +// Use RowFilter to limit the cells returned. 
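+//
+// A minimal sketch (tbl is a *Table from Client.Open; the prefix is illustrative):
+//
+//	err := tbl.ReadRows(ctx, PrefixRange("user"), func(r Row) bool {
+//		// process r; return false to stop the scan early.
+//		return true
+//	})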
+func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error { + ctx = metadata.NewContext(ctx, t.md) + + var prevRowKey string + err := gax.Invoke(ctx, func(ctx context.Context) error { + req := &btpb.ReadRowsRequest{ + TableName: t.c.fullTableName(t.table), + Rows: arg.proto(), + } + for _, opt := range opts { + opt.set(req) + } + ctx, cancel := context.WithCancel(ctx) // for aborting the stream + defer cancel() + + stream, err := t.c.client.ReadRows(ctx, req) + if err != nil { + return err + } + cr := newChunkReader() + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // Reset arg for next Invoke call. + arg = arg.retainRowsAfter(prevRowKey) + return err + } + + for _, cc := range res.Chunks { + row, err := cr.Process(cc) + if err != nil { + // No need to prepare for a retry, this is an unretryable error. + return err + } + if row == nil { + continue + } + prevRowKey = row.Key() + if !f(row) { + // Cancel and drain stream. + cancel() + for { + if _, err := stream.Recv(); err != nil { + // The stream has ended. We don't return an error + // because the caller has intentionally interrupted the scan. + return nil + } + } + } + } + if err := cr.Close(); err != nil { + // No need to prepare for a retry, this is an unretryable error. + return err + } + } + return err + }, retryOptions...) + + return err +} + +// ReadRow is a convenience implementation of a single-row reader. +// A missing row will return a zero-length map and a nil error. +func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) { + var r Row + err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool { + r = rr + return true + }, opts...) + return r, err +} + +// decodeFamilyProto adds the cell data from f to the given row. +func decodeFamilyProto(r Row, row string, f *btpb.Family) { + fam := f.Name // does not have colon + for _, col := range f.Columns { + for _, cell := range col.Cells { + ri := ReadItem{ + Row: row, + Column: fam + ":" + string(col.Qualifier), + Timestamp: Timestamp(cell.TimestampMicros), + Value: cell.Value, + } + r[fam] = append(r[fam], ri) + } + } +} + +// RowSet is a set of rows to be read. It is satisfied by RowList and RowRange. +type RowSet interface { + proto() *btpb.RowSet + + // retainRowsAfter returns a new RowSet that does not include the + // given row key or any row key lexicographically less than it. + retainRowsAfter(lastRowKey string) RowSet +} + +// RowList is a sequence of row keys. +type RowList []string + +func (r RowList) proto() *btpb.RowSet { + keys := make([][]byte, len(r)) + for i, row := range r { + keys[i] = []byte(row) + } + return &btpb.RowSet{RowKeys: keys} +} + +func (r RowList) retainRowsAfter(lastRowKey string) RowSet { + var retryKeys RowList + for _, key := range r { + if key > lastRowKey { + retryKeys = append(retryKeys, key) + } + } + return retryKeys +} + +// A RowRange is a half-open interval [Start, Limit) encompassing +// all the rows with keys at least as large as Start, and less than Limit. +// (Bigtable string comparison is the same as Go's.) +// A RowRange can be unbounded, encompassing all keys at least as large as Start. +type RowRange struct { + start string + limit string +} + +// NewRange returns the new RowRange [begin, end). +func NewRange(begin, end string) RowRange { + return RowRange{ + start: begin, + limit: end, + } +} + +// Unbounded tests whether a RowRange is unbounded. 
+func (r RowRange) Unbounded() bool { + return r.limit == "" +} + +// Contains says whether the RowRange contains the key. +func (r RowRange) Contains(row string) bool { + return r.start <= row && (r.limit == "" || r.limit > row) +} + +// String provides a printable description of a RowRange. +func (r RowRange) String() string { + a := strconv.Quote(r.start) + if r.Unbounded() { + return fmt.Sprintf("[%s,∞)", a) + } + return fmt.Sprintf("[%s,%q)", a, r.limit) +} + +func (r RowRange) proto() *btpb.RowSet { + var rr *btpb.RowRange + rr = &btpb.RowRange{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte(r.start)}} + if !r.Unbounded() { + rr.EndKey = &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte(r.limit)} + } + return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}} +} + +func (r RowRange) retainRowsAfter(lastRowKey string) RowSet { + // Set the beginning of the range to the row after the last scanned. + start := lastRowKey + "\x00" + if r.Unbounded() { + return InfiniteRange(start) + } + return NewRange(start, r.limit) +} + +// SingleRow returns a RowRange for reading a single row. +func SingleRow(row string) RowRange { + return RowRange{ + start: row, + limit: row + "\x00", + } +} + +// PrefixRange returns a RowRange consisting of all keys starting with the prefix. +func PrefixRange(prefix string) RowRange { + return RowRange{ + start: prefix, + limit: prefixSuccessor(prefix), + } +} + +// InfiniteRange returns the RowRange consisting of all keys at least as +// large as start. +func InfiniteRange(start string) RowRange { + return RowRange{ + start: start, + limit: "", + } +} + +// prefixSuccessor returns the lexically smallest string greater than the +// prefix, if it exists, or "" otherwise. In either case, it is the string +// needed for the Limit of a RowRange. +func prefixSuccessor(prefix string) string { + if prefix == "" { + return "" // infinite range + } + n := len(prefix) + for n--; n >= 0 && prefix[n] == '\xff'; n-- { + } + if n == -1 { + return "" + } + ans := []byte(prefix[:n]) + ans = append(ans, prefix[n]+1) + return string(ans) +} + +// A ReadOption is an optional argument to ReadRows. +type ReadOption interface { + set(req *btpb.ReadRowsRequest) +} + +// RowFilter returns a ReadOption that applies f to the contents of read rows. +func RowFilter(f Filter) ReadOption { return rowFilter{f} } + +type rowFilter struct{ f Filter } + +func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() } + +// LimitRows returns a ReadOption that will limit the number of rows to be read. +func LimitRows(limit int64) ReadOption { return limitRows{limit} } + +type limitRows struct{ limit int64 } + +func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit } + +// mutationsAreRetryable returns true if all mutations are idempotent +// and therefore retryable. A mutation is idempotent iff all cell timestamps +// have an explicit timestamp set and do not rely on the timestamp being set on the server. +func mutationsAreRetryable(muts []*btpb.Mutation) bool { + serverTime := int64(ServerTime) + for _, mut := range muts { + setCell := mut.GetSetCell() + if setCell != nil && setCell.TimestampMicros == serverTime { + return false + } + } + return true +} + +// Apply applies a Mutation to a specific row. 
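+// A short sketch, mirroring the integration test in bigtable_test.go below:
+//
+//	mut := NewMutation()
+//	mut.Set("follows", "tjefferson", Now(), []byte("1"))
+//	err := tbl.Apply(ctx, "gwashington", mut)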
+func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error { + ctx = metadata.NewContext(ctx, t.md) + after := func(res proto.Message) { + for _, o := range opts { + o.after(res) + } + } + + var callOptions []gax.CallOption + if m.cond == nil { + req := &btpb.MutateRowRequest{ + TableName: t.c.fullTableName(t.table), + RowKey: []byte(row), + Mutations: m.ops, + } + if mutationsAreRetryable(m.ops) { + callOptions = retryOptions + } + var res *btpb.MutateRowResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + res, err = t.c.client.MutateRow(ctx, req) + return err + }, callOptions...) + if err == nil { + after(res) + } + return err + } + + req := &btpb.CheckAndMutateRowRequest{ + TableName: t.c.fullTableName(t.table), + RowKey: []byte(row), + PredicateFilter: m.cond.proto(), + } + if m.mtrue != nil { + req.TrueMutations = m.mtrue.ops + } + if m.mfalse != nil { + req.FalseMutations = m.mfalse.ops + } + if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) { + callOptions = retryOptions + } + var cmRes *btpb.CheckAndMutateRowResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + cmRes, err = t.c.client.CheckAndMutateRow(ctx, req) + return err + }, callOptions...) + if err == nil { + after(cmRes) + } + return err +} + +// An ApplyOption is an optional argument to Apply. +type ApplyOption interface { + after(res proto.Message) +} + +type applyAfterFunc func(res proto.Message) + +func (a applyAfterFunc) after(res proto.Message) { a(res) } + +// GetCondMutationResult returns an ApplyOption that reports whether the conditional +// mutation's condition matched. +func GetCondMutationResult(matched *bool) ApplyOption { + return applyAfterFunc(func(res proto.Message) { + if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok { + *matched = res.PredicateMatched + } + }) +} + +// Mutation represents a set of changes for a single row of a table. +type Mutation struct { + ops []*btpb.Mutation + + // for conditional mutations + cond Filter + mtrue, mfalse *Mutation +} + +// NewMutation returns a new mutation. +func NewMutation() *Mutation { + return new(Mutation) +} + +// NewCondMutation returns a conditional mutation. +// The given row filter determines which mutation is applied: +// If the filter matches any cell in the row, mtrue is applied; +// otherwise, mfalse is applied. +// Either given mutation may be nil. +func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation { + return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse} +} + +// Set sets a value in a specified column, with the given timestamp. +// The timestamp will be truncated to millisecond resolution. +// A timestamp of ServerTime means to use the server timestamp. +func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) { + if ts != ServerTime { + // Truncate to millisecond resolution, since that's the default table config. + // TODO(dsymonds): Provide a way to override this behaviour. + ts -= ts % 1000 + } + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: family, + ColumnQualifier: []byte(column), + TimestampMicros: int64(ts), + Value: value, + }}}) +} + +// DeleteCellsInColumn will delete all the cells whose columns are family:column. 
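+// For example, mut.DeleteCellsInColumn("follows", "wmckinley") removes every
+// cell in the follows:wmckinley column.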
+func (m *Mutation) DeleteCellsInColumn(family, column string) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{ + FamilyName: family, + ColumnQualifier: []byte(column), + }}}) +} + +// DeleteTimestampRange deletes all cells whose columns are family:column +// and whose timestamps are in the half-open interval [start, end). +// If end is zero, it will be interpreted as infinity. +func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{ + FamilyName: family, + ColumnQualifier: []byte(column), + TimeRange: &btpb.TimestampRange{ + StartTimestampMicros: int64(start), + EndTimestampMicros: int64(end), + }, + }}}) +} + +// DeleteCellsInFamily will delete all the cells whose columns are family:*. +func (m *Mutation) DeleteCellsInFamily(family string) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{ + FamilyName: family, + }}}) +} + +// DeleteRow deletes the entire row. +func (m *Mutation) DeleteRow() { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}}) +} + +// entryErr is a container that combines an entry with the error that was returned for it. +// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed. +type entryErr struct { + Entry *btpb.MutateRowsRequest_Entry + Err error +} + +// ApplyBulk applies multiple Mutations. +// Each mutation is individually applied atomically, +// but the set of mutations may be applied in any order. +// +// Two types of failures may occur. If the entire process +// fails, (nil, err) will be returned. If specific mutations +// fail to apply, ([]err, nil) will be returned, and the errors +// will correspond to the relevant rowKeys/muts arguments. +// +// Conditional mutations cannot be applied in bulk and providing one will result in an error. +func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) { + ctx = metadata.NewContext(ctx, t.md) + if len(rowKeys) != len(muts) { + return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts)) + } + + origEntries := make([]*entryErr, len(rowKeys)) + for i, key := range rowKeys { + mut := muts[i] + if mut.cond != nil { + return nil, errors.New("conditional mutations cannot be applied in bulk") + } + origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}} + } + + // entries will be reduced after each invocation to just what needs to be retried. + entries := make([]*entryErr, len(rowKeys)) + copy(entries, origEntries) + err := gax.Invoke(ctx, func(ctx context.Context) error { + err := t.doApplyBulk(ctx, entries, opts...) + if err != nil { + // We want to retry the entire request with the current entries + return err + } + entries = t.getApplyBulkRetries(entries) + if len(entries) > 0 && len(idempotentRetryCodes) > 0 { + // We have at least one mutation that needs to be retried. + // Return an arbitrary error that is retryable according to callOptions. + return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk") + } + return nil + }, retryOptions...) 
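+	// gax.Invoke returns nil only once no retryable entries remain; any
+	// per-row failures are still recorded on origEntries and collected below.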
+ + if err != nil { + return nil, err + } + + // Accumulate all of the errors into an array to return, interspersed with nils for successful + // entries. The absence of any errors means we should return nil. + var errs []error + var foundErr bool + for _, entry := range origEntries { + if entry.Err != nil { + foundErr = true + } + errs = append(errs, entry.Err) + } + if foundErr { + return errs, nil + } + return nil, nil +} + +// getApplyBulkRetries returns the entries that need to be retried +func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr { + var retryEntries []*entryErr + for _, entry := range entries { + err := entry.Err + if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) { + // There was an error and the entry is retryable. + retryEntries = append(retryEntries, entry) + } + } + return retryEntries +} + +// doApplyBulk does the work of a single ApplyBulk invocation +func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error { + after := func(res proto.Message) { + for _, o := range opts { + o.after(res) + } + } + + entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs)) + for i, entryErr := range entryErrs { + entries[i] = entryErr.Entry + } + req := &btpb.MutateRowsRequest{ + TableName: t.c.fullTableName(t.table), + Entries: entries, + } + stream, err := t.c.client.MutateRows(ctx, req) + if err != nil { + return err + } + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + + for i, entry := range res.Entries { + status := entry.Status + if status.Code == int32(codes.OK) { + entryErrs[i].Err = nil + } else { + entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message) + } + } + after(res) + } + return nil +} + +// Timestamp is in units of microseconds since 1 January 1970. +type Timestamp int64 + +// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set. +// It indicates that the server's timestamp should be used. +const ServerTime Timestamp = -1 + +// Time converts a time.Time into a Timestamp. +func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) } + +// Now returns the Timestamp representation of the current time on the client. +func Now() Timestamp { return Time(time.Now()) } + +// Time converts a Timestamp into a time.Time. +func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) } + +// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row. +// It returns the newly written cells. +func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) { + ctx = metadata.NewContext(ctx, t.md) + req := &btpb.ReadModifyWriteRowRequest{ + TableName: t.c.fullTableName(t.table), + RowKey: []byte(row), + Rules: m.ops, + } + res, err := t.c.client.ReadModifyWriteRow(ctx, req) + if err != nil { + return nil, err + } + r := make(Row) + for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family + decodeFamilyProto(r, row, fam) + } + return r, nil +} + +// ReadModifyWrite represents a set of operations on a single row of a table. +// It is like Mutation but for non-idempotent changes. +// When applied, these operations operate on the latest values of the row's cells, +// and result in a new value being written to the relevant cell with a timestamp +// that is max(existing timestamp, current server time). 
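+//
+// A short sketch (tbl is an open *Table; family and column names are illustrative):
+//
+//	rmw := NewReadModifyWrite()
+//	rmw.Increment("follows", "count", 1)
+//	row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", rmw)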
+// +// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will +// be executed serially by the server. +type ReadModifyWrite struct { + ops []*btpb.ReadModifyWriteRule +} + +// NewReadModifyWrite returns a new ReadModifyWrite. +func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) } + +// AppendValue appends a value to a specific cell's value. +// If the cell is unset, it will be treated as an empty value. +func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) { + m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ + FamilyName: family, + ColumnQualifier: []byte(column), + Rule: &btpb.ReadModifyWriteRule_AppendValue{v}, + }) +} + +// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer, +// and adds a value to it. If the cell is unset, it will be treated as zero. +// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite +// operation will fail. +func (m *ReadModifyWrite) Increment(family, column string, delta int64) { + m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ + FamilyName: family, + ColumnQualifier: []byte(column), + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{delta}, + }) +} diff --git a/vendor/cloud.google.com/go/bigtable/bigtable_test.go b/vendor/cloud.google.com/go/bigtable/bigtable_test.go new file mode 100644 index 000000000..0169c0289 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/bigtable_test.go @@ -0,0 +1,596 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bigtable + +import ( + "flag" + "fmt" + "math/rand" + "reflect" + "sort" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/bigtable/bttest" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func TestPrefix(t *testing.T) { + tests := []struct { + prefix, succ string + }{ + {"", ""}, + {"\xff", ""}, // when used, "" means Infinity + {"x\xff", "y"}, + {"\xfe", "\xff"}, + } + for _, tc := range tests { + got := prefixSuccessor(tc.prefix) + if got != tc.succ { + t.Errorf("prefixSuccessor(%q) = %q, want %s", tc.prefix, got, tc.succ) + continue + } + r := PrefixRange(tc.prefix) + if tc.succ == "" && r.limit != "" { + t.Errorf("PrefixRange(%q) got limit %q", tc.prefix, r.limit) + } + if tc.succ != "" && r.limit != tc.succ { + t.Errorf("PrefixRange(%q) got limit %q, want %q", tc.prefix, r.limit, tc.succ) + } + } +} + +var useProd = flag.String("use_prod", "", `if set to "proj,instance,table", run integration test against production`) + +func TestClientIntegration(t *testing.T) { + start := time.Now() + lastCheckpoint := start + checkpoint := func(s string) { + n := time.Now() + t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint)) + lastCheckpoint = n + } + + proj, instance, table := "proj", "instance", "mytable" + var clientOpts []option.ClientOption + timeout := 20 * time.Second + if *useProd == "" { + srv, err := bttest.NewServer("127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer srv.Close() + t.Logf("bttest.Server running on %s", srv.Addr) + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatalf("grpc.Dial: %v", err) + } + clientOpts = []option.ClientOption{option.WithGRPCConn(conn)} + } else { + t.Logf("Running test against production") + a := strings.SplitN(*useProd, ",", 3) + proj, instance, table = a[0], a[1], a[2] + timeout = 5 * time.Minute + } + + ctx, _ := context.WithTimeout(context.Background(), timeout) + + client, err := NewClient(ctx, proj, instance, clientOpts...) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + defer client.Close() + checkpoint("dialed Client") + + adminClient, err := NewAdminClient(ctx, proj, instance, clientOpts...) + if err != nil { + t.Fatalf("NewAdminClient: %v", err) + } + defer adminClient.Close() + checkpoint("dialed AdminClient") + + // Delete the table at the end of the test. + // Do this even before creating the table so that if this is running + // against production and CreateTable fails there's a chance of cleaning it up. + defer adminClient.DeleteTable(ctx, table) + + if err := adminClient.CreateTable(ctx, table); err != nil { + t.Fatalf("Creating table: %v", err) + } + checkpoint("created table") + if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + checkpoint(`created "follows" column family`) + + tbl := client.Open(table) + + // Insert some data. + initialData := map[string][]string{ + "wmckinley": {"tjefferson"}, + "gwashington": {"jadams"}, + "tjefferson": {"gwashington", "jadams"}, // wmckinley set conditionally below + "jadams": {"gwashington", "tjefferson"}, + } + for row, ss := range initialData { + mut := NewMutation() + for _, name := range ss { + mut.Set("follows", name, 0, []byte("1")) + } + if err := tbl.Apply(ctx, row, mut); err != nil { + t.Errorf("Mutating row %q: %v", row, err) + } + } + checkpoint("inserted initial data") + + // Do a conditional mutation with a complex filter. 
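+	// The chain requires every sub-filter to match: the cell's column qualifier
+	// must match the regexp "gwash[iz].*" and its value must match "." (that is,
+	// be non-empty). The true-branch mutation runs only if some cell survives.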
+	mutTrue := NewMutation()
+	mutTrue.Set("follows", "wmckinley", 0, []byte("1"))
+	filter := ChainFilters(ColumnFilter("gwash[iz].*"), ValueFilter("."))
+	mut := NewCondMutation(filter, mutTrue, nil)
+	if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
+		t.Errorf("Conditionally mutating row: %v", err)
+	}
+	// Do a second conditional mutation with a filter that does not match,
+	// and thus no changes should be made.
+	mutTrue = NewMutation()
+	mutTrue.DeleteRow()
+	filter = ColumnFilter("snoop.dogg")
+	mut = NewCondMutation(filter, mutTrue, nil)
+	if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
+		t.Errorf("Conditionally mutating row: %v", err)
+	}
+	checkpoint("did two conditional mutations")
+
+	// Fetch a row.
+	row, err := tbl.ReadRow(ctx, "jadams")
+	if err != nil {
+		t.Fatalf("Reading a row: %v", err)
+	}
+	wantRow := Row{
+		"follows": []ReadItem{
+			{Row: "jadams", Column: "follows:gwashington", Value: []byte("1")},
+			{Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")},
+		},
+	}
+	for _, ris := range row {
+		sort.Sort(byColumn(ris))
+	}
+	if !reflect.DeepEqual(row, wantRow) {
+		t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
+	}
+	checkpoint("tested ReadRow")
+
+	// Do a bunch of reads with filters.
+	readTests := []struct {
+		desc   string
+		rr     RowRange
+		filter Filter // may be nil
+
+		// We do the read, grab all the cells, turn them into "<row>-<col>-<val>",
+		// sort that list, and join with a comma.
+		want string
+	}{
+		{
+			desc: "read all, unfiltered",
+			rr:   RowRange{},
+			want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
+		},
+		{
+			desc: "read with InfiniteRange, unfiltered",
+			rr:   InfiniteRange("tjefferson"),
+			want: "tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
+		},
+		{
+			desc: "read with NewRange, unfiltered",
+			rr:   NewRange("gargamel", "hubbard"),
+			want: "gwashington-jadams-1",
+		},
+		{
+			desc: "read with PrefixRange, unfiltered",
+			rr:   PrefixRange("jad"),
+			want: "jadams-gwashington-1,jadams-tjefferson-1",
+		},
+		{
+			desc: "read with SingleRow, unfiltered",
+			rr:   SingleRow("wmckinley"),
+			want: "wmckinley-tjefferson-1",
+		},
+		{
+			desc:   "read all, with ColumnFilter",
+			rr:     RowRange{},
+			filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson"
+			want:   "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1",
+		},
+	}
+	for _, tc := range readTests {
+		var opts []ReadOption
+		if tc.filter != nil {
+			opts = append(opts, RowFilter(tc.filter))
+		}
+		var elt []string
+		err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool {
+			for _, ris := range r {
+				for _, ri := range ris {
+					elt = append(elt, formatReadItem(ri))
+				}
+			}
+			return true
+		}, opts...)
+ if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + sort.Strings(elt) + if got := strings.Join(elt, ","); got != tc.want { + t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want) + } + } + // Read a RowList + var elt []string + keys := RowList{"wmckinley", "gwashington", "jadams"} + want := "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,wmckinley-tjefferson-1" + err = tbl.ReadRows(ctx, keys, func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + elt = append(elt, formatReadItem(ri)) + } + } + return true + }) + if err != nil { + t.Errorf("read RowList: %v", err) + } + + sort.Strings(elt) + if got := strings.Join(elt, ","); got != want { + t.Errorf("bulk read: wrong reads.\n got %q\nwant %q", got, want) + } + checkpoint("tested ReadRows in a few ways") + + // Do a scan and stop part way through. + // Verify that the ReadRows callback doesn't keep running. + stopped := false + err = tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool { + if r.Key() < "h" { + return true + } + if !stopped { + stopped = true + return false + } + t.Errorf("ReadRows kept scanning to row %q after being told to stop", r.Key()) + return false + }) + if err != nil { + t.Errorf("Partial ReadRows: %v", err) + } + checkpoint("did partial ReadRows test") + + // Delete a row and check it goes away. + mut = NewMutation() + mut.DeleteRow() + if err := tbl.Apply(ctx, "wmckinley", mut); err != nil { + t.Errorf("Apply DeleteRow: %v", err) + } + row, err = tbl.ReadRow(ctx, "wmckinley") + if err != nil { + t.Fatalf("Reading a row after DeleteRow: %v", err) + } + if len(row) != 0 { + t.Fatalf("Read non-zero row after DeleteRow: %v", row) + } + checkpoint("exercised DeleteRow") + + // Check ReadModifyWrite. + + if err := adminClient.CreateColumnFamily(ctx, table, "counter"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + + appendRMW := func(b []byte) *ReadModifyWrite { + rmw := NewReadModifyWrite() + rmw.AppendValue("counter", "likes", b) + return rmw + } + incRMW := func(n int64) *ReadModifyWrite { + rmw := NewReadModifyWrite() + rmw.Increment("counter", "likes", n) + return rmw + } + rmwSeq := []struct { + desc string + rmw *ReadModifyWrite + want []byte + }{ + { + desc: "append #1", + rmw: appendRMW([]byte{0, 0, 0}), + want: []byte{0, 0, 0}, + }, + { + desc: "append #2", + rmw: appendRMW([]byte{0, 0, 0, 0, 17}), // the remaining 40 bits to make a big-endian 17 + want: []byte{0, 0, 0, 0, 0, 0, 0, 17}, + }, + { + desc: "increment", + rmw: incRMW(8), + want: []byte{0, 0, 0, 0, 0, 0, 0, 25}, + }, + } + for _, step := range rmwSeq { + row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", step.rmw) + if err != nil { + t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err) + } + clearTimestamps(row) + wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}} + if !reflect.DeepEqual(row, wantRow) { + t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow) + } + } + checkpoint("tested ReadModifyWrite") + + // Test arbitrary timestamps more thoroughly. + if err := adminClient.CreateColumnFamily(ctx, table, "ts"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + const numVersions = 4 + mut = NewMutation() + for i := 0; i < numVersions; i++ { + // Timestamps are used in thousands because the server + // only permits that granularity. 
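+		// (In other words, valid timestamps are multiples of 1000 microseconds,
+		// i.e. whole milliseconds; the bttest fake enforces the same rule in
+		// table.validTimestamp.)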
+ mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i))) + } + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err := tbl.ReadRow(ctx, "testrow") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + // These should be returned in descending timestamp order. + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + {Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow) + } + // Do the same read, but filter to the latest two versions. + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow) + } + // Delete the cell with timestamp 2000 and repeat the last read, + // checking that we get ts 3000 and ts 1000. + mut = NewMutation() + mut.DeleteTimestampRange("ts", "col", 2000, 3000) // half-open interval + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow) + } + checkpoint("tested multiple versions in a cell") + + // Do highly concurrent reads/writes. + // TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved. + const maxConcurrency = 100 + var wg sync.WaitGroup + for i := 0; i < maxConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + switch r := rand.Intn(100); { // r ∈ [0,100) + case 0 <= r && r < 30: + // Do a read. + _, err := tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(1))) + if err != nil { + t.Errorf("Concurrent read: %v", err) + } + case 30 <= r && r < 100: + // Do a write. + mut := NewMutation() + mut.Set("ts", "col", 0, []byte("data")) + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Errorf("Concurrent write: %v", err) + } + } + }() + } + wg.Wait() + checkpoint("tested high concurrency") + + // Large reads, writes and scans. + bigBytes := make([]byte, 3<<20) // 3 MB is large, but less than current gRPC max of 4 MB. 
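+	// (3<<20 is 3 * 1024 * 1024 = 3,145,728 bytes. The fill call below tiles
+	// the pattern across the buffer, leaving any tail shorter than the pattern
+	// as zero bytes.)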
+ nonsense := []byte("lorem ipsum dolor sit amet, ") + fill(bigBytes, nonsense) + mut = NewMutation() + mut.Set("ts", "col", 0, bigBytes) + if err := tbl.Apply(ctx, "bigrow", mut); err != nil { + t.Errorf("Big write: %v", err) + } + r, err = tbl.ReadRow(ctx, "bigrow") + if err != nil { + t.Errorf("Big read: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "bigrow", Column: "ts:col", Value: bigBytes}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Big read returned incorrect bytes: %v", r) + } + // Now write 1000 rows, each with 82 KB values, then scan them all. + medBytes := make([]byte, 82<<10) + fill(medBytes, nonsense) + sem := make(chan int, 50) // do up to 50 mutations at a time. + for i := 0; i < 1000; i++ { + mut := NewMutation() + mut.Set("ts", "big-scan", 0, medBytes) + row := fmt.Sprintf("row-%d", i) + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + sem <- 1 + if err := tbl.Apply(ctx, row, mut); err != nil { + t.Errorf("Preparing large scan: %v", err) + } + }() + } + wg.Wait() + n := 0 + err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + n += len(ri.Value) + } + } + return true + }, RowFilter(ColumnFilter("big-scan"))) + if err != nil { + t.Errorf("Doing large scan: %v", err) + } + if want := 1000 * len(medBytes); n != want { + t.Errorf("Large scan returned %d bytes, want %d", n, want) + } + // Scan a subset of the 1000 rows that we just created, using a LimitRows ReadOption. + rc := 0 + wantRc := 3 + err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool { + rc++ + return true + }, LimitRows(int64(wantRc))) + if rc != wantRc { + t.Errorf("Scan with row limit returned %d rows, want %d", rc, wantRc) + } + checkpoint("tested big read/write/scan") + + // Test bulk mutations + if err := adminClient.CreateColumnFamily(ctx, table, "bulk"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + bulkData := map[string][]string{ + "red sox": {"2004", "2007", "2013"}, + "patriots": {"2001", "2003", "2004", "2014"}, + "celtics": {"1981", "1984", "1986", "2008"}, + } + var rowKeys []string + var muts []*Mutation + for row, ss := range bulkData { + mut := NewMutation() + for _, name := range ss { + mut.Set("bulk", name, 0, []byte("1")) + } + rowKeys = append(rowKeys, row) + muts = append(muts, mut) + } + status, err := tbl.ApplyBulk(ctx, rowKeys, muts) + if err != nil { + t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err) + } + if status != nil { + t.Errorf("non-nil errors: %v", err) + } + checkpoint("inserted bulk data") + + // Read each row back + for rowKey, ss := range bulkData { + row, err := tbl.ReadRow(ctx, rowKey) + if err != nil { + t.Fatalf("Reading a bulk row: %v", err) + } + for _, ris := range row { + sort.Sort(byColumn(ris)) + } + var wantItems []ReadItem + for _, val := range ss { + wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")}) + } + wantRow := Row{"bulk": wantItems} + if !reflect.DeepEqual(row, wantRow) { + t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow) + } + } + checkpoint("tested reading from bulk insert") + + // Test bulk write errors. + // Note: Setting timestamps as ServerTime makes sure the mutations are not retried on error. 
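+	// (A SetCell that uses the ServerTime sentinel is not idempotent, since a
+	// replay would be assigned a different server timestamp, so the client's
+	// mutationsAreRetryable check refuses to retry it.)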
+ badMut := NewMutation() + badMut.Set("badfamily", "col", ServerTime, nil) + badMut2 := NewMutation() + badMut2.Set("badfamily2", "goodcol", ServerTime, []byte("1")) + status, err = tbl.ApplyBulk(ctx, []string{"badrow", "badrow2"}, []*Mutation{badMut, badMut2}) + if err != nil { + t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err) + } + if status == nil { + t.Errorf("No errors for bad bulk mutation") + } else if status[0] == nil || status[1] == nil { + t.Errorf("No error for bad bulk mutation") + } +} + +func formatReadItem(ri ReadItem) string { + // Use the column qualifier only to make the test data briefer. + col := ri.Column[strings.Index(ri.Column, ":")+1:] + return fmt.Sprintf("%s-%s-%s", ri.Row, col, ri.Value) +} + +func fill(b, sub []byte) { + for len(b) > len(sub) { + n := copy(b, sub) + b = b[n:] + } +} + +type byColumn []ReadItem + +func (b byColumn) Len() int { return len(b) } +func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column } + +func clearTimestamps(r Row) { + for _, ris := range r { + for i := range ris { + ris[i].Timestamp = 0 + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/bttest/example_test.go b/vendor/cloud.google.com/go/bigtable/bttest/example_test.go new file mode 100644 index 000000000..5cfc370db --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/bttest/example_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package bttest_test + +import ( + "fmt" + "log" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/bttest" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func ExampleNewServer() { + + srv, err := bttest.NewServer("127.0.0.1:0") + + if err != nil { + log.Fatalln(err) + } + + ctx := context.Background() + + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + log.Fatalln(err) + } + + proj, instance := "proj", "instance" + + adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalln(err) + } + + if err = adminClient.CreateTable(ctx, "example"); err != nil { + log.Fatalln(err) + } + + if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil { + log.Fatalln(err) + } + + client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalln(err) + } + tbl := client.Open("example") + + mut := bigtable.NewMutation() + mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!")) + if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil { + log.Fatalln(err) + } + + if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil { + log.Fatalln(err) + } else { + for _, column := range row["links"] { + fmt.Println(column.Column) + fmt.Println(string(column.Value)) + } + } + + // Output: + // links:golang.org + // Gophers! 
+} diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go new file mode 100644 index 000000000..1c7e6012d --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go @@ -0,0 +1,947 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package bttest contains test helpers for working with the bigtable package. + +To use a Server, create it, and then connect to it with no security: +(The project/instance values are ignored.) + srv, err := bttest.NewServer("127.0.0.1:0") + ... + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + ... + client, err := bigtable.NewClient(ctx, proj, instance, + option.WithGRPCConn(conn)) + ... +*/ +package bttest // import "cloud.google.com/go/bigtable/bttest" + +import ( + "encoding/binary" + "fmt" + "log" + "math/rand" + "net" + "regexp" + "sort" + "strings" + "sync" + "time" + + emptypb "github.com/golang/protobuf/ptypes/empty" + "github.com/golang/protobuf/ptypes/wrappers" + "golang.org/x/net/context" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" + statpb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// Server is an in-memory Cloud Bigtable fake. +// It is unauthenticated, and only a rough approximation. +type Server struct { + Addr string + + l net.Listener + srv *grpc.Server + s *server +} + +// server is the real implementation of the fake. +// It is a separate and unexported type so the API won't be cluttered with +// methods that are only relevant to the fake's implementation. +type server struct { + mu sync.Mutex + tables map[string]*table // keyed by fully qualified name + gcc chan int // set when gcloop starts, closed when server shuts down + + // Any unimplemented methods will cause a panic. + btapb.BigtableTableAdminServer + btpb.BigtableServer +} + +// NewServer creates a new Server. +// The Server will be listening for gRPC connections, without TLS, +// on the provided address. The resolved address is named by the Addr field. +func NewServer(laddr string, opt ...grpc.ServerOption) (*Server, error) { + l, err := net.Listen("tcp", laddr) + if err != nil { + return nil, err + } + + s := &Server{ + Addr: l.Addr().String(), + l: l, + srv: grpc.NewServer(opt...), + s: &server{ + tables: make(map[string]*table), + }, + } + btapb.RegisterBigtableTableAdminServer(s.srv, s.s) + btpb.RegisterBigtableServer(s.srv, s.s) + + go s.srv.Serve(s.l) + + return s, nil +} + +// Close shuts down the server. 
+func (s *Server) Close() {
+	s.s.mu.Lock()
+	if s.s.gcc != nil {
+		close(s.s.gcc)
+	}
+	s.s.mu.Unlock()
+
+	s.srv.Stop()
+	s.l.Close()
+}
+
+func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest) (*btapb.Table, error) {
+	tbl := req.Parent + "/tables/" + req.TableId
+
+	s.mu.Lock()
+	if _, ok := s.tables[tbl]; ok {
+		s.mu.Unlock()
+		return nil, fmt.Errorf("table %q already exists", tbl)
+	}
+	s.tables[tbl] = newTable(req)
+	s.mu.Unlock()
+
+	return &btapb.Table{Name: tbl}, nil
+}
+
+func (s *server) ListTables(ctx context.Context, req *btapb.ListTablesRequest) (*btapb.ListTablesResponse, error) {
+	res := &btapb.ListTablesResponse{}
+	prefix := req.Parent + "/tables/"
+
+	s.mu.Lock()
+	for tbl := range s.tables {
+		if strings.HasPrefix(tbl, prefix) {
+			res.Tables = append(res.Tables, &btapb.Table{Name: tbl})
+		}
+	}
+	s.mu.Unlock()
+
+	return res, nil
+}
+
+func (s *server) GetTable(ctx context.Context, req *btapb.GetTableRequest) (*btapb.Table, error) {
+	tbl := req.Name
+
+	s.mu.Lock()
+	tblIns, ok := s.tables[tbl]
+	s.mu.Unlock()
+	if !ok {
+		return nil, fmt.Errorf("table %q not found", tbl)
+	}
+
+	return &btapb.Table{
+		Name:           tbl,
+		ColumnFamilies: toColumnFamilies(tblIns.columnFamilies()),
+	}, nil
+}
+
+func (s *server) DeleteTable(ctx context.Context, req *btapb.DeleteTableRequest) (*emptypb.Empty, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if _, ok := s.tables[req.Name]; !ok {
+		return nil, fmt.Errorf("no such table %q", req.Name)
+	}
+	delete(s.tables, req.Name)
+	return &emptypb.Empty{}, nil
+}
+
+func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColumnFamiliesRequest) (*btapb.Table, error) {
+	tblName := req.Name[strings.LastIndex(req.Name, "/")+1:]
+
+	s.mu.Lock()
+	tbl, ok := s.tables[req.Name]
+	s.mu.Unlock()
+	if !ok {
+		return nil, fmt.Errorf("no such table %q", req.Name)
+	}
+
+	tbl.mu.Lock()
+	defer tbl.mu.Unlock()
+
+	for _, mod := range req.Modifications {
+		if create := mod.GetCreate(); create != nil {
+			if _, ok := tbl.families[mod.Id]; ok {
+				return nil, fmt.Errorf("family %q already exists", mod.Id)
+			}
+			newcf := &columnFamily{
+				name:   req.Name + "/columnFamilies/" + mod.Id,
+				gcRule: create.GcRule,
+			}
+			tbl.families[mod.Id] = newcf
+		} else if mod.GetDrop() {
+			if _, ok := tbl.families[mod.Id]; !ok {
+				return nil, fmt.Errorf("can't delete unknown family %q", mod.Id)
+			}
+			delete(tbl.families, mod.Id)
+		} else if modify := mod.GetUpdate(); modify != nil {
+			if _, ok := tbl.families[mod.Id]; !ok {
+				return nil, fmt.Errorf("no such family %q", mod.Id)
+			}
+			newcf := &columnFamily{
+				name:   req.Name + "/columnFamilies/" + mod.Id,
+				gcRule: modify.GcRule,
+			}
+			// Assume that we ALWAYS want to replace with the new setting;
+			// we may need to support partial updates, though.
+			tbl.families[mod.Id] = newcf
+		}
+	}
+
+	s.needGC()
+	return &btapb.Table{
+		Name:           tblName,
+		ColumnFamilies: toColumnFamilies(tbl.families),
+	}, nil
+}
+
+func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRowsServer) error {
+	s.mu.Lock()
+	tbl, ok := s.tables[req.TableName]
+	s.mu.Unlock()
+	if !ok {
+		return fmt.Errorf("no such table %q", req.TableName)
+	}
+
+	// Rows to read can be specified by a set of row keys and/or a set of row ranges.
+	// Output is a stream of sorted, de-duped rows.
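+	// The implementation gathers matching rows into a map keyed by row key,
+	// which de-dupes overlapping keys and ranges, then sorts the rows and
+	// streams each one through the filter.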
+	tbl.mu.RLock()
+
+	rowSet := make(map[string]*row)
+	if req.Rows != nil {
+		// Add the explicitly given keys
+		for _, key := range req.Rows.RowKeys {
+			start := string(key)
+			addRows(start, start+"\x00", tbl, rowSet)
+		}
+
+		// Add keys from row ranges
+		for _, rr := range req.Rows.RowRanges {
+			var start, end string
+			switch sk := rr.StartKey.(type) {
+			case *btpb.RowRange_StartKeyClosed:
+				start = string(sk.StartKeyClosed)
+			case *btpb.RowRange_StartKeyOpen:
+				start = string(sk.StartKeyOpen) + "\x00"
+			}
+			switch ek := rr.EndKey.(type) {
+			case *btpb.RowRange_EndKeyClosed:
+				end = string(ek.EndKeyClosed) + "\x00"
+			case *btpb.RowRange_EndKeyOpen:
+				end = string(ek.EndKeyOpen)
+			}
+
+			addRows(start, end, tbl, rowSet)
+		}
+	} else {
+		// Read all rows
+		addRows("", "", tbl, rowSet)
+	}
+	tbl.mu.RUnlock()
+
+	rows := make([]*row, 0, len(rowSet))
+	for _, r := range rowSet {
+		rows = append(rows, r)
+	}
+	sort.Sort(byRowKey(rows))
+
+	limit := int(req.RowsLimit)
+	for i, r := range rows {
+		if limit > 0 && i >= limit {
+			return nil
+		}
+		if err := streamRow(stream, r, req.Filter); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func addRows(start, end string, tbl *table, rowSet map[string]*row) {
+	si, ei := 0, len(tbl.rows) // half-open interval
+	if start != "" {
+		si = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start })
+	}
+	if end != "" {
+		ei = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end })
+	}
+	if si < ei {
+		for _, row := range tbl.rows[si:ei] {
+			rowSet[row.key] = row
+		}
+	}
+}
+
+func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) error {
+	r.mu.Lock()
+	nr := r.copy()
+	r.mu.Unlock()
+	r = nr
+
+	filterRow(f, r)
+
+	rrr := &btpb.ReadRowsResponse{}
+	for col, cells := range r.cells {
+		i := strings.Index(col, ":") // guaranteed to exist
+		fam, col := col[:i], col[i+1:]
+		if len(cells) == 0 {
+			continue
+		}
+		// TODO(dsymonds): Apply transformers.
+		for _, cell := range cells {
+			rrr.Chunks = append(rrr.Chunks, &btpb.ReadRowsResponse_CellChunk{
+				RowKey:          []byte(r.key),
+				FamilyName:      &wrappers.StringValue{Value: fam},
+				Qualifier:       &wrappers.BytesValue{Value: []byte(col)},
+				TimestampMicros: cell.ts,
+				Value:           cell.value,
+			})
+		}
+	}
+	// We can't have a cell with just COMMIT set, which would imply a new empty cell.
+	// So modify the last cell to have the COMMIT flag set.
+	if len(rrr.Chunks) > 0 {
+		rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true}
+	}
+
+	return stream.Send(rrr)
+}
+
+// filterRow modifies a row with the given filter.
+func filterRow(f *btpb.RowFilter, r *row) {
+	if f == nil {
+		return
+	}
+	// Handle filters that apply beyond just including/excluding cells.
+	switch f := f.Filter.(type) {
+	case *btpb.RowFilter_Chain_:
+		for _, sub := range f.Chain.Filters {
+			filterRow(sub, r)
+		}
+		return
+	case *btpb.RowFilter_Interleave_:
+		srs := make([]*row, 0, len(f.Interleave.Filters))
+		for _, sub := range f.Interleave.Filters {
+			sr := r.copy()
+			filterRow(sub, sr)
+			srs = append(srs, sr)
+		}
+		// merge
+		// TODO(dsymonds): is this correct?
+		r.cells = make(map[string][]cell)
+		for _, sr := range srs {
+			for col, cs := range sr.cells {
+				r.cells[col] = append(r.cells[col], cs...)
+ } + } + for _, cs := range r.cells { + sort.Sort(byDescTS(cs)) + } + return + case *btpb.RowFilter_CellsPerColumnLimitFilter: + lim := int(f.CellsPerColumnLimitFilter) + for col, cs := range r.cells { + if len(cs) > lim { + r.cells[col] = cs[:lim] + } + } + return + } + + // Any other case, operate on a per-cell basis. + for key, cs := range r.cells { + i := strings.Index(key, ":") // guaranteed to exist + fam, col := key[:i], key[i+1:] + r.cells[key] = filterCells(f, fam, col, cs) + } +} + +func filterCells(f *btpb.RowFilter, fam, col string, cs []cell) []cell { + var ret []cell + for _, cell := range cs { + if includeCell(f, fam, col, cell) { + ret = append(ret, cell) + } + } + return ret +} + +func includeCell(f *btpb.RowFilter, fam, col string, cell cell) bool { + if f == nil { + return true + } + // TODO(dsymonds): Implement many more filters. + switch f := f.Filter.(type) { + default: + log.Printf("WARNING: don't know how to handle filter of type %T (ignoring it)", f) + return true + case *btpb.RowFilter_FamilyNameRegexFilter: + pat := string(f.FamilyNameRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad family_name_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.MatchString(fam) + case *btpb.RowFilter_ColumnQualifierRegexFilter: + pat := string(f.ColumnQualifierRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad column_qualifier_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.MatchString(col) + case *btpb.RowFilter_ValueRegexFilter: + pat := string(f.ValueRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad value_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.Match(cell.value) + } +} + +func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*btpb.MutateRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, fmt.Errorf("no such table %q", req.TableName) + } + + fs := tbl.columnFamiliesSet() + r := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + defer r.mu.Unlock() + + if err := applyMutations(tbl, r, req.Mutations, fs); err != nil { + return nil, err + } + return &btpb.MutateRowResponse{}, nil +} + +func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_MutateRowsServer) error { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return fmt.Errorf("no such table %q", req.TableName) + } + + res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))} + + fs := tbl.columnFamiliesSet() + + for i, entry := range req.Entries { + r := tbl.mutableRow(string(entry.RowKey)) + r.mu.Lock() + code, msg := int32(codes.OK), "" + if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil { + code = int32(codes.Internal) + msg = err.Error() + } + res.Entries[i] = &btpb.MutateRowsResponse_Entry{ + Index: int64(i), + Status: &statpb.Status{Code: code, Message: msg}, + } + r.mu.Unlock() + } + stream.Send(res) + return nil +} + +func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutateRowRequest) (*btpb.CheckAndMutateRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, fmt.Errorf("no such table %q", req.TableName) + } + + res := &btpb.CheckAndMutateRowResponse{} + + fs := tbl.columnFamiliesSet() + + r := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + defer r.mu.Unlock() + + // 
+	// Figure out which mutation to apply.
+	whichMut := false
+	if req.PredicateFilter == nil {
+		// Use true_mutations iff row contains any cells.
+		whichMut = len(r.cells) > 0
+	} else {
+		// Use true_mutations iff any cells in the row match the filter.
+		// TODO(dsymonds): This could be cheaper.
+		nr := r.copy()
+		filterRow(req.PredicateFilter, nr)
+		for _, cs := range nr.cells {
+			if len(cs) > 0 {
+				whichMut = true
+				break
+			}
+		}
+		// TODO(dsymonds): Figure out if this is supposed to be set
+		// even when there's no predicate filter.
+		res.PredicateMatched = whichMut
+	}
+	muts := req.FalseMutations
+	if whichMut {
+		muts = req.TrueMutations
+	}
+
+	if err := applyMutations(tbl, r, muts, fs); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+// applyMutations applies a sequence of mutations to a row.
+// fs should be a snapshot of the keys of tbl.families.
+// It assumes r.mu is locked.
+func applyMutations(tbl *table, r *row, muts []*btpb.Mutation, fs map[string]bool) error {
+	for _, mut := range muts {
+		switch mut := mut.Mutation.(type) {
+		default:
+			return fmt.Errorf("can't handle mutation type %T", mut)
+		case *btpb.Mutation_SetCell_:
+			set := mut.SetCell
+			if !fs[set.FamilyName] {
+				return fmt.Errorf("unknown family %q", set.FamilyName)
+			}
+			ts := set.TimestampMicros
+			if ts == -1 { // bigtable.ServerTime
+				ts = newTimestamp()
+			}
+			if !tbl.validTimestamp(ts) {
+				return fmt.Errorf("invalid timestamp %d", ts)
+			}
+			col := fmt.Sprintf("%s:%s", set.FamilyName, set.ColumnQualifier)
+
+			newCell := cell{ts: ts, value: set.Value}
+			r.cells[col] = appendOrReplaceCell(r.cells[col], newCell)
+		case *btpb.Mutation_DeleteFromColumn_:
+			del := mut.DeleteFromColumn
+			col := fmt.Sprintf("%s:%s", del.FamilyName, del.ColumnQualifier)
+
+			cs := r.cells[col]
+			if del.TimeRange != nil {
+				tsr := del.TimeRange
+				if !tbl.validTimestamp(tsr.StartTimestampMicros) {
+					return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros)
+				}
+				if !tbl.validTimestamp(tsr.EndTimestampMicros) {
+					return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros)
+				}
+				// Find half-open interval to remove.
+				// Cells are in descending timestamp order,
+				// so the predicates to sort.Search are inverted.
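+				// For example, with cells at ts 4000, 3000, 2000, 1000 and a
+				// delete range of [2000, 4000): ei (first ts < 2000) is 3 and
+				// si (first ts < 4000) is 1, so cs[1:3], the cells at ts 3000
+				// and 2000, are removed: exactly the half-open [start, end) span.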
+ si, ei := 0, len(cs) + if tsr.StartTimestampMicros > 0 { + ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros }) + } + if tsr.EndTimestampMicros > 0 { + si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros }) + } + if si < ei { + copy(cs[si:], cs[ei:]) + cs = cs[:len(cs)-(ei-si)] + } + } else { + cs = nil + } + if len(cs) == 0 { + delete(r.cells, col) + } else { + r.cells[col] = cs + } + case *btpb.Mutation_DeleteFromRow_: + r.cells = make(map[string][]cell) + } + } + return nil +} + +func maxTimestamp(x, y int64) int64 { + if x > y { + return x + } + return y +} + +func newTimestamp() int64 { + ts := time.Now().UnixNano() / 1e3 + ts -= ts % 1000 // round to millisecond granularity + return ts +} + +func appendOrReplaceCell(cs []cell, newCell cell) []cell { + replaced := false + for i, cell := range cs { + if cell.ts == newCell.ts { + cs[i] = newCell + replaced = true + break + } + } + if !replaced { + cs = append(cs, newCell) + } + sort.Sort(byDescTS(cs)) + return cs +} + +func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWriteRowRequest) (*btpb.ReadModifyWriteRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, fmt.Errorf("no such table %q", req.TableName) + } + + updates := make(map[string]cell) // copy of updated cells; keyed by full column name + + fs := tbl.columnFamiliesSet() + + r := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + defer r.mu.Unlock() + // Assume all mutations apply to the most recent version of the cell. + // TODO(dsymonds): Verify this assumption and document it in the proto. + for _, rule := range req.Rules { + if !fs[rule.FamilyName] { + return nil, fmt.Errorf("unknown family %q", rule.FamilyName) + } + + key := fmt.Sprintf("%s:%s", rule.FamilyName, rule.ColumnQualifier) + + cells := r.cells[key] + ts := newTimestamp() + var newCell, prevCell cell + isEmpty := len(cells) == 0 + if !isEmpty { + prevCell = cells[0] + + // ts is the max of now or the prev cell's timestamp in case the + // prev cell is in the future + ts = maxTimestamp(ts, prevCell.ts) + } + + switch rule := rule.Rule.(type) { + default: + return nil, fmt.Errorf("unknown RMW rule oneof %T", rule) + case *btpb.ReadModifyWriteRule_AppendValue: + newCell = cell{ts: ts, value: append(prevCell.value, rule.AppendValue...)} + case *btpb.ReadModifyWriteRule_IncrementAmount: + var v int64 + if !isEmpty { + prevVal := prevCell.value + if len(prevVal) != 8 { + return nil, fmt.Errorf("increment on non-64-bit value") + } + v = int64(binary.BigEndian.Uint64(prevVal)) + } + v += rule.IncrementAmount + var val [8]byte + binary.BigEndian.PutUint64(val[:], uint64(v)) + newCell = cell{ts: ts, value: val[:]} + } + updates[key] = newCell + r.cells[key] = appendOrReplaceCell(r.cells[key], newCell) + } + + res := &btpb.Row{ + Key: req.RowKey, + } + for col, cell := range updates { + i := strings.Index(col, ":") + fam, qual := col[:i], col[i+1:] + var f *btpb.Family + for _, ff := range res.Families { + if ff.Name == fam { + f = ff + break + } + } + if f == nil { + f = &btpb.Family{Name: fam} + res.Families = append(res.Families, f) + } + f.Columns = append(f.Columns, &btpb.Column{ + Qualifier: []byte(qual), + Cells: []*btpb.Cell{{ + Value: cell.value, + }}, + }) + } + return &btpb.ReadModifyWriteRowResponse{Row: res}, nil +} + +// needGC is invoked whenever the server needs gcloop running. 
+func (s *server) needGC() { + s.mu.Lock() + if s.gcc == nil { + s.gcc = make(chan int) + go s.gcloop(s.gcc) + } + s.mu.Unlock() +} + +func (s *server) gcloop(done <-chan int) { + const ( + minWait = 500 // ms + maxWait = 1500 // ms + ) + + for { + // Wait for a random time interval. + d := time.Duration(minWait+rand.Intn(maxWait-minWait)) * time.Millisecond + select { + case <-time.After(d): + case <-done: + return // server has been closed + } + + // Do a GC pass over all tables. + var tables []*table + s.mu.Lock() + for _, tbl := range s.tables { + tables = append(tables, tbl) + } + s.mu.Unlock() + for _, tbl := range tables { + tbl.gc() + } + } +} + +type table struct { + mu sync.RWMutex + families map[string]*columnFamily // keyed by plain family name + rows []*row // sorted by row key + rowIndex map[string]*row // indexed by row key +} + +func newTable(ctr *btapb.CreateTableRequest) *table { + fams := make(map[string]*columnFamily) + if ctr.Table != nil { + for id, cf := range ctr.Table.ColumnFamilies { + fams[id] = &columnFamily{ + name: ctr.Parent + "/columnFamilies/" + id, + gcRule: cf.GcRule, + } + } + } + return &table{ + families: fams, + rowIndex: make(map[string]*row), + } +} + +func (t *table) validTimestamp(ts int64) bool { + // Assume millisecond granularity is required. + return ts%1000 == 0 +} + +func (t *table) columnFamilies() map[string]*columnFamily { + cp := make(map[string]*columnFamily) + t.mu.RLock() + for fam, cf := range t.families { + cp[fam] = cf + } + t.mu.RUnlock() + return cp +} + +func (t *table) columnFamiliesSet() map[string]bool { + fs := make(map[string]bool) + for fam := range t.columnFamilies() { + fs[fam] = true + } + return fs +} + +func (t *table) mutableRow(row string) *row { + // Try fast path first. + t.mu.RLock() + r := t.rowIndex[row] + t.mu.RUnlock() + if r != nil { + return r + } + + // We probably need to create the row. + t.mu.Lock() + r = t.rowIndex[row] + if r == nil { + r = newRow(row) + t.rowIndex[row] = r + t.rows = append(t.rows, r) + sort.Sort(byRowKey(t.rows)) // yay, inefficient! + } + t.mu.Unlock() + return r +} + +func (t *table) gc() { + // This method doesn't add or remove rows, so we only need a read lock for the table. + t.mu.RLock() + defer t.mu.RUnlock() + + // Gather GC rules we'll apply. + rules := make(map[string]*btapb.GcRule) // keyed by "fam" + for fam, cf := range t.families { + if cf.gcRule != nil { + rules[fam] = cf.gcRule + } + } + if len(rules) == 0 { + return + } + + for _, r := range t.rows { + r.mu.Lock() + r.gc(rules) + r.mu.Unlock() + } +} + +type byRowKey []*row + +func (b byRowKey) Len() int { return len(b) } +func (b byRowKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key } + +type row struct { + key string + + mu sync.Mutex + cells map[string][]cell // keyed by full column name; cells are in descending timestamp order +} + +func newRow(key string) *row { + return &row{ + key: key, + cells: make(map[string][]cell), + } +} + +// copy returns a copy of the row. +// Cell values are aliased. +// r.mu should be held. +func (r *row) copy() *row { + nr := &row{ + key: r.key, + cells: make(map[string][]cell, len(r.cells)), + } + for col, cs := range r.cells { + // Copy the []cell slice, but not the []byte inside each cell. + nr.cells[col] = append([]cell(nil), cs...) + } + return nr +} + +// gc applies the given GC rules to the row. +// r.mu should be held. 
+func (r *row) gc(rules map[string]*btapb.GcRule) {
+	for col, cs := range r.cells {
+		fam := col[:strings.Index(col, ":")]
+		rule, ok := rules[fam]
+		if !ok {
+			continue
+		}
+		r.cells[col] = applyGC(cs, rule)
+	}
+}
+
+var gcTypeWarn sync.Once
+
+// applyGC applies the given GC rule to the cells.
+func applyGC(cells []cell, rule *btapb.GcRule) []cell {
+	switch rule := rule.Rule.(type) {
+	default:
+		// TODO(dsymonds): Support GcRule_Intersection_
+		gcTypeWarn.Do(func() {
+			log.Printf("Unsupported GC rule type %T", rule)
+		})
+	case *btapb.GcRule_Union_:
+		for _, sub := range rule.Union.Rules {
+			cells = applyGC(cells, sub)
+		}
+		return cells
+	case *btapb.GcRule_MaxAge:
+		// Timestamps are in microseconds.
+		cutoff := time.Now().UnixNano() / 1e3
+		cutoff -= rule.MaxAge.Seconds * 1e6
+		cutoff -= int64(rule.MaxAge.Nanos) / 1e3
+		// The slice of cells is in descending timestamp order.
+		// This sort.Search will return the index of the first cell whose timestamp is chronologically before the cutoff.
+		si := sort.Search(len(cells), func(i int) bool { return cells[i].ts < cutoff })
+		if si < len(cells) {
+			log.Printf("bttest: GC MaxAge(%v) deleted %d cells.", rule.MaxAge, len(cells)-si)
+		}
+		return cells[:si]
+	case *btapb.GcRule_MaxNumVersions:
+		n := int(rule.MaxNumVersions)
+		if len(cells) > n {
+			cells = cells[:n]
+		}
+		return cells
+	}
+	return cells
+}
+
+type cell struct {
+	ts    int64
+	value []byte
+}
+
+type byDescTS []cell
+
+func (b byDescTS) Len() int           { return len(b) }
+func (b byDescTS) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts }
+
+type columnFamily struct {
+	name   string
+	gcRule *btapb.GcRule
+}
+
+func (c *columnFamily) proto() *btapb.ColumnFamily {
+	return &btapb.ColumnFamily{
+		GcRule: c.gcRule,
+	}
+}
+
+func toColumnFamilies(families map[string]*columnFamily) map[string]*btapb.ColumnFamily {
+	fs := make(map[string]*btapb.ColumnFamily)
+	for k, v := range families {
+		fs[k] = v.proto()
+	}
+	return fs
+}
diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
new file mode 100644
index 000000000..547da19e0
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
@@ -0,0 +1,175 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package bttest + +import ( + "fmt" + "math/rand" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +func TestConcurrentMutationsReadModifyAndGC(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + if _, err := s.CreateTable( + ctx, + &btapb.CreateTableRequest{Parent: "cluster", TableId: "t"}); err != nil { + t.Fatal(err) + } + const name = `cluster/tables/t` + tbl := s.tables[name] + req := &btapb.ModifyColumnFamiliesRequest{ + Name: name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{ + { + Id: "cf", + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}}, + }, + }, + } + _, err := s.ModifyColumnFamilies(ctx, req) + if err != nil { + t.Fatal(err) + } + req = &btapb.ModifyColumnFamiliesRequest{ + Name: name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{ + { + Id: "cf", + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{ + Update: &btapb.ColumnFamily{GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}}, + }, + }, + } + if _, err := s.ModifyColumnFamilies(ctx, req); err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + var ts int64 + ms := func() []*btpb.Mutation { + return []*btpb.Mutation{ + { + Mutation: &btpb.Mutation_SetCell_{ + SetCell: &btpb.Mutation_SetCell{ + FamilyName: "cf", + ColumnQualifier: []byte(`col`), + TimestampMicros: atomic.AddInt64(&ts, 1000), + }, + }, + }, + } + } + + rmw := func() *btpb.ReadModifyWriteRowRequest { + return &btpb.ReadModifyWriteRowRequest{ + TableName: name, + RowKey: []byte(fmt.Sprint(rand.Intn(100))), + Rules: []*btpb.ReadModifyWriteRule{ + { + FamilyName: "cf", + ColumnQualifier: []byte("col"), + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: 1}, + }, + }, + } + } + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for ctx.Err() == nil { + req := &btpb.MutateRowRequest{ + TableName: name, + RowKey: []byte(fmt.Sprint(rand.Intn(100))), + Mutations: ms(), + } + s.MutateRow(ctx, req) + } + }() + wg.Add(1) + go func() { + defer wg.Done() + for ctx.Err() == nil { + _, _ = s.ReadModifyWriteRow(ctx, rmw()) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + tbl.gc() + }() + } + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(100 * time.Millisecond): + t.Error("Concurrent mutations and GCs haven't completed after 100ms") + } +} + +func TestCreateTableWithFamily(t *testing.T) { + // The Go client currently doesn't support creating a table with column families + // in one operation but it is allowed by the API. This must still be supported by the + // fake server so this test lives here instead of in the main bigtable + // integration test. 
+ s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 123}}}, + "cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 456}}}, + }, + } + cTbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + tbl, err := s.GetTable(ctx, &btapb.GetTableRequest{Name: cTbl.Name}) + if err != nil { + t.Fatalf("Getting table: %v", err) + } + cf := tbl.ColumnFamilies["cf1"] + if cf == nil { + t.Fatalf("Missing col family cf1") + } + if got, want := cf.GcRule.GetMaxNumVersions(), int32(123); got != want { + t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got) + } + cf = tbl.ColumnFamilies["cf2"] + if cf == nil { + t.Fatalf("Missing col family cf2") + } + if got, want := cf.GcRule.GetMaxNumVersions(), int32(456); got != want { + t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go new file mode 100644 index 000000000..706a0eb80 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go @@ -0,0 +1,738 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +// Command docs are in cbtdoc.go. 
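+// (cbtdoc.go is the generated godoc text: the "doc" command below emits it,
+// and the //go:generate directive embedded in docTemplate's output is what
+// regenerates the file.)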
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"go/format"
+	"io"
+	"log"
+	"os"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+	"time"
+
+	"cloud.google.com/go/bigtable"
+	"cloud.google.com/go/bigtable/internal/cbtrc"
+	"golang.org/x/net/context"
+)
+
+var (
+	oFlag = flag.String("o", "", "if set, redirect stdout to this file")
+
+	config              *cbtrc.Config
+	client              *bigtable.Client
+	adminClient         *bigtable.AdminClient
+	instanceAdminClient *bigtable.InstanceAdminClient
+)
+
+func getClient() *bigtable.Client {
+	if client == nil {
+		var err error
+		client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance)
+		if err != nil {
+			log.Fatalf("Making bigtable.Client: %v", err)
+		}
+	}
+	return client
+}
+
+func getAdminClient() *bigtable.AdminClient {
+	if adminClient == nil {
+		var err error
+		adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance)
+		if err != nil {
+			log.Fatalf("Making bigtable.AdminClient: %v", err)
+		}
+	}
+	return adminClient
+}
+
+func getInstanceAdminClient() *bigtable.InstanceAdminClient {
+	if instanceAdminClient == nil {
+		var err error
+		instanceAdminClient, err = bigtable.NewInstanceAdminClient(context.Background(), config.Project)
+		if err != nil {
+			log.Fatalf("Making bigtable.InstanceAdminClient: %v", err)
+		}
+	}
+	return instanceAdminClient
+}
+
+func main() {
+	var err error
+	config, err = cbtrc.Load()
+	if err != nil {
+		log.Fatal(err)
+	}
+	config.RegisterFlags()
+
+	flag.Usage = func() { usage(os.Stderr) }
+	flag.Parse()
+	if err := config.CheckFlags(); err != nil {
+		log.Fatal(err)
+	}
+	if config.Creds != "" {
+		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
+	}
+	if flag.NArg() == 0 {
+		usage(os.Stderr)
+		os.Exit(1)
+	}
+
+	if *oFlag != "" {
+		f, err := os.Create(*oFlag)
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer func() {
+			if err := f.Close(); err != nil {
+				log.Fatal(err)
+			}
+		}()
+		os.Stdout = f
+	}
+
+	ctx := context.Background()
+	for _, cmd := range commands {
+		if cmd.Name == flag.Arg(0) {
+			cmd.do(ctx, flag.Args()[1:]...)
+			return
+		}
+	}
+	log.Fatalf("Unknown command %q", flag.Arg(0))
+}
+
+func usage(w io.Writer) {
+	fmt.Fprintf(w, "Usage: %s [flags] <command> ...\n", os.Args[0])
+	flag.CommandLine.SetOutput(w)
+	flag.CommandLine.PrintDefaults()
+	fmt.Fprintf(w, "\n%s", cmdSummary)
+}
+
+var cmdSummary string // generated in init, below
+
+func init() {
+	var buf bytes.Buffer
+	tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0)
+	for _, cmd := range commands {
+		fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc)
+	}
+	tw.Flush()
+	buf.WriteString(configHelp)
+	cmdSummary = buf.String()
+}
+
+var configHelp = `
+For convenience, values of the -project, -instance and -creds flags
+may be specified in ` + cbtrc.Filename() + ` in this format:
+	project = my-project-123
+	instance = my-instance
+	creds = path-to-account-key.json
+All values are optional, and all will be overridden by flags.
+`
+
+var commands = []struct {
+	Name, Desc string
+	do         func(context.Context, ...string)
+	Usage      string
+}{
+	{
+		Name:  "count",
+		Desc:  "Count rows in a table",
+		do:    doCount,
+		Usage: "cbt count <table>",
", + }, + { + Name: "createfamily", + Desc: "Create a column family", + do: doCreateFamily, + Usage: "cbt createfamily
", + }, + { + Name: "createtable", + Desc: "Create a table", + do: doCreateTable, + Usage: "cbt createtable
", + }, + { + Name: "deletefamily", + Desc: "Delete a column family", + do: doDeleteFamily, + Usage: "cbt deletefamily
", + }, + { + Name: "deleterow", + Desc: "Delete a row", + do: doDeleteRow, + Usage: "cbt deleterow
", + }, + { + Name: "deletetable", + Desc: "Delete a table", + do: doDeleteTable, + Usage: "cbt deletetable
", + }, + { + Name: "doc", + Desc: "Print godoc-suitable documentation for cbt", + do: doDoc, + Usage: "cbt doc", + }, + { + Name: "help", + Desc: "Print help text", + do: doHelp, + Usage: "cbt help [command]", + }, + { + Name: "listinstances", + Desc: "List instances in a project", + do: doListInstances, + Usage: "cbt listinstances", + }, + { + Name: "lookup", + Desc: "Read from a single row", + do: doLookup, + Usage: "cbt lookup
", + }, + { + Name: "ls", + Desc: "List tables and column families", + do: doLS, + Usage: "cbt ls List tables\n" + + "cbt ls
List column families in
", + }, + { + Name: "mddoc", + Desc: "Print documentation for cbt in Markdown format", + do: doMDDoc, + Usage: "cbt mddoc", + }, + { + Name: "read", + Desc: "Read rows", + do: doRead, + Usage: "cbt read
[start=] [end=] [prefix=] [count=]\n" + + " start= Start reading at this row\n" + + " end= Stop reading before this row\n" + + " prefix= Read rows with this prefix\n" + + " count= Read only this many rows\n", + }, + { + Name: "set", + Desc: "Set value of a cell", + do: doSet, + Usage: "cbt set
+			"  family:column=val[@ts] may be repeated to set multiple cells.\n" +
+			"\n" +
+			"  ts is an optional integer timestamp.\n" +
+			"  If it cannot be parsed, the `@ts` part will be\n" +
+			"  interpreted as part of the value.",
+	},
+	{
+		Name: "setgcpolicy",
+		Desc: "Set the GC policy for a column family",
+		do:   doSetGCPolicy,
+		Usage: "cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )\n" +
+			"\n" +
+			`  maxage=<d>		Maximum timestamp age to preserve (e.g. "1h", "4d")` + "\n" +
+			"  maxversions=<n>	Maximum number of versions to preserve",
+	},
+}
+
+func doCount(ctx context.Context, args ...string) {
+	if len(args) != 1 {
+		log.Fatal("usage: cbt count <table>")
") + } + tbl := getClient().Open(args[0]) + + n := 0 + err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool { + n++ + return true + }, bigtable.RowFilter(bigtable.StripValueFilter())) + if err != nil { + log.Fatalf("Reading rows: %v", err) + } + fmt.Println(n) +} + +func doCreateFamily(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt createfamily
") + } + err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1]) + if err != nil { + log.Fatalf("Creating column family: %v", err) + } +} + +func doCreateTable(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatal("usage: cbt createtable
") + } + err := getAdminClient().CreateTable(ctx, args[0]) + if err != nil { + log.Fatalf("Creating table: %v", err) + } +} + +func doDeleteFamily(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt deletefamily
") + } + err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1]) + if err != nil { + log.Fatalf("Deleting column family: %v", err) + } +} + +func doDeleteRow(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt deleterow
") + } + tbl := getClient().Open(args[0]) + mut := bigtable.NewMutation() + mut.DeleteRow() + if err := tbl.Apply(ctx, args[1], mut); err != nil { + log.Fatalf("Deleting row: %v", err) + } +} + +func doDeleteTable(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatalf("Can't do `cbt deletetable %s`", args) + } + err := getAdminClient().DeleteTable(ctx, args[0]) + if err != nil { + log.Fatalf("Deleting table: %v", err) + } +} + +// to break circular dependencies +var ( + doDocFn func(ctx context.Context, args ...string) + doHelpFn func(ctx context.Context, args ...string) + doMDDocFn func(ctx context.Context, args ...string) +) + +func init() { + doDocFn = doDocReal + doHelpFn = doHelpReal + doMDDocFn = doMDDocReal +} + +func doDoc(ctx context.Context, args ...string) { doDocFn(ctx, args...) } +func doHelp(ctx context.Context, args ...string) { doHelpFn(ctx, args...) } +func doMDDoc(ctx context.Context, args ...string) { doMDDocFn(ctx, args...) } + +func docFlags() []*flag.Flag { + // Only include specific flags, in a specific order. + var flags []*flag.Flag + for _, name := range []string{"project", "instance", "creds"} { + f := flag.Lookup(name) + if f == nil { + log.Fatalf("Flag not linked: -%s", name) + } + flags = append(flags, f) + } + return flags +} + +func doDocReal(ctx context.Context, args ...string) { + data := map[string]interface{}{ + "Commands": commands, + "Flags": docFlags(), + } + var buf bytes.Buffer + if err := docTemplate.Execute(&buf, data); err != nil { + log.Fatalf("Bad doc template: %v", err) + } + out, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatalf("Bad doc output: %v", err) + } + os.Stdout.Write(out) +} + +func indentLines(s, ind string) string { + ss := strings.Split(s, "\n") + for i, p := range ss { + ss[i] = ind + p + } + return strings.Join(ss, "\n") +} + +var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{ + "indent": indentLines, +}). + Parse(` +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. +// Run "go generate" to regenerate. +//go:generate go run cbt.go -o cbtdoc.go doc + +/* +Cbt is a tool for doing basic interactions with Cloud Bigtable. + +Usage: + + cbt [options] command [arguments] + +The commands are: +{{range .Commands}} + {{printf "%-25s %s" .Name .Desc}}{{end}} + +Use "cbt help " for more information about a command. 
+
+The options are:
+{{range .Flags}}
+	-{{.Name}} string
+		{{.Usage}}{{end}}
+
+{{range .Commands}}
+{{.Desc}}
+
+Usage:
+{{indent .Usage "\t"}}
+
+
+
+{{end}}
+*/
+package main
+`))
+
+func doHelpReal(ctx context.Context, args ...string) {
+	if len(args) == 0 {
+		usage(os.Stdout)
+		return
+	}
+	for _, cmd := range commands {
+		if cmd.Name == args[0] {
+			fmt.Println(cmd.Usage)
+			return
+		}
+	}
+	log.Fatalf("Don't know command %q", args[0])
+}
+
+func doListInstances(ctx context.Context, args ...string) {
+	if len(args) != 0 {
+		log.Fatalf("usage: cbt listinstances")
+	}
+	is, err := getInstanceAdminClient().Instances(ctx)
+	if err != nil {
+		log.Fatalf("Getting list of instances: %v", err)
+	}
+	tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
+	fmt.Fprintf(tw, "Instance Name\tInfo\n")
+	fmt.Fprintf(tw, "-------------\t----\n")
+	for _, i := range is {
+		fmt.Fprintf(tw, "%s\t%s\n", i.Name, i.DisplayName)
+	}
+	tw.Flush()
+}
+
+func doLookup(ctx context.Context, args ...string) {
+	if len(args) != 2 {
+		log.Fatalf("usage: cbt lookup <table> <row>
") + } + table, row := args[0], args[1] + tbl := getClient().Open(table) + r, err := tbl.ReadRow(ctx, row) + if err != nil { + log.Fatalf("Reading row: %v", err) + } + printRow(r) +} + +func printRow(r bigtable.Row) { + fmt.Println(strings.Repeat("-", 40)) + fmt.Println(r.Key()) + + var fams []string + for fam := range r { + fams = append(fams, fam) + } + sort.Strings(fams) + for _, fam := range fams { + ris := r[fam] + sort.Sort(byColumn(ris)) + for _, ri := range ris { + ts := time.Unix(0, int64(ri.Timestamp)*1e3) + fmt.Printf(" %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000")) + fmt.Printf(" %q\n", ri.Value) + } + } +} + +type byColumn []bigtable.ReadItem + +func (b byColumn) Len() int { return len(b) } +func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column } + +func doLS(ctx context.Context, args ...string) { + switch len(args) { + default: + log.Fatalf("Can't do `cbt ls %s`", args) + case 0: + tables, err := getAdminClient().Tables(ctx) + if err != nil { + log.Fatalf("Getting list of tables: %v", err) + } + sort.Strings(tables) + for _, table := range tables { + fmt.Println(table) + } + case 1: + table := args[0] + ti, err := getAdminClient().TableInfo(ctx, table) + if err != nil { + log.Fatalf("Getting table info: %v", err) + } + sort.Strings(ti.Families) + for _, fam := range ti.Families { + fmt.Println(fam) + } + } +} + +func doMDDocReal(ctx context.Context, args ...string) { + data := map[string]interface{}{ + "Commands": commands, + "Flags": docFlags(), + } + var buf bytes.Buffer + if err := mddocTemplate.Execute(&buf, data); err != nil { + log.Fatalf("Bad mddoc template: %v", err) + } + io.Copy(os.Stdout, &buf) +} + +var mddocTemplate = template.Must(template.New("mddoc").Funcs(template.FuncMap{ + "indent": indentLines, +}). + Parse(` +Cbt is a tool for doing basic interactions with Cloud Bigtable. + +Usage: + + cbt [options] command [arguments] + +The commands are: +{{range .Commands}} + {{printf "%-25s %s" .Name .Desc}}{{end}} + +Use "cbt help " for more information about a command. + +The options are: +{{range .Flags}} + -{{.Name}} string + {{.Usage}}{{end}} + +{{range .Commands}} +## {{.Desc}} + +{{indent .Usage "\t"}} + + + +{{end}} +`)) + +func doRead(ctx context.Context, args ...string) { + if len(args) < 1 { + log.Fatalf("usage: cbt read
<table> [args ...]")
+	}
+	tbl := getClient().Open(args[0])
+
+	parsed := make(map[string]string)
+	for _, arg := range args[1:] {
+		i := strings.Index(arg, "=")
+		if i < 0 {
+			log.Fatalf("Bad arg %q", arg)
+		}
+		key, val := arg[:i], arg[i+1:]
+		switch key {
+		default:
+			log.Fatalf("Unknown arg key %q", key)
+		case "limit":
+			// Be nicer; we used to support this, but renamed it to "end".
+			log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
+		case "start", "end", "prefix", "count":
+			parsed[key] = val
+		}
+	}
+	if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" {
+		log.Fatal(`"start"/"end" may not be mixed with "prefix"`)
+	}
+
+	var rr bigtable.RowRange
+	if start, end := parsed["start"], parsed["end"]; end != "" {
+		rr = bigtable.NewRange(start, end)
+	} else if start != "" {
+		rr = bigtable.InfiniteRange(start)
+	}
+	if prefix := parsed["prefix"]; prefix != "" {
+		rr = bigtable.PrefixRange(prefix)
+	}
+
+	var opts []bigtable.ReadOption
+	if count := parsed["count"]; count != "" {
+		n, err := strconv.ParseInt(count, 0, 64)
+		if err != nil {
+			log.Fatalf("Bad count %q: %v", count, err)
+		}
+		opts = append(opts, bigtable.LimitRows(n))
+	}
+
+	// TODO(dsymonds): Support filters.
+	err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
+		printRow(r)
+		return true
+	}, opts...)
+	if err != nil {
+		log.Fatalf("Reading rows: %v", err)
+	}
+}
+
+var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`)
+
+func doSet(ctx context.Context, args ...string) {
+	if len(args) < 3 {
+		log.Fatalf("usage: cbt set <table> <row> family:[column]=val[@ts] ...")
+	}
+	tbl := getClient().Open(args[0])
+	row := args[1]
+	mut := bigtable.NewMutation()
+	for _, arg := range args[2:] {
+		m := setArg.FindStringSubmatch(arg)
+		if m == nil {
+			log.Fatalf("Bad set arg %q", arg)
+		}
+		val := m[3]
+		ts := bigtable.Now()
+		if i := strings.LastIndex(val, "@"); i >= 0 {
+			// Try parsing a timestamp.
+			n, err := strconv.ParseInt(val[i+1:], 0, 64)
+			if err == nil {
+				val = val[:i]
+				ts = bigtable.Timestamp(n)
+			}
+		}
+		mut.Set(m[1], m[2], ts, []byte(val))
+	}
+	if err := tbl.Apply(ctx, row, mut); err != nil {
+		log.Fatalf("Applying mutation: %v", err)
+	}
+}
+
+func doSetGCPolicy(ctx context.Context, args ...string) {
+	if len(args) < 3 {
+		log.Fatalf("usage: cbt setgcpolicy <table> <family>
( maxage= | maxversions= )") + } + table := args[0] + fam := args[1] + + var pol bigtable.GCPolicy + switch p := args[2]; { + case strings.HasPrefix(p, "maxage="): + d, err := parseDuration(p[7:]) + if err != nil { + log.Fatal(err) + } + pol = bigtable.MaxAgePolicy(d) + case strings.HasPrefix(p, "maxversions="): + n, err := strconv.ParseUint(p[12:], 10, 16) + if err != nil { + log.Fatal(err) + } + pol = bigtable.MaxVersionsPolicy(int(n)) + default: + log.Fatalf("Bad GC policy %q", p) + } + if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil { + log.Fatalf("Setting GC policy: %v", err) + } +} + +// parseDuration parses a duration string. +// It is similar to Go's time.ParseDuration, except with a different set of supported units, +// and only simple formats supported. +func parseDuration(s string) (time.Duration, error) { + // [0-9]+[a-z]+ + + // Split [0-9]+ from [a-z]+. + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + } + ds, u := s[:i], s[i:] + if ds == "" || u == "" { + return 0, fmt.Errorf("invalid duration %q", s) + } + // Parse them. + d, err := strconv.ParseUint(ds, 10, 32) + if err != nil { + return 0, fmt.Errorf("invalid duration %q: %v", s, err) + } + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, s) + } + if d > uint64((1<<63-1)/unit) { + // overflow + return 0, fmt.Errorf("invalid duration %q overflows", s) + } + return time.Duration(d) * unit, nil +} + +var unitMap = map[string]time.Duration{ + "ms": time.Millisecond, + "s": time.Second, + "m": time.Minute, + "h": time.Hour, + "d": 24 * time.Hour, +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go new file mode 100644 index 000000000..350e4f006 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
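For a concrete feel for the unit handling and the overflow guard in parseDuration above, an editorial sketch (the cases mirror the test file that follows; not part of the vendored source):

	d, _ := parseDuration("7d")         // digits "7", unit "d": 7 * 24h = 168h
	_, err := parseDuration("3.5h")     // error: '.' stops the digit scan and ".5h" is not a known unit
	_, err2 := parseDuration("106752d") // error: ~292 years overflows int64 nanoseconds, caught by d > (1<<63-1)/unit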
+ +package main + +import ( + "testing" + "time" +) + +func TestParseDuration(t *testing.T) { + tests := []struct { + in string + // out or fail are mutually exclusive + out time.Duration + fail bool + }{ + {in: "10ms", out: 10 * time.Millisecond}, + {in: "3s", out: 3 * time.Second}, + {in: "60m", out: 60 * time.Minute}, + {in: "12h", out: 12 * time.Hour}, + {in: "7d", out: 168 * time.Hour}, + + {in: "", fail: true}, + {in: "0", fail: true}, + {in: "7ns", fail: true}, + {in: "14mo", fail: true}, + {in: "3.5h", fail: true}, + {in: "106752d", fail: true}, // overflow + } + for _, tc := range tests { + got, err := parseDuration(tc.in) + if !tc.fail && err != nil { + t.Errorf("parseDuration(%q) unexpectedly failed: %v", tc.in, err) + continue + } + if tc.fail && err == nil { + t.Errorf("parseDuration(%q) did not fail", tc.in) + continue + } + if tc.fail { + continue + } + if got != tc.out { + t.Errorf("parseDuration(%q) = %v, want %v", tc.in, got, tc.out) + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go new file mode 100644 index 000000000..81981f366 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go @@ -0,0 +1,191 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. +// Run "go generate" to regenerate. +//go:generate go run cbt.go -o cbtdoc.go doc + +/* +Cbt is a tool for doing basic interactions with Cloud Bigtable. + +Usage: + + cbt [options] command [arguments] + +The commands are: + + count Count rows in a table + createfamily Create a column family + createtable Create a table + deletefamily Delete a column family + deleterow Delete a row + deletetable Delete a table + doc Print godoc-suitable documentation for cbt + help Print help text + listinstances List instances in a project + lookup Read from a single row + ls List tables and column families + mddoc Print documentation for cbt in Markdown format + read Read rows + set Set value of a cell + setgcpolicy Set the GC policy for a column family + +Use "cbt help " for more information about a command. + +The options are: + + -project string + project ID + -instance string + Cloud Bigtable instance + -creds string + if set, use application credentials in this file + + +Count rows in a table + +Usage: + cbt count
<table>
+
+
+
+
+Create a column family
+
+Usage:
+	cbt createfamily <table> <family>
+
+
+
+
+Create a table
+
+Usage:
+	cbt createtable <table>
+
+
+
+
+Delete a column family
+
+Usage:
+	cbt deletefamily <table> <family>
+
+
+
+
+Delete a row
+
+Usage:
+	cbt deleterow <table> <row>
+
+
+
+
+Delete a table
+
+Usage:
+	cbt deletetable <table>
+
+
+
+
+Print godoc-suitable documentation for cbt
+
+Usage:
+	cbt doc
+
+
+
+
+Print help text
+
+Usage:
+	cbt help [command]
+
+
+
+
+List instances in a project
+
+Usage:
+	cbt listinstances
+
+
+
+
+Read from a single row
+
+Usage:
+	cbt lookup <table> <row>
+
+
+
+
+List tables and column families
+
+Usage:
+	cbt ls			List tables
+	cbt ls <table>		List column families in <table>
+
+
+
+
+Print documentation for cbt in Markdown format
+
+Usage:
+	cbt mddoc
+
+
+
+
+Read rows
+
+Usage:
+	cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]
+	  start=<row>		Start reading at this row
+	  end=<row>		Stop reading before this row
+	  prefix=<prefix>	Read rows with this prefix
+	  count=<n>		Read only this many rows
+
+
+
+
+
+Set value of a cell
+
+Usage:
+	cbt set <table> <row> family:column=val[@ts] ...
+	  family:column=val[@ts] may be repeated to set multiple cells.
+
+	  ts is an optional integer timestamp.
+	  If it cannot be parsed, the `@ts` part will be
+	  interpreted as part of the value.
+
+
+
+
+Set the GC policy for a column family
+
+Usage:
+	cbt setgcpolicy <table> <family>
( maxage= | maxversions= ) + + maxage= Maximum timestamp age to preserve (e.g. "1h", "4d") + maxversions= Maximum number of versions to preserve + + + + +*/ +package main diff --git a/vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go b/vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go new file mode 100644 index 000000000..67f2e188a --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go @@ -0,0 +1,42 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +cbtemulator launches the in-memory Cloud Bigtable server on the given address. +*/ +package main + +import ( + "flag" + "fmt" + "log" + + "cloud.google.com/go/bigtable/bttest" +) + +var ( + host = flag.String("host", "localhost", "the address to bind to on the local machine") + port = flag.Int("port", 9000, "the port number to bind to on the local machine") +) + +func main() { + flag.Parse() + srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port)) + if err != nil { + log.Fatalf("failed to start emulator: %v", err) + } + + fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr) + select {} +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go b/vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go new file mode 100644 index 000000000..9ce6494ff --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go @@ -0,0 +1,186 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Loadtest does some load testing through the Go client library for Cloud Bigtable. +*/ +package main + +import ( + "bytes" + "flag" + "fmt" + "log" + "math/rand" + "os" + "sync" + "sync/atomic" + "time" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/internal/cbtrc" + "cloud.google.com/go/bigtable/internal/stat" + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +var ( + runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for") + scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist") + csvOutput = flag.String("csv_output", "", + "output path for statistics in .csv format. 
If this file already exists it will be overwritten.") + poolSize = flag.Int("pool_size", 1, "size of the gRPC connection pool to use for the data client") + reqCount = flag.Int("req_count", 100, "number of concurrent requests") + + config *cbtrc.Config + client *bigtable.Client + adminClient *bigtable.AdminClient +) + +func main() { + var err error + config, err = cbtrc.Load() + if err != nil { + log.Fatal(err) + } + config.RegisterFlags() + + flag.Parse() + if err := config.CheckFlags(); err != nil { + log.Fatal(err) + } + if config.Creds != "" { + os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) + } + if flag.NArg() != 0 { + flag.Usage() + os.Exit(1) + } + + var options []option.ClientOption + if *poolSize > 1 { + options = append(options, option.WithGRPCConnectionPool(*poolSize)) + } + + var csvFile *os.File + if *csvOutput != "" { + csvFile, err = os.Create(*csvOutput) + if err != nil { + log.Fatalf("creating csv output file: %v", err) + } + defer csvFile.Close() + log.Printf("Writing statistics to %q ...", *csvOutput) + } + + log.Printf("Dialing connections...") + client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...) + if err != nil { + log.Fatalf("Making bigtable.Client: %v", err) + } + defer client.Close() + adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance) + if err != nil { + log.Fatalf("Making bigtable.AdminClient: %v", err) + } + defer adminClient.Close() + + // Create a scratch table. + log.Printf("Setting up scratch table...") + if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil { + log.Fatalf("Making scratch table %q: %v", *scratchTable, err) + } + if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil { + log.Fatalf("Making scratch table column family: %v", err) + } + // Upon a successful run, delete the table. Don't bother checking for errors. + defer adminClient.DeleteTable(context.Background(), *scratchTable) + + log.Printf("Starting load test... 
(run for %v)", *runFor) + tbl := client.Open(*scratchTable) + sem := make(chan int, *reqCount) // limit the number of requests happening at once + var reads, writes stats + stopTime := time.Now().Add(*runFor) + var wg sync.WaitGroup + for time.Now().Before(stopTime) { + sem <- 1 + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + + ok := true + opStart := time.Now() + var stats *stats + defer func() { + stats.Record(ok, time.Since(opStart)) + }() + + row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows + + switch rand.Intn(10) { + default: + // read + stats = &reads + _, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1))) + if err != nil { + log.Printf("Error doing read: %v", err) + ok = false + } + case 0, 1, 2, 3, 4: + // write + stats = &writes + mut := bigtable.NewMutation() + mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write + if err := tbl.Apply(context.Background(), row, mut); err != nil { + log.Printf("Error doing mutation: %v", err) + ok = false + } + } + }() + } + wg.Wait() + + readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok) + writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok) + log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg) + log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg) + + if csvFile != nil { + stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile) + } +} + +var allStats int64 // atomic + +type stats struct { + mu sync.Mutex + tries, ok int + ds []time.Duration +} + +func (s *stats) Record(ok bool, d time.Duration) { + s.mu.Lock() + s.tries++ + if ok { + s.ok++ + } + s.ds = append(s.ds, d) + s.mu.Unlock() + + if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { + log.Printf("Progress: done %d ops", n) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go b/vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go new file mode 100644 index 000000000..acbfba969 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go @@ -0,0 +1,155 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Scantest does scan-related load testing against Cloud Bigtable. The logic here +mimics a similar test written using the Java client. 
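A representative invocation against an existing, populated table (the values are hypothetical; the flags and the positional table argument are defined below):

	scantest -run_for=30s -concurrent_scans=4 -row_limit=10000 mytable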
+*/ +package main + +import ( + "bytes" + "flag" + "fmt" + "log" + "math/rand" + "os" + "sync" + "sync/atomic" + "text/tabwriter" + "time" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/internal/cbtrc" + "cloud.google.com/go/bigtable/internal/stat" + "golang.org/x/net/context" +) + +var ( + runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for") + numScans = flag.Int("concurrent_scans", 1, "number of concurrent scans") + rowLimit = flag.Int("row_limit", 10000, "max number of records per scan") + + config *cbtrc.Config + client *bigtable.Client +) + +func main() { + flag.Usage = func() { + fmt.Printf("Usage: scantest [options] \n\n") + flag.PrintDefaults() + } + + var err error + config, err = cbtrc.Load() + if err != nil { + log.Fatal(err) + } + config.RegisterFlags() + + flag.Parse() + if err := config.CheckFlags(); err != nil { + log.Fatal(err) + } + if config.Creds != "" { + os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) + } + if flag.NArg() != 1 { + flag.Usage() + os.Exit(1) + } + + table := flag.Arg(0) + + log.Printf("Dialing connections...") + client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance) + if err != nil { + log.Fatalf("Making bigtable.Client: %v", err) + } + defer client.Close() + + log.Printf("Starting scan test... (run for %v)", *runFor) + tbl := client.Open(table) + sem := make(chan int, *numScans) // limit the number of requests happening at once + var scans stats + + stopTime := time.Now().Add(*runFor) + var wg sync.WaitGroup + for time.Now().Before(stopTime) { + sem <- 1 + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + + ok := true + opStart := time.Now() + defer func() { + scans.Record(ok, time.Since(opStart)) + }() + + // Start at a random row key + key := fmt.Sprintf("user%d", rand.Int63()) + limit := bigtable.LimitRows(int64(*rowLimit)) + noop := func(bigtable.Row) bool { return true } + if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil { + log.Printf("Error during scan: %v", err) + ok = false + } + }() + } + wg.Wait() + + agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok) + log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v", + scans.ok, scans.tries, agg, throughputString(agg)) +} + +func throughputString(agg *stat.Aggregate) string { + var buf bytes.Buffer + tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding + rowLimitF := float64(*rowLimit) + fmt.Fprintf( + tw, + "min:\t%.2f\nmedian:\t%.2f\nmax:\t%.2f\n", + rowLimitF/agg.Max.Seconds(), + rowLimitF/agg.Median.Seconds(), + rowLimitF/agg.Min.Seconds()) + tw.Flush() + return buf.String() +} + +var allStats int64 // atomic + +type stats struct { + mu sync.Mutex + tries, ok int + ds []time.Duration +} + +func (s *stats) Record(ok bool, d time.Duration) { + s.mu.Lock() + s.tries++ + if ok { + s.ok++ + } + s.ds = append(s.ds, d) + s.mu.Unlock() + + if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { + log.Printf("Progress: done %d ops", n) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/doc.go b/vendor/cloud.google.com/go/bigtable/doc.go new file mode 100644 index 000000000..449680559 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/doc.go @@ -0,0 +1,119 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package bigtable is an API to Google Cloud Bigtable. + +See https://cloud.google.com/bigtable/docs/ for general product documentation. + +Setup and Credentials + +Use NewClient or NewAdminClient to create a client that can be used to access +the data or admin APIs respectively. Both require credentials that have permission +to access the Cloud Bigtable API. + +If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials +(https://developers.google.com/accounts/docs/application-default-credentials) +is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called. + +To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource. +For instance, you can use service account credentials by visiting +https://cloud.google.com/console/project/MYPROJECT/apiui/credential, +creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing + jsonKey, err := ioutil.ReadFile(pathToKeyFile) + ... + config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc. + ... + client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(config.TokenSource(ctx))) + ... +Here, `google` means the golang.org/x/oauth2/google package +and `option` means the google.golang.org/api/option package. + +Reading + +The principal way to read from a Bigtable is to use the ReadRows method on *Table. +A RowRange specifies a contiguous portion of a table. A Filter may be provided through +RowFilter to limit or transform the data that is returned. + tbl := client.Open("mytable") + ... + // Read all the rows starting with "com.google.", + // but only fetch the columns in the "links" family. + rr := bigtable.PrefixRange("com.google.") + err := tbl.ReadRows(ctx, rr, func(r Row) bool { + // do something with r + return true // keep going + }, bigtable.RowFilter(bigtable.FamilyFilter("links"))) + ... + +To read a single row, use the ReadRow helper method. + r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key + ... + +Writing + +This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite. +The former expresses idempotent operations. +The latter expresses non-idempotent operations and returns the new values of updated cells. +These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite), +building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite +methods on a Table. + +For instance, to set a couple of cells in a table, + tbl := client.Open("mytable") + mut := bigtable.NewMutation() + mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1")) + mut.Set("links", "golang.org", bigtable.Now(), []byte("1")) + err := tbl.Apply(ctx, "com.google.cloud", mut) + ... 
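A conditional write follows the same Apply pattern; a sketch using this package's NewCondMutation, reusing the tbl and ctx from the examples above (illustrative only):

	mut := bigtable.NewMutation()
	mut.Set("links", "golang.org", bigtable.Now(), []byte("1"))
	// Apply mut only if the row already has a cell in the "links" family;
	// the nil third argument means "do nothing otherwise".
	cond := bigtable.NewCondMutation(bigtable.FamilyFilter("links"), mut, nil)
	err := tbl.Apply(ctx, "com.google.cloud", cond)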
+ +To increment an encoded value in one cell, + tbl := client.Open("mytable") + rmw := bigtable.NewReadModifyWrite() + rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org" + r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw) + ... + +Retries + +If a read or write operation encounters a transient error it will be retried until a successful +response, an unretryable error or the context deadline is reached. Non-idempotent writes (where +the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls +will not re-scan rows that have already been processed. +*/ +package bigtable // import "cloud.google.com/go/bigtable" + +// Scope constants for authentication credentials. +// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile. +const ( + // Scope is the OAuth scope for Cloud Bigtable data operations. + Scope = "https://www.googleapis.com/auth/bigtable.data" + // ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations. + ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly" + + // AdminScope is the OAuth scope for Cloud Bigtable table admin operations. + AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table" + + // InstanceAdminScope is the OAuth scope for Cloud Bigtable instance (and cluster) admin operations. + InstanceAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster" +) + +// clientUserAgent identifies the version of this package. +// It should be bumped upon significant changes only. +const clientUserAgent = "cbt-go/20160628" + +// resourcePrefixHeader is the name of the metadata header used to indicate +// the resource being operated on. +const resourcePrefixHeader = "google-cloud-resource-prefix" diff --git a/vendor/cloud.google.com/go/bigtable/filter.go b/vendor/cloud.google.com/go/bigtable/filter.go new file mode 100644 index 000000000..5c2c1e20f --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/filter.go @@ -0,0 +1,156 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "strings" + + btpb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +// A Filter represents a row filter. +type Filter interface { + String() string + proto() *btpb.RowFilter +} + +// ChainFilters returns a filter that applies a sequence of filters. 
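As a usage sketch for the filter factories defined in this file (assuming a *bigtable.Table named tbl and a context ctx; not part of the vendored source):

	// ChainFilters applies its sub-filters in sequence: keep only cells in
	// the "links" family, then only the newest cell per column.
	f := bigtable.ChainFilters(bigtable.FamilyFilter("links"), bigtable.LatestNFilter(1))
	err := tbl.ReadRows(ctx, bigtable.PrefixRange("com."), func(r bigtable.Row) bool {
		return true // process r here
	}, bigtable.RowFilter(f))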
+func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} } + +type chainFilter struct { + sub []Filter +} + +func (cf chainFilter) String() string { + var ss []string + for _, sf := range cf.sub { + ss = append(ss, sf.String()) + } + return "(" + strings.Join(ss, " | ") + ")" +} + +func (cf chainFilter) proto() *btpb.RowFilter { + chain := &btpb.RowFilter_Chain{} + for _, sf := range cf.sub { + chain.Filters = append(chain.Filters, sf.proto()) + } + return &btpb.RowFilter{ + Filter: &btpb.RowFilter_Chain_{chain}, + } +} + +// InterleaveFilters returns a filter that applies a set of filters in parallel +// and interleaves the results. +func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} } + +type interleaveFilter struct { + sub []Filter +} + +func (ilf interleaveFilter) String() string { + var ss []string + for _, sf := range ilf.sub { + ss = append(ss, sf.String()) + } + return "(" + strings.Join(ss, " + ") + ")" +} + +func (ilf interleaveFilter) proto() *btpb.RowFilter { + inter := &btpb.RowFilter_Interleave{} + for _, sf := range ilf.sub { + inter.Filters = append(inter.Filters, sf.proto()) + } + return &btpb.RowFilter{ + Filter: &btpb.RowFilter_Interleave_{inter}, + } +} + +// RowKeyFilter returns a filter that matches cells from rows whose +// key matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) } + +type rowKeyFilter string + +func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) } + +func (rkf rowKeyFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}} +} + +// FamilyFilter returns a filter that matches cells whose family name +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func FamilyFilter(pattern string) Filter { return familyFilter(pattern) } + +type familyFilter string + +func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) } + +func (ff familyFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{string(ff)}} +} + +// ColumnFilter returns a filter that matches cells whose column name +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func ColumnFilter(pattern string) Filter { return columnFilter(pattern) } + +type columnFilter string + +func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) } + +func (cf columnFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}} +} + +// ValueFilter returns a filter that matches cells whose value +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func ValueFilter(pattern string) Filter { return valueFilter(pattern) } + +type valueFilter string + +func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) } + +func (vf valueFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte(vf)}} +} + +// LatestNFilter returns a filter that matches the most recent N cells in each column. 
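Where ChainFilters narrows, InterleaveFilters broadens: every sub-filter sees the same input and the surviving cells are merged. A minimal sketch under the same tbl/ctx assumptions:

	// Emit cells whose column qualifier matches either RE2 pattern.
	f := bigtable.InterleaveFilters(
		bigtable.ColumnFilter("golang\\.org"),
		bigtable.ColumnFilter("maps\\.google\\.com"),
	)
	r, err := tbl.ReadRow(ctx, "com.google.cloud", bigtable.RowFilter(f))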
+func LatestNFilter(n int) Filter { return latestNFilter(n) } + +type latestNFilter int32 + +func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) } + +func (lnf latestNFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}} +} + +// StripValueFilter returns a filter that replaces each value with the empty string. +func StripValueFilter() Filter { return stripValueFilter{} } + +type stripValueFilter struct{} + +func (stripValueFilter) String() string { return "strip_value()" } +func (stripValueFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}} +} + +// TODO(dsymonds): More filters: cond, col/ts/value range, sampling diff --git a/vendor/cloud.google.com/go/bigtable/gc.go b/vendor/cloud.google.com/go/bigtable/gc.go new file mode 100644 index 000000000..621c7b359 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/gc.go @@ -0,0 +1,131 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "strings" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" + bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" +) + +// A GCPolicy represents a rule that determines which cells are eligible for garbage collection. +type GCPolicy interface { + String() string + proto() *bttdpb.GcRule +} + +// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply. +func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} } + +type intersectionPolicy struct { + sub []GCPolicy +} + +func (ip intersectionPolicy) String() string { + var ss []string + for _, sp := range ip.sub { + ss = append(ss, sp.String()) + } + return "(" + strings.Join(ss, " && ") + ")" +} + +func (ip intersectionPolicy) proto() *bttdpb.GcRule { + inter := &bttdpb.GcRule_Intersection{} + for _, sp := range ip.sub { + inter.Rules = append(inter.Rules, sp.proto()) + } + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_Intersection_{inter}, + } +} + +// UnionPolicy returns a GC policy that applies when any of its sub-policies apply. +func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} } + +type unionPolicy struct { + sub []GCPolicy +} + +func (up unionPolicy) String() string { + var ss []string + for _, sp := range up.sub { + ss = append(ss, sp.String()) + } + return "(" + strings.Join(ss, " || ") + ")" +} + +func (up unionPolicy) proto() *bttdpb.GcRule { + union := &bttdpb.GcRule_Union{} + for _, sp := range up.sub { + union.Rules = append(union.Rules, sp.proto()) + } + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_Union_{union}, + } +} + +// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell +// except for the most recent n. 
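The combinators above compose with the leaf policies defined next; for example, to garbage-collect a cell only once it is both older than a week and beyond the three newest versions (a sketch assuming an AdminClient and an existing table and family):

	pol := bigtable.IntersectionPolicy(
		bigtable.MaxVersionsPolicy(3),
		bigtable.MaxAgePolicy(7*24*time.Hour),
	)
	// pol.String() renders as "(versions() > 3 && age() > 7d)".
	err := adminClient.SetGCPolicy(ctx, "mytable", "f", pol)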
+func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) } + +type maxVersionsPolicy int + +func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) } + +func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule { + return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}} +} + +// MaxAgePolicy returns a GC policy that applies to all cells +// older than the given age. +func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) } + +type maxAgePolicy time.Duration + +var units = []struct { + d time.Duration + suffix string +}{ + {24 * time.Hour, "d"}, + {time.Hour, "h"}, + {time.Minute, "m"}, +} + +func (ma maxAgePolicy) String() string { + d := time.Duration(ma) + for _, u := range units { + if d%u.d == 0 { + return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix) + } + } + return fmt.Sprintf("age() > %d", d/time.Microsecond) +} + +func (ma maxAgePolicy) proto() *bttdpb.GcRule { + // This doesn't handle overflows, etc. + // Fix this if people care about GC policies over 290 years. + ns := time.Duration(ma).Nanoseconds() + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{ + Seconds: ns / 1e9, + Nanos: int32(ns % 1e9), + }}, + } +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/cbtrc/cbtrc.go b/vendor/cloud.google.com/go/bigtable/internal/cbtrc/cbtrc.go new file mode 100644 index 000000000..4578ac3e6 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/cbtrc/cbtrc.go @@ -0,0 +1,99 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cbtrc encapsulates common code for reading .cbtrc files. +package cbtrc + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +// Config represents a configuration. +type Config struct { + Project, Instance string // required + Creds string // optional +} + +// RegisterFlags registers a set of standard flags for this config. +// It should be called before flag.Parse. +func (c *Config) RegisterFlags() { + flag.StringVar(&c.Project, "project", c.Project, "project ID") + flag.StringVar(&c.Instance, "instance", c.Instance, "Cloud Bigtable instance") + flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file") +} + +// CheckFlags checks that the required config values are set. +func (c *Config) CheckFlags() error { + var missing []string + if c.Project == "" { + missing = append(missing, "-project") + } + if c.Instance == "" { + missing = append(missing, "-instance") + } + if len(missing) > 0 { + return fmt.Errorf("Missing %s", strings.Join(missing, " and ")) + } + return nil +} + +// Filename returns the filename consulted for standard configuration. +func Filename() string { + // TODO(dsymonds): Might need tweaking for Windows. + return filepath.Join(os.Getenv("HOME"), ".cbtrc") +} + +// Load loads a .cbtrc file. +// If the file is not present, an empty config is returned. 
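For reference, a minimal ~/.cbtrc that Load (below) parses into a Config; the values are placeholders:

	project = my-project-123
	instance = my-instance
	creds = /path/to/key.json

A missing file yields an empty Config (flags must then supply the values); a line without "=" or with an unknown key is an error.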
+func Load() (*Config, error) { + filename := Filename() + data, err := ioutil.ReadFile(filename) + if err != nil { + // silent fail if the file isn't there + if os.IsNotExist(err) { + return &Config{}, nil + } + return nil, fmt.Errorf("Reading %s: %v", filename, err) + } + c := new(Config) + s := bufio.NewScanner(bytes.NewReader(data)) + for s.Scan() { + line := s.Text() + i := strings.Index(line, "=") + if i < 0 { + return nil, fmt.Errorf("Bad line in %s: %q", filename, line) + } + key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:]) + switch key { + default: + return nil, fmt.Errorf("Unknown key in %s: %q", filename, key) + case "project": + c.Project = val + case "instance": + c.Instance = val + case "creds": + c.Creds = val + } + } + return c, s.Err() +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go b/vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go new file mode 100644 index 000000000..60a18bee6 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This is ia snapshot from github.com/googleapis/gax-go with minor modifications. +package gax + +import ( + "time" + + "google.golang.org/grpc/codes" +) + +type CallOption interface { + Resolve(*CallSettings) +} + +type callOptions []CallOption + +func (opts callOptions) Resolve(s *CallSettings) *CallSettings { + for _, opt := range opts { + opt.Resolve(s) + } + return s +} + +// Encapsulates the call settings for a particular API call. +type CallSettings struct { + Timeout time.Duration + RetrySettings RetrySettings +} + +// Per-call configurable settings for retrying upon transient failure. +type RetrySettings struct { + RetryCodes map[codes.Code]bool + BackoffSettings BackoffSettings +} + +// Parameters to the exponential backoff algorithm for retrying. +type BackoffSettings struct { + DelayTimeoutSettings MultipliableDuration + RPCTimeoutSettings MultipliableDuration +} + +type MultipliableDuration struct { + Initial time.Duration + Max time.Duration + Multiplier float64 +} + +func (w CallSettings) Resolve(s *CallSettings) { + s.Timeout = w.Timeout + s.RetrySettings = w.RetrySettings + + s.RetrySettings.RetryCodes = make(map[codes.Code]bool, len(w.RetrySettings.RetryCodes)) + for key, value := range w.RetrySettings.RetryCodes { + s.RetrySettings.RetryCodes[key] = value + } +} + +type withRetryCodes []codes.Code + +func (w withRetryCodes) Resolve(s *CallSettings) { + s.RetrySettings.RetryCodes = make(map[codes.Code]bool) + for _, code := range w { + s.RetrySettings.RetryCodes[code] = true + } +} + +// WithRetryCodes sets a list of Google API canonical error codes upon which a +// retry should be attempted. 
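Tying these options to the Invoke entry point in invoke.go, a caller might write the following (doRPC is a hypothetical stub; the option constructors are declared below):

	opts := []gax.CallOption{
		gax.WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}),
		gax.WithDelayTimeoutSettings(10*time.Millisecond, 200*time.Millisecond, 1.5),
	}
	// Retries doRPC with randomized exponential backoff until it succeeds,
	// hits a non-retryable code, or ctx expires.
	err := gax.Invoke(ctx, func(ctx context.Context) error { return doRPC(ctx) }, opts...)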
+func WithRetryCodes(retryCodes []codes.Code) CallOption { + return withRetryCodes(retryCodes) +} + +type withDelayTimeoutSettings MultipliableDuration + +func (w withDelayTimeoutSettings) Resolve(s *CallSettings) { + s.RetrySettings.BackoffSettings.DelayTimeoutSettings = MultipliableDuration(w) +} + +// WithDelayTimeoutSettings specifies: +// - The initial delay time, in milliseconds, between the completion of +// the first failed request and the initiation of the first retrying +// request. +// - The multiplier by which to increase the delay time between the +// completion of failed requests, and the initiation of the subsequent +// retrying request. +// - The maximum delay time, in milliseconds, between requests. When this +// value is reached, `RetryDelayMultiplier` will no longer be used to +// increase delay time. +func WithDelayTimeoutSettings(initial time.Duration, max time.Duration, multiplier float64) CallOption { + return withDelayTimeoutSettings(MultipliableDuration{initial, max, multiplier}) +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go new file mode 100644 index 000000000..b7be7d41e --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go @@ -0,0 +1,84 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This is ia snapshot from github.com/googleapis/gax-go with minor modifications. +package gax + +import ( + "math/rand" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "log" + "os" +) + +var logger *log.Logger = log.New(os.Stderr, "", log.LstdFlags) + +// A user defined call stub. +type APICall func(context.Context) error + +// scaleDuration returns the product of a and mult. +func scaleDuration(a time.Duration, mult float64) time.Duration { + ns := float64(a) * mult + return time.Duration(ns) +} + +// invokeWithRetry calls stub using an exponential backoff retry mechanism +// based on the values provided in callSettings. +func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error { + retrySettings := callSettings.RetrySettings + backoffSettings := callSettings.RetrySettings.BackoffSettings + delay := backoffSettings.DelayTimeoutSettings.Initial + for { + // If the deadline is exceeded... 
+ if ctx.Err() != nil { + return ctx.Err() + } + err := stub(ctx) + code := grpc.Code(err) + if code == codes.OK { + return nil + } + + if !retrySettings.RetryCodes[code] { + return err + } + + // Sleep a random amount up to the current delay + d := time.Duration(rand.Int63n(int64(delay))) + delayCtx, _ := context.WithTimeout(ctx, delay) + logger.Printf("Retryable error: %v, retrying in %v", err, d) + <-delayCtx.Done() + + delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier) + if delay > backoffSettings.DelayTimeoutSettings.Max { + delay = backoffSettings.DelayTimeoutSettings.Max + } + } +} + +// Invoke calls stub with a child of context modified by the specified options. +func Invoke(ctx context.Context, stub APICall, opts ...CallOption) error { + settings := &CallSettings{} + callOptions(opts).Resolve(settings) + if len(settings.RetrySettings.RetryCodes) > 0 { + return invokeWithRetry(ctx, stub, *settings) + } + return stub(ctx) +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go new file mode 100644 index 000000000..1e25f78e8 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go @@ -0,0 +1,49 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package gax + +import ( + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +func TestRandomizedDelays(t *testing.T) { + max := 200 * time.Millisecond + settings := []CallOption{ + WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}), + WithDelayTimeoutSettings(10*time.Millisecond, max, 1.5), + } + + deadline := time.Now().Add(1 * time.Second) + ctx, _ := context.WithDeadline(context.Background(), deadline) + var invokeTime time.Time + Invoke(ctx, func(childCtx context.Context) error { + // Keep failing, make sure we never slept more than max (plus a fudge factor) + if !invokeTime.IsZero() { + if time.Since(invokeTime) > (max + 20*time.Millisecond) { + t.Fatalf("Slept too long: %v", max) + } + } + invokeTime = time.Now() + // Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90 + errf := grpc.Errorf + return errf(codes.Unavailable, "") + }, settings...) +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/option/option.go b/vendor/cloud.google.com/go/bigtable/internal/option/option.go new file mode 100644 index 000000000..3b9072e65 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/option/option.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package option contains common code for dealing with client options. +package option + +import ( + "fmt" + "os" + + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +// DefaultClientOptions returns the default client options to use for the +// client's gRPC connection. +func DefaultClientOptions(endpoint, scope, userAgent string) ([]option.ClientOption, error) { + var o []option.ClientOption + // Check the environment variables for the bigtable emulator. + // Dial it directly and don't pass any credentials. + if addr := os.Getenv("BIGTABLE_EMULATOR_HOST"); addr != "" { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("emulator grpc.Dial: %v", err) + } + o = []option.ClientOption{option.WithGRPCConn(conn)} + } else { + o = []option.ClientOption{ + option.WithEndpoint(endpoint), + option.WithScopes(scope), + option.WithUserAgent(userAgent), + } + } + return o, nil +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/stat/stats.go b/vendor/cloud.google.com/go/bigtable/internal/stat/stats.go new file mode 100644 index 000000000..5fb047f60 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/stat/stats.go @@ -0,0 +1,144 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stat + +import ( + "bytes" + "encoding/csv" + "fmt" + "io" + "math" + "sort" + "strconv" + "text/tabwriter" + "time" +) + +type byDuration []time.Duration + +func (data byDuration) Len() int { return len(data) } +func (data byDuration) Swap(i, j int) { data[i], data[j] = data[j], data[i] } +func (data byDuration) Less(i, j int) bool { return data[i] < data[j] } + +// quantile returns a value representing the kth of q quantiles. +// May alter the order of data. 
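To make the interpolation concrete, a worked example (editorial, not from the source): i = k * (len(data)-1)/q indexes the sorted data, interpolating linearly when i falls between two elements.

	data := []time.Duration{10 * time.Millisecond, 20 * time.Millisecond,
		30 * time.Millisecond, 40 * time.Millisecond, 50 * time.Millisecond}
	q1, _ := quantile(data, 3, 4) // i = 3*(4/4) = 3   -> data[3] = 40ms
	q2, _ := quantile(data, 3, 8) // i = 3*(4/8) = 1.5 -> (20ms+30ms)/2 = 25ms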
+func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool) { + if len(data) < 1 { + return 0, false + } + if k > q { + return 0, false + } + if k < 0 || q < 1 { + return 0, false + } + + sort.Sort(byDuration(data)) + + if k == 0 { + return data[0], true + } + if k == q { + return data[len(data)-1], true + } + + bucketSize := float64(len(data)-1) / float64(q) + i := float64(k) * bucketSize + + lower := int(math.Trunc(i)) + var upper int + if i > float64(lower) && lower+1 < len(data) { + // If the quantile lies between two elements + upper = lower + 1 + } else { + upper = lower + } + weightUpper := i - float64(lower) + weightLower := 1 - weightUpper + return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true +} + +type Aggregate struct { + Name string + Count, Errors int + Min, Median, Max time.Duration + P75, P90, P95, P99 time.Duration // percentiles +} + +// NewAggregate constructs an aggregate from latencies. Returns nil if latencies does not contain aggregateable data. +func NewAggregate(name string, latencies []time.Duration, errorCount int) *Aggregate { + agg := Aggregate{Name: name, Count: len(latencies), Errors: errorCount} + + if len(latencies) == 0 { + return nil + } + var ok bool + if agg.Min, ok = quantile(latencies, 0, 2); !ok { + return nil + } + if agg.Median, ok = quantile(latencies, 1, 2); !ok { + return nil + } + if agg.Max, ok = quantile(latencies, 2, 2); !ok { + return nil + } + if agg.P75, ok = quantile(latencies, 75, 100); !ok { + return nil + } + if agg.P90, ok = quantile(latencies, 90, 100); !ok { + return nil + } + if agg.P95, ok = quantile(latencies, 95, 100); !ok { + return nil + } + if agg.P99, ok = quantile(latencies, 99, 100); !ok { + return nil + } + return &agg +} + +func (agg *Aggregate) String() string { + if agg == nil { + return "no data" + } + var buf bytes.Buffer + tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding + fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n", + agg.Min, agg.Median, agg.Max, agg.P95, agg.P99) + tw.Flush() + return buf.String() +} + +// WriteCSV writes a csv file to the given Writer, +// with a header row and one row per aggregate. +func WriteCSV(aggs []*Aggregate, iow io.Writer) error { + w := csv.NewWriter(iow) + defer w.Flush() + err := w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"}) + if err != nil { + return err + } + for _, agg := range aggs { + err = w.Write([]string{ + agg.Name, strconv.Itoa(agg.Count), strconv.Itoa(agg.Errors), + agg.Min.String(), agg.Median.String(), agg.Max.String(), + agg.P75.String(), agg.P90.String(), agg.P95.String(), agg.P99.String(), + }) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/cloud.google.com/go/bigtable/reader.go b/vendor/cloud.google.com/go/bigtable/reader.go new file mode 100644 index 000000000..4af2f7020 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/reader.go @@ -0,0 +1,250 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "bytes" + "fmt" + + btpb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +// A Row is returned by ReadRows. The map is keyed by column family (the prefix +// of the column name before the colon). The values are the returned ReadItems +// for that column family in the order returned by Read. +type Row map[string][]ReadItem + +// Key returns the row's key, or "" if the row is empty. +func (r Row) Key() string { + for _, items := range r { + if len(items) > 0 { + return items[0].Row + } + } + return "" +} + +// A ReadItem is returned by Read. A ReadItem contains data from a specific row and column. +type ReadItem struct { + Row, Column string + Timestamp Timestamp + Value []byte +} + +// The current state of the read rows state machine. +type rrState int64 + +const ( + newRow rrState = iota + rowInProgress + cellInProgress +) + +// chunkReader handles cell chunks from the read rows response and combines +// them into full Rows. +type chunkReader struct { + state rrState + curKey []byte + curFam string + curQual []byte + curTS int64 + curVal []byte + curRow Row + lastKey string +} + +// newChunkReader returns a new chunkReader for handling read rows responses. +func newChunkReader() *chunkReader { + return &chunkReader{state: newRow} +} + +// Process takes a cell chunk and returns a new Row if the given chunk +// completes a Row, or nil otherwise. +func (cr *chunkReader) Process(cc *btpb.ReadRowsResponse_CellChunk) (Row, error) { + var row Row + switch cr.state { + case newRow: + if err := cr.validateNewRow(cc); err != nil { + return nil, err + } + + cr.curRow = make(Row) + cr.curKey = cc.RowKey + cr.curFam = cc.FamilyName.Value + cr.curQual = cc.Qualifier.Value + cr.curTS = cc.TimestampMicros + row = cr.handleCellValue(cc) + + case rowInProgress: + if err := cr.validateRowInProgress(cc); err != nil { + return nil, err + } + + if cc.GetResetRow() { + cr.resetToNewRow() + return nil, nil + } + + if cc.FamilyName != nil { + cr.curFam = cc.FamilyName.Value + } + if cc.Qualifier != nil { + cr.curQual = cc.Qualifier.Value + } + cr.curTS = cc.TimestampMicros + row = cr.handleCellValue(cc) + + case cellInProgress: + if err := cr.validateCellInProgress(cc); err != nil { + return nil, err + } + if cc.GetResetRow() { + cr.resetToNewRow() + return nil, nil + } + row = cr.handleCellValue(cc) + } + + return row, nil +} + +// Close must be called after all cell chunks from the response +// have been processed. An error will be returned if the reader is +// in an invalid state, in which case the error should be propagated to the caller. +func (cr *chunkReader) Close() error { + if cr.state != newRow { + return fmt.Errorf("invalid state for end of stream %q", cr.state) + } + return nil +} + +// handleCellValue returns a Row if the cell value includes a commit, otherwise nil. +func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row { + if cc.ValueSize > 0 { + // ValueSize is specified so expect a split value of ValueSize bytes + if cr.curVal == nil { + cr.curVal = make([]byte, 0, cc.ValueSize) + } + cr.curVal = append(cr.curVal, cc.Value...) + cr.state = cellInProgress + } else { + // This cell is either the complete value or the last chunk of a split + if cr.curVal == nil { + cr.curVal = cc.Value + } else { + cr.curVal = append(cr.curVal, cc.Value...) 
+ } + cr.finishCell() + + if cc.GetCommitRow() { + return cr.commitRow() + } else { + cr.state = rowInProgress + } + } + + return nil +} + +func (cr *chunkReader) finishCell() { + ri := ReadItem{ + Row: string(cr.curKey), + Column: fmt.Sprintf("%s:%s", cr.curFam, cr.curQual), + Timestamp: Timestamp(cr.curTS), + Value: cr.curVal, + } + cr.curRow[cr.curFam] = append(cr.curRow[cr.curFam], ri) + cr.curVal = nil +} + +func (cr *chunkReader) commitRow() Row { + row := cr.curRow + cr.lastKey = cr.curRow.Key() + cr.resetToNewRow() + return row +} + +func (cr *chunkReader) resetToNewRow() { + cr.curKey = nil + cr.curFam = "" + cr.curQual = nil + cr.curVal = nil + cr.curRow = nil + cr.curTS = 0 + cr.state = newRow +} + +func (cr *chunkReader) validateNewRow(cc *btpb.ReadRowsResponse_CellChunk) error { + if cc.GetResetRow() { + return fmt.Errorf("reset_row not allowed between rows") + } + if cc.RowKey == nil || cc.FamilyName == nil || cc.Qualifier == nil { + return fmt.Errorf("missing key field for new row %v", cc) + } + if cr.lastKey != "" && cr.lastKey >= string(cc.RowKey) { + return fmt.Errorf("out of order row key: %q, %q", cr.lastKey, string(cc.RowKey)) + } + return nil +} + +func (cr *chunkReader) validateRowInProgress(cc *btpb.ReadRowsResponse_CellChunk) error { + if err := cr.validateRowStatus(cc); err != nil { + return err + } + if cc.RowKey != nil && !bytes.Equal(cc.RowKey, cr.curKey) { + return fmt.Errorf("received new row key %q during existing row %q", cc.RowKey, cr.curKey) + } + if cc.FamilyName != nil && cc.Qualifier == nil { + return fmt.Errorf("family name %q specified without a qualifier", cc.FamilyName) + } + return nil +} + +func (cr *chunkReader) validateCellInProgress(cc *btpb.ReadRowsResponse_CellChunk) error { + if err := cr.validateRowStatus(cc); err != nil { + return err + } + if cr.curVal == nil { + return fmt.Errorf("no cached cell while CELL_IN_PROGRESS %v", cc) + } + if cc.GetResetRow() == false && cr.isAnyKeyPresent(cc) { + return fmt.Errorf("cell key components found while CELL_IN_PROGRESS %v", cc) + } + return nil +} + +func (cr *chunkReader) isAnyKeyPresent(cc *btpb.ReadRowsResponse_CellChunk) bool { + return cc.RowKey != nil || + cc.FamilyName != nil || + cc.Qualifier != nil || + cc.TimestampMicros != 0 +} + +// Validate a RowStatus, commit or reset, if present. +func (cr *chunkReader) validateRowStatus(cc *btpb.ReadRowsResponse_CellChunk) error { + // Resets can't be specified with any other part of a cell + if cc.GetResetRow() && (cr.isAnyKeyPresent(cc) || + cc.Value != nil || + cc.ValueSize != 0 || + cc.Labels != nil) { + return fmt.Errorf("reset must not be specified with other fields %v", cc) + } + if cc.GetCommitRow() && cc.ValueSize > 0 { + return fmt.Errorf("commit row found in between chunks in a cell") + } + return nil +} diff --git a/vendor/cloud.google.com/go/bigtable/reader_test.go b/vendor/cloud.google.com/go/bigtable/reader_test.go new file mode 100644 index 000000000..24a179148 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/reader_test.go @@ -0,0 +1,343 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "reflect" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/wrappers" + btspb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +// Indicates that a field in the proto should be omitted, rather than included +// as a wrapped empty string. +const nilStr = "<>" + +func TestSingleCell(t *testing.T) { + cr := newChunkReader() + + // All in one cell + row, err := cr.Process(cc("rk", "fm", "col", 1, "value", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + if row == nil { + t.Fatalf("Missing row") + } + if len(row["fm"]) != 1 { + t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"])) + } + want := []ReadItem{ri("rk", "fm", "col", 1, "value")} + if !reflect.DeepEqual(row["fm"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestMultipleCells(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) + cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false)) + cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false)) + cr.Process(cc("rs", "fm2", "col1", 0, "val4", 0, false)) + row, err := cr.Process(cc("rs", "fm2", "col2", 1, "extralongval5", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + if row == nil { + t.Fatalf("Missing row") + } + + want := []ReadItem{ + ri("rs", "fm1", "col1", 0, "val1"), + ri("rs", "fm1", "col1", 1, "val2"), + ri("rs", "fm1", "col2", 0, "val3"), + } + if !reflect.DeepEqual(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + want = []ReadItem{ + ri("rs", "fm2", "col1", 0, "val4"), + ri("rs", "fm2", "col2", 1, "extralongval5"), + } + if !reflect.DeepEqual(row["fm2"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestSplitCells(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "hello ", 11, false)) + cr.Process(ccData("world", 0, false)) + row, err := cr.Process(cc("rs", "fm1", "col2", 0, "val2", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + if row == nil { + t.Fatalf("Missing row") + } + + want := []ReadItem{ + ri("rs", "fm1", "col1", 0, "hello world"), + ri("rs", "fm1", "col2", 0, "val2"), + } + if !reflect.DeepEqual(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestMultipleRows(t *testing.T) { + cr := newChunkReader() + + row, err := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} + if !reflect.DeepEqual(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + + row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} + if !reflect.DeepEqual(row["fm2"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) + } + + if err := cr.Close(); err != nil { + 
t.Fatalf("Close: %v", err) + } +} + +func TestBlankQualifier(t *testing.T) { + cr := newChunkReader() + + row, err := cr.Process(cc("rs1", "fm1", "", 1, "val1", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")} + if !reflect.DeepEqual(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + + row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} + if !reflect.DeepEqual(row["fm2"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) + } + + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestReset(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) + cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false)) + cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false)) + cr.Process(ccReset()) + row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true)) + want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} + if !reflect.DeepEqual(row["fm1"], want) { + t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestNewFamEmptyQualifier(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) + _, err := cr.Process(cc(nilStr, "fm2", nilStr, 0, "val2", 0, true)) + if err == nil { + t.Fatalf("Expected error on second chunk with no qualifier set") + } +} + +// The read rows acceptance test reads a json file specifying a number of tests, +// each consisting of one or more cell chunk text protos and one or more resulting +// cells or errors. +type AcceptanceTest struct { + Tests []TestCase `json:"tests"` +} + +type TestCase struct { + Name string `json:"name"` + Chunks []string `json:"chunks"` + Results []TestResult `json:"results"` +} + +type TestResult struct { + RK string `json:"rk"` + FM string `json:"fm"` + Qual string `json:"qual"` + TS int64 `json:"ts"` + Value string `json:"value"` + Error bool `json:"error"` // If true, expect an error. Ignore any other field. 
+}
+
+func TestAcceptance(t *testing.T) {
+	testJson, err := ioutil.ReadFile("./testdata/read-rows-acceptance-test.json")
+	if err != nil {
+		t.Fatalf("could not open acceptance test file: %v", err)
+	}
+
+	var accTest AcceptanceTest
+	err = json.Unmarshal(testJson, &accTest)
+	if err != nil {
+		t.Fatalf("could not parse acceptance test file: %v", err)
+	}
+
+	for _, test := range accTest.Tests {
+		runTestCase(t, test)
+	}
+}
+
+func runTestCase(t *testing.T, test TestCase) {
+	// Feed each chunk to a ChunkReader and collect the results as we go
+	cr := newChunkReader()
+	var results []TestResult
+	var seenErr bool
+	for _, chunkText := range test.Chunks {
+		// Parse and pass each cell chunk to the ChunkReader
+		cc := &btspb.ReadRowsResponse_CellChunk{}
+		err := proto.UnmarshalText(chunkText, cc)
+		if err != nil {
+			t.Errorf("[%s] failed to unmarshal text proto: %s\n%s", test.Name, chunkText, err)
+			return
+		}
+		row, err := cr.Process(cc)
+		if err != nil {
+			results = append(results, TestResult{Error: true})
+			seenErr = true
+			break
+		} else {
+			// Turn the Row into TestResults
+			for fm, ris := range row {
+				for _, ri := range ris {
+					tr := TestResult{
+						RK:    ri.Row,
+						FM:    fm,
+						Qual:  strings.Split(ri.Column, ":")[1],
+						TS:    int64(ri.Timestamp),
+						Value: string(ri.Value),
+					}
+					results = append(results, tr)
+				}
+			}
+		}
+	}
+
+	// Only Close if we haven't seen an error yet; otherwise an error result
+	// has already been recorded above.
+	if !seenErr {
+		err := cr.Close()
+		if err != nil {
+			results = append(results, TestResult{Error: true})
+		}
+	}
+
+	got := toSet(results)
+	want := toSet(test.Results)
+	if !reflect.DeepEqual(got, want) {
+		t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want)
+	}
+}
+
+func toSet(res []TestResult) map[TestResult]bool {
+	set := make(map[TestResult]bool)
+	for _, tr := range res {
+		set[tr] = true
+	}
+	return set
+}
+
+// ri returns a ReadItem for the given components
+func ri(rk string, fm string, qual string, ts int64, val string) ReadItem {
+	return ReadItem{Row: rk, Column: fmt.Sprintf("%s:%s", fm, qual), Value: []byte(val), Timestamp: Timestamp(ts)}
+}
+
+// cc returns a CellChunk proto
+func cc(rk string, fm string, qual string, ts int64, val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {
+	// The components of the cell key are wrapped and can be null or empty
+	var rkWrapper []byte
+	if rk == nilStr {
+		rkWrapper = nil
+	} else {
+		rkWrapper = []byte(rk)
+	}
+
+	var fmWrapper *wrappers.StringValue
+	if fm != nilStr {
+		fmWrapper = &wrappers.StringValue{Value: fm}
+	} else {
+		fmWrapper = nil
+	}
+
+	var qualWrapper *wrappers.BytesValue
+	if qual != nilStr {
+		qualWrapper = &wrappers.BytesValue{Value: []byte(qual)}
+	} else {
+		qualWrapper = nil
+	}
+
+	return &btspb.ReadRowsResponse_CellChunk{
+		RowKey:          rkWrapper,
+		FamilyName:      fmWrapper,
+		Qualifier:       qualWrapper,
+		TimestampMicros: ts,
+		Value:           []byte(val),
+		ValueSize:       size,
+		RowStatus:       &btspb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: commit}}
+}
+
+// ccData returns a CellChunk with only a value and size
+func ccData(val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {
+	return cc(nilStr, nilStr, nilStr, 0, val, size, commit)
+}
+
+// ccReset returns a CellChunk with ResetRow set to true
+func ccReset() *btspb.ReadRowsResponse_CellChunk {
+	return &btspb.ReadRowsResponse_CellChunk{
+		RowStatus: &btspb.ReadRowsResponse_CellChunk_ResetRow{ResetRow: true}}
+}
diff --git a/vendor/cloud.google.com/go/bigtable/retry_test.go b/vendor/cloud.google.com/go/bigtable/retry_test.go
new file mode 100644
index 000000000..9920f6159
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/retry_test.go
@@ -0,0 +1,359 @@
+/*
+Copyright 2016 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package bigtable
+
+import (
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+
+	"cloud.google.com/go/bigtable/bttest"
+	"github.com/golang/protobuf/ptypes/wrappers"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
+	rpcpb "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) {
+	srv, err := bttest.NewServer("127.0.0.1:0", opt...)
+	if err != nil {
+		return nil, nil, err
+	}
+	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
+	if err != nil {
+		return nil, nil, err
+	}
+
+	client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
+	if err != nil {
+		return nil, nil, err
+	}
+	if err := adminClient.CreateTable(context.Background(), "table"); err != nil {
+		return nil, nil, err
+	}
+	if err := adminClient.CreateColumnFamily(context.Background(), "table", "cf"); err != nil {
+		return nil, nil, err
+	}
+	t := client.Open("table")
+
+	cleanupFunc := func() {
+		adminClient.Close()
+		client.Close()
+		srv.Close()
+	}
+	return t, cleanupFunc, nil
+}
+
+func TestRetryApply(t *testing.T) {
+	ctx := context.Background()
+
+	errCount := 0
+	code := codes.Unavailable // Will be retried
+	// Intercept requests and return an error or defer to the underlying handler
+	errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+		if strings.HasSuffix(info.FullMethod, "MutateRow") && errCount < 3 {
+			errCount++
+			return nil, grpc.Errorf(code, "")
+		}
+		return handler(ctx, req)
+	}
+	tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector))
+	if err != nil {
+		t.Fatalf("fake server setup: %v", err)
+	}
+	defer cleanup()
+
+	mut := NewMutation()
+	mut.Set("cf", "col", 1, []byte("val"))
+	if err := tbl.Apply(ctx, "row1", mut); err != nil {
+		t.Errorf("applying single mutation with retries: %v", err)
+	}
+	row, err := tbl.ReadRow(ctx, "row1")
+	if err != nil {
+		t.Errorf("reading single value with retries: %v", err)
+	}
+	if row == nil {
+		t.Errorf("applying single mutation with retries: could not read back row")
+	}
+
+	code = codes.FailedPrecondition // Won't be retried
+	errCount = 0
+	if err := tbl.Apply(ctx, "row", mut); err == nil {
+		t.Errorf("applying single mutation with no retries: no error")
+	}
+
+	// Check and mutate
+	mutTrue := NewMutation()
+	mutTrue.DeleteRow()
+	mutFalse := NewMutation()
+	mutFalse.Set("cf", "col", 1, []byte("val"))
+	condMut := NewCondMutation(ValueFilter("."), mutTrue, mutFalse)
+
+	errCount = 0
+	code = codes.Unavailable // Will be retried
+	if err := tbl.Apply(ctx, "row1", condMut); err != nil {
+		t.Errorf("conditionally mutating row with retries: %v", err)
+	}
+	row, err = tbl.ReadRow(ctx, "row1") // row1 already in the table
+	if err != nil {
+		t.Errorf("reading single value after conditional mutation: %v", err)
+	}
+	if row != nil {
+		t.Errorf("reading single value after conditional mutation: row not deleted")
+	}
+
+	errCount = 0
+	code = codes.FailedPrecondition // Won't be retried
+	if err := tbl.Apply(ctx, "row", condMut); err == nil {
+		t.Errorf("conditionally mutating row with no retries: no error")
+	}
+}
+
+func TestRetryApplyBulk(t *testing.T) {
+	ctx := context.Background()
+
+	// Intercept requests and delegate to an interceptor defined by the test case
+	errCount := 0
+	var f func(grpc.ServerStream) error
+	errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		if strings.HasSuffix(info.FullMethod, "MutateRows") {
+			return f(ss)
+		}
+		return handler(ctx, ss)
+	}
+
+	tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector))
+	if err != nil {
+		t.Fatalf("fake server setup: %v", err)
+	}
+	defer cleanup()
+
+	errCount = 0
+	// Test overall request failure and retries
+	f = func(ss grpc.ServerStream) error {
+		if errCount < 3 {
+			errCount++
+			return grpc.Errorf(codes.Aborted, "")
+		}
+		return nil
+	}
+	mut := NewMutation()
+	mut.Set("cf", "col", 1, []byte{})
+	errors, err := tbl.ApplyBulk(ctx, []string{"row2"}, []*Mutation{mut})
+	if errors != nil || err != nil {
+		t.Errorf("bulk with request failure: got: %v, %v, want: nil", errors, err)
+	}
+
+	// Test failures and retries in one request
+	errCount = 0
+	m1 := NewMutation()
+	m1.Set("cf", "col", 1, []byte{})
+	m2 := NewMutation()
+	m2.Set("cf", "col2", 1, []byte{})
+	m3 := NewMutation()
+	m3.Set("cf", "col3", 1, []byte{})
+	f = func(ss grpc.ServerStream) error {
+		var err error
+		req := new(btpb.MutateRowsRequest)
+		ss.RecvMsg(req)
+		switch errCount {
+		case 0:
+			// Retryable request failure
+			err = grpc.Errorf(codes.Unavailable, "")
+		case 1:
+			// Two mutations fail
+			writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted)
+			err = nil
+		case 2:
+			// Two failures were retried. One will succeed.
+			if want, got := 2, len(req.Entries); want != got {
+				t.Errorf("2 bulk retries, got: %d, want %d", got, want)
+			}
+			writeMutateRowsResponse(ss, codes.OK, codes.Aborted)
+			err = nil
+		case 3:
+			// One failure was retried and will succeed.
+			if want, got := 1, len(req.Entries); want != got {
+				t.Errorf("1 bulk retry, got: %d, want %d", got, want)
+			}
+			writeMutateRowsResponse(ss, codes.OK)
+			err = nil
+		}
+		errCount++
+		return err
+	}
+	errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3})
+	if errors != nil || err != nil {
+		t.Errorf("bulk with retries: got: %v, %v, want: nil", errors, err)
+	}
+
+	// Test unretryable errors
+	niMut := NewMutation()
+	niMut.Set("cf", "col", ServerTime, []byte{}) // Non-idempotent
+	errCount = 0
+	f = func(ss grpc.ServerStream) error {
+		var err error
+		req := new(btpb.MutateRowsRequest)
+		ss.RecvMsg(req)
+		switch errCount {
+		case 0:
+			// Give non-idempotent mutation a retryable error code.
+			// Nothing should be retried.
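+			// (A ServerTime timestamp asks the server to assign the cell's
+			// timestamp, so replaying the mutation could create a different
+			// cell on each attempt; such mutations are treated as
+			// non-idempotent and are never retried, whatever the error code.)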
+			writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted)
+			err = nil
+		case 1:
+			t.Errorf("unretryable errors: got one retry, want no retries")
+		}
+		errCount++
+		return err
+	}
+	errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut})
+	if err != nil {
+		t.Errorf("unretryable errors: request failed %v", err)
+	}
+	want := []error{
+		grpc.Errorf(codes.FailedPrecondition, ""),
+		grpc.Errorf(codes.Aborted, ""),
+	}
+	if !reflect.DeepEqual(want, errors) {
+		t.Errorf("unretryable errors: got: %v, want: %v", errors, want)
+	}
+
+	// Test individual errors and a deadline exceeded
+	f = func(ss grpc.ServerStream) error {
+		writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted)
+		return nil
+	}
+	ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond)
+	errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3})
+	wantErr := context.DeadlineExceeded
+	if wantErr != err {
+		t.Errorf("deadline exceeded error: got: %v, want: %v", err, wantErr)
+	}
+	if errors != nil {
+		t.Errorf("deadline exceeded errors: got: %v, want: nil", errors)
+	}
+}
+
+func writeMutateRowsResponse(ss grpc.ServerStream, codes ...codes.Code) error {
+	res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(codes))}
+	for i, code := range codes {
+		res.Entries[i] = &btpb.MutateRowsResponse_Entry{
+			Index:  int64(i),
+			Status: &rpcpb.Status{Code: int32(code), Message: ""},
+		}
+	}
+	return ss.SendMsg(res)
+}
+
+func TestRetainRowsAfter(t *testing.T) {
+	prevRowRange := NewRange("a", "z")
+	prevRowKey := "m"
+	want := NewRange("m\x00", "z")
+	got := prevRowRange.retainRowsAfter(prevRowKey)
+	if !reflect.DeepEqual(want, got) {
+		t.Errorf("range retry: got %v, want %v", got, want)
+	}
+
+	prevRowList := RowList{"a", "b", "c", "d", "e", "f"}
+	prevRowKey = "b"
+	wantList := RowList{"c", "d", "e", "f"}
+	got = prevRowList.retainRowsAfter(prevRowKey)
+	if !reflect.DeepEqual(wantList, got) {
+		t.Errorf("list retry: got %v, want %v", got, wantList)
+	}
+}
+
+func TestRetryReadRows(t *testing.T) {
+	ctx := context.Background()
+
+	// Intercept requests and delegate to an interceptor defined by the test case
+	errCount := 0
+	var f func(grpc.ServerStream) error
+	errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		if strings.HasSuffix(info.FullMethod, "ReadRows") {
+			return f(ss)
+		}
+		return handler(ctx, ss)
+	}
+
+	tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector))
+	if err != nil {
+		t.Fatalf("fake server setup: %v", err)
+	}
+	defer cleanup()
+
+	errCount = 0
+	// Test overall request failure and retries
+	f = func(ss grpc.ServerStream) error {
+		var err error
+		req := new(btpb.ReadRowsRequest)
+		ss.RecvMsg(req)
+		switch errCount {
+		case 0:
+			// Retryable request failure
+			err = grpc.Errorf(codes.Unavailable, "")
+		case 1:
+			// Write two rows then error
+			writeReadRowsResponse(ss, "a", "b")
+			err = grpc.Errorf(codes.Unavailable, "")
+		case 2:
+			// Check that the retry resumes after the last seen row key, then fail again
+			if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
+				t.Errorf("2 range retries: got %q, want %q", got, want)
+			}
+			err = grpc.Errorf(codes.Unavailable, "")
+		case 3:
+			// Write two more rows
+			writeReadRowsResponse(ss, "c", "d")
+			err = nil
+		}
+		errCount++
+		return err
+	}
+
+	var got []string
+	tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool {
+		got = append(got, r.Key())
+		return true
+	})
+	want :=
[]string{"a", "b", "c", "d"} + if !reflect.DeepEqual(got, want) { + t.Errorf("retry range integration: got %v, want %v", got, want) + } +} + +func writeReadRowsResponse(ss grpc.ServerStream, rowKeys ...string) error { + var chunks []*btpb.ReadRowsResponse_CellChunk + for _, key := range rowKeys { + chunks = append(chunks, &btpb.ReadRowsResponse_CellChunk{ + RowKey: []byte(key), + FamilyName: &wrappers.StringValue{Value: "fm"}, + Qualifier: &wrappers.BytesValue{Value: []byte("col")}, + RowStatus: &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true}, + }) + } + return ss.SendMsg(&btpb.ReadRowsResponse{Chunks: chunks}) +} diff --git a/vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json b/vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json new file mode 100644 index 000000000..4973831f4 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json @@ -0,0 +1,1178 @@ +{ + "tests": [ + { + "name": "invalid - no commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before commit", + "chunks": [ + "commit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before value", + "chunks": [ + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new col family must specify qualifier", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "bare commit implies ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "simple row with timestamp", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "missing timestamp, implied ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + 
} + ] + }, + { + "name": "empty cell value", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "two unsplit cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two qualifiers", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two families", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "with labels", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nlabels: \"L_2\"\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "L_2", + "error": false + } + ] + }, + { + "name": "split cell, bare commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "split cell", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: 
\"alue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "split four ways", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"l\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"ue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "L", + "error": false + } + ] + }, + { + "name": "two split cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier multi-split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-family split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + 
"label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - no commit between rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no commit after first row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - last row missing commit", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - duplicate row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new row missing row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "two rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + 
{ + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows implicit timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows empty value", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, one with multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"F\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "E", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "F", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + 
} + ] + }, + { + "name": "two rows, multiple cells, multiple families", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"M\"\n\u003e\nqualifier: \u003c\n value: \"O\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"N\"\n\u003e\nqualifier: \u003c\n value: \"P\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "M", + "qual": "O", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "N", + "qual": "P", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, four cells, 2 labels", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nlabels: \"L_3\"\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "timestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "L_3", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows with splits, same timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - bare reset", + "chunks": [ + "reset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - bad reset, no commit", + "chunks": [ + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: 
\"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - missing key after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "no data after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n" + ], + "results": null + }, + { + "name": "simple reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new val", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new qual", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "reset with splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + 
"reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "timestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two resets", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset then two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "B", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset in between chunks", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - reset with chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: 
\"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\nreset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - commit with chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "empty cell chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + } + ] +} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/cloud.go b/vendor/cloud.google.com/go/cloud.go new file mode 100644 index 000000000..6ba428dc6 --- /dev/null +++ b/vendor/cloud.google.com/go/cloud.go @@ -0,0 +1,20 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cloud is the root of the packages used to access Google Cloud +// Services. See https://godoc.org/cloud.google.com/go for a full list +// of sub-packages. +// +// This package documents how to authorize and authenticate the sub packages. +package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 000000000..f9d2bef6c --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,438 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. 
+//
+// This package is a wrapper around the GCE metadata service,
+// as documented at https://developers.google.com/compute/docs/metadata.
+package metadata // import "cloud.google.com/go/compute/metadata"
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/context/ctxhttp"
+
+	"cloud.google.com/go/internal"
+)
+
+const (
+	// metadataIP is the documented metadata server IP address.
+	metadataIP = "169.254.169.254"
+
+	// metadataHostEnv is the environment variable specifying the
+	// GCE metadata hostname. If empty, the default value of
+	// metadataIP ("169.254.169.254") is used instead.
+	// This variable name is not defined by any spec, as far as
+	// I know; it was made up for the Go package.
+	metadataHostEnv = "GCE_METADATA_HOST"
+)
+
+type cachedValue struct {
+	k    string
+	trim bool
+	mu   sync.Mutex
+	v    string
+}
+
+var (
+	projID  = &cachedValue{k: "project/project-id", trim: true}
+	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+	instID  = &cachedValue{k: "instance/id", trim: true}
+)
+
+var (
+	metaClient = &http.Client{
+		Transport: &internal.Transport{
+			Base: &http.Transport{
+				Dial: (&net.Dialer{
+					Timeout:   2 * time.Second,
+					KeepAlive: 30 * time.Second,
+				}).Dial,
+				ResponseHeaderTimeout: 2 * time.Second,
+			},
+		},
+	}
+	subscribeClient = &http.Client{
+		Transport: &internal.Transport{
+			Base: &http.Transport{
+				Dial: (&net.Dialer{
+					Timeout:   2 * time.Second,
+					KeepAlive: 30 * time.Second,
+				}).Dial,
+			},
+		},
+	}
+)
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func Get(suffix string) (string, error) {
+	val, _, err := getETag(metaClient, suffix)
+	return val, err
+}
+
+// getETag returns a value from the metadata service as well as the associated
+// ETag using the provided client. This func is otherwise equivalent to Get.
+func getETag(client *http.Client, suffix string) (value, etag string, err error) {
+	// Using a fixed IP makes it very difficult to spoof the metadata service in
+	// a container, which is an important use-case for local testing of cloud
+	// deployments. To enable spoofing of the metadata service, the environment
+	// variable GCE_METADATA_HOST is first inspected to decide where metadata
+	// requests shall go.
+	host := os.Getenv(metadataHostEnv)
+	if host == "" {
+		// Using 169.254.169.254 instead of "metadata" here because Go
+		// binaries built with the "netgo" tag and without cgo won't
+		// know the search suffix for "metadata" is
+		// ".google.internal", and this IP address is documented as
+		// being stable anyway.
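+		// Fall back to the fixed, documented IP; the request built below
+		// then targets "http://169.254.169.254/computeMetadata/v1/" + suffix.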
+ host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + res, err := client.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + go func() { + res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. 
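+		// Only the Linux DMI product-name check below is implemented here,
+		// so on other platforms detection rests on the network probes alone.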
+		return false
+	}
+	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+	name := strings.TrimSpace(string(slurp))
+	return name == "Google" || name == "Google Compute Engine"
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+	const failedSubscribeSleep = time.Second * 5
+
+	// First check to see if the metadata value exists at all.
+	val, lastETag, err := getETag(subscribeClient, suffix)
+	if err != nil {
+		return err
+	}
+
+	if err := fn(val, true); err != nil {
+		return err
+	}
+
+	ok := true
+	if strings.ContainsRune(suffix, '?') {
+		suffix += "&wait_for_change=true&last_etag="
+	} else {
+		suffix += "?wait_for_change=true&last_etag="
+	}
+	for {
+		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+		if err != nil {
+			if _, deleted := err.(NotDefinedError); !deleted {
+				time.Sleep(failedSubscribeSleep)
+				continue // Retry on other errors.
+			}
+			ok = false
+		}
+		lastETag = etag
+
+		if err := fn(val, ok); err != nil || !ok {
+			return err
+		}
+	}
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance name.
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+	if err != nil {
+		return "", err
+	}
+	return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance.
The value of an +// attribute can be obtained with InstanceAttributeValue. +func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } + +func lines(suffix string) ([]string, error) { + j, err := Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata_test.go b/vendor/cloud.google.com/go/compute/metadata/metadata_test.go new file mode 100644 index 000000000..9ac592691 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata_test.go @@ -0,0 +1,48 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "os" + "sync" + "testing" +) + +func TestOnGCE_Stress(t *testing.T) { + if testing.Short() { + t.Skip("skipping in -short mode") + } + var last bool + for i := 0; i < 100; i++ { + onGCEOnce = sync.Once{} + + now := OnGCE() + if i > 0 && now != last { + t.Errorf("%d. 
changed from %v to %v", i, last, now) + } + last = now + } + t.Logf("OnGCE() = %v", last) +} + +func TestOnGCE_Force(t *testing.T) { + onGCEOnce = sync.Once{} + old := os.Getenv(metadataHostEnv) + defer os.Setenv(metadataHostEnv, old) + os.Setenv(metadataHostEnv, "127.0.0.1") + if !OnGCE() { + t.Error("OnGCE() = false; want true") + } +} diff --git a/vendor/cloud.google.com/go/container/container.go b/vendor/cloud.google.com/go/container/container.go new file mode 100644 index 000000000..7cee88acb --- /dev/null +++ b/vendor/cloud.google.com/go/container/container.go @@ -0,0 +1,273 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package container contains a Google Container Engine client. +// +// For more information about the API, +// see https://cloud.google.com/container-engine/docs +package container // import "cloud.google.com/go/container" + +import ( + "errors" + "fmt" + "time" + + "golang.org/x/net/context" + raw "google.golang.org/api/container/v1" + "google.golang.org/api/option" + "google.golang.org/api/transport" +) + +type Type string + +const ( + TypeCreate = Type("createCluster") + TypeDelete = Type("deleteCluster") +) + +type Status string + +const ( + StatusDone = Status("done") + StatusPending = Status("pending") + StatusRunning = Status("running") + StatusError = Status("error") + StatusProvisioning = Status("provisioning") + StatusStopping = Status("stopping") +) + +const prodAddr = "https://container.googleapis.com/" +const userAgent = "gcloud-golang-container/20151008" + +// Client is a Google Container Engine client, which may be used to manage +// clusters with a project. It must be constructed via NewClient. +type Client struct { + projectID string + svc *raw.Service +} + +// NewClient creates a new Google Container Engine client. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithEndpoint(prodAddr), + option.WithScopes(raw.CloudPlatformScope), + option.WithUserAgent(userAgent), + } + o = append(o, opts...) + httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + + svc, err := raw.New(httpClient) + if err != nil { + return nil, fmt.Errorf("constructing container client: %v", err) + } + svc.BasePath = endpoint + + c := &Client{ + projectID: projectID, + svc: svc, + } + + return c, nil +} + +// Resource is a Google Container Engine cluster resource. +type Resource struct { + // Name is the name of this cluster. The name must be unique + // within this project and zone, and can be up to 40 characters. + Name string + + // Description is the description of the cluster. Optional. + Description string + + // Zone is the Google Compute Engine zone in which the cluster resides. + Zone string + + // Status is the current status of the cluster. It could either be + // StatusError, StatusProvisioning, StatusRunning or StatusStopping. 
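+	// (StatusDone and StatusPending describe operations rather than
+	// clusters; see Op below.)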
+	Status Status
+
+	// Num is the number of nodes in this cluster resource.
+	Num int64
+
+	// APIVersion is the version of the Kubernetes master and kubelets running
+	// in this cluster. Allowed value is 0.4.2, or leave blank to
+	// pick up the latest stable release.
+	APIVersion string
+
+	// Endpoint is the IP address of this cluster's Kubernetes master.
+	// The endpoint can be accessed at https://username:password@endpoint/.
+	// See Username and Password fields for the username and password information.
+	Endpoint string
+
+	// Username is the username to use when accessing the Kubernetes master endpoint.
+	Username string
+
+	// Password is the password to use when accessing the Kubernetes master endpoint.
+	Password string
+
+	// ContainerIPv4CIDR is the IP address range of the container pods in
+	// this cluster, in CIDR notation (e.g. 1.2.3.4/29).
+	ContainerIPv4CIDR string
+
+	// ServicesIPv4CIDR is the IP address range of the Kubernetes services in
+	// this cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are
+	// always in the 10.0.0.0/16 range.
+	ServicesIPv4CIDR string
+
+	// MachineType is a Google Compute Engine machine type (e.g. n1-standard-1).
+	// If unset, the default type is used while creating a new cluster.
+	MachineType string
+
+	// This field is ignored. It was removed from the underlying container API in v1.
+	SourceImage string
+
+	// Created is the creation time of this cluster.
+	Created time.Time
+}
+
+func resourceFromRaw(c *raw.Cluster) *Resource {
+	if c == nil {
+		return nil
+	}
+	r := &Resource{
+		Name:              c.Name,
+		Description:       c.Description,
+		Zone:              c.Zone,
+		Status:            Status(c.Status),
+		Num:               c.InitialNodeCount,
+		APIVersion:        c.InitialClusterVersion,
+		Endpoint:          c.Endpoint,
+		Username:          c.MasterAuth.Username,
+		Password:          c.MasterAuth.Password,
+		ContainerIPv4CIDR: c.ClusterIpv4Cidr,
+		ServicesIPv4CIDR:  c.ServicesIpv4Cidr,
+		MachineType:       c.NodeConfig.MachineType,
+	}
+	r.Created, _ = time.Parse(time.RFC3339, c.CreateTime)
+	return r
+}
+
+func resourcesFromRaw(c []*raw.Cluster) []*Resource {
+	r := make([]*Resource, len(c))
+	for i, val := range c {
+		r[i] = resourceFromRaw(val)
+	}
+	return r
+}
+
+// Op represents a Google Container Engine API operation.
+type Op struct {
+	// Name is the name of the operation.
+	Name string
+
+	// Zone is the Google Compute Engine zone.
+	Zone string
+
+	// This field is ignored. It was removed from the underlying container API in v1.
+	TargetURL string
+
+	// Type is the operation type. It can be either TypeCreate or TypeDelete.
+	Type Type
+
+	// Status is the current status of this operation. It can be either
+	// StatusDone or StatusPending.
+	Status Status
+}
+
+func opFromRaw(o *raw.Operation) *Op {
+	if o == nil {
+		return nil
+	}
+	return &Op{
+		Name:   o.Name,
+		Zone:   o.Zone,
+		Type:   Type(o.OperationType),
+		Status: Status(o.Status),
+	}
+}
+
+func opsFromRaw(o []*raw.Operation) []*Op {
+	ops := make([]*Op, len(o))
+	for i, val := range o {
+		ops[i] = opFromRaw(val)
+	}
+	return ops
+}
+
+// Clusters returns a list of cluster resources from the specified zone.
+// If no zone is specified, it returns all clusters under the user's project.
+func (c *Client) Clusters(ctx context.Context, zone string) ([]*Resource, error) {
+	if zone == "" {
+		zone = "-"
+	}
+	resp, err := c.svc.Projects.Zones.Clusters.List(c.projectID, zone).Do()
+	if err != nil {
+		return nil, err
+	}
+	return resourcesFromRaw(resp.Clusters), nil
+}
+
+// Cluster returns metadata about the specified cluster.
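+//
+// A minimal usage sketch (illustrative only; the project, zone and cluster
+// names are placeholders):
+//
+//	c, err := container.NewClient(ctx, "my-project")
+//	if err != nil {
+//		// handle error
+//	}
+//	r, err := c.Cluster(ctx, "us-central1-b", "my-cluster")
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(r.Status, r.Endpoint)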
+func (c *Client) Cluster(ctx context.Context, zone, name string) (*Resource, error) {
+	resp, err := c.svc.Projects.Zones.Clusters.Get(c.projectID, zone, name).Do()
+	if err != nil {
+		return nil, err
+	}
+	return resourceFromRaw(resp), nil
+}
+
+// CreateCluster creates a new cluster with the provided metadata
+// in the specified zone.
+func (c *Client) CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) {
+	panic("not implemented")
+}
+
+// DeleteCluster deletes a cluster.
+func (c *Client) DeleteCluster(ctx context.Context, zone, name string) error {
+	_, err := c.svc.Projects.Zones.Clusters.Delete(c.projectID, zone, name).Do()
+	return err
+}
+
+// Operations returns a list of operations from the specified zone.
+// If no zone is specified, it lists all operations running under
+// the user's project.
+func (c *Client) Operations(ctx context.Context, zone string) ([]*Op, error) {
+	if zone == "" {
+		resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, "-").Do()
+		if err != nil {
+			return nil, err
+		}
+		return opsFromRaw(resp.Operations), nil
+	}
+	resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, zone).Do()
+	if err != nil {
+		return nil, err
+	}
+	return opsFromRaw(resp.Operations), nil
+}
+
+// Operation returns an operation.
+func (c *Client) Operation(ctx context.Context, zone, name string) (*Op, error) {
+	resp, err := c.svc.Projects.Zones.Operations.Get(c.projectID, zone, name).Do()
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusMessage != "" {
+		return nil, errors.New(resp.StatusMessage)
+	}
+	return opFromRaw(resp), nil
+}
diff --git a/vendor/cloud.google.com/go/datastore/datastore.go b/vendor/cloud.google.com/go/datastore/datastore.go
new file mode 100644
index 000000000..16e651087
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/datastore.go
@@ -0,0 +1,588 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datastore
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	pb "google.golang.org/genproto/googleapis/datastore/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+)
+
+const (
+	prodAddr  = "datastore.googleapis.com:443"
+	userAgent = "gcloud-golang-datastore/20160401"
+)
+
+// ScopeDatastore grants permissions to view and/or manage datastore entities.
+const ScopeDatastore = "https://www.googleapis.com/auth/datastore"
+
+// protoClient is an interface for *transport.ProtoClient to support injecting
+// fake clients in tests.
+type protoClient interface {
+	Call(context.Context, string, proto.Message, proto.Message) error
+}
+
+// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
+// metadata to be sent in each request for server-side traffic management.
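+// Each wrapped call below attaches dc.md (a "projects/<project-id>" resource
+// prefix) to the outgoing context via metadata.NewContext, so the server can
+// attribute and route each request to the right project.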
+type datastoreClient struct { + c pb.DatastoreClient + md metadata.MD +} + +func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient { + return &datastoreClient{ + c: pb.NewDatastoreClient(conn), + md: metadata.Pairs(resourcePrefixHeader, "projects/"+projectID), + } +} + +func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) { + return dc.c.Lookup(metadata.NewContext(ctx, dc.md), in, opts...) +} + +func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) { + return dc.c.RunQuery(metadata.NewContext(ctx, dc.md), in, opts...) +} + +func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) { + return dc.c.BeginTransaction(metadata.NewContext(ctx, dc.md), in, opts...) +} + +func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) { + return dc.c.Commit(metadata.NewContext(ctx, dc.md), in, opts...) +} + +func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) { + return dc.c.Rollback(metadata.NewContext(ctx, dc.md), in, opts...) +} + +func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) { + return dc.c.AllocateIds(metadata.NewContext(ctx, dc.md), in, opts...) +} + +// Client is a client for reading and writing data in a datastore dataset. +type Client struct { + conn *grpc.ClientConn + client pb.DatastoreClient + endpoint string + dataset string // Called dataset by the datastore API, synonym for project ID. +} + +// NewClient creates a new Client for a given dataset. +// If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable. +// If the DATASTORE_EMULATOR_HOST environment variable is set, client will use its value +// to connect to a locally-running datastore emulator. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + var o []option.ClientOption + // Environment variables for gcd emulator: + // https://cloud.google.com/datastore/docs/tools/datastore-emulator + // If the emulator is available, dial it directly (and don't pass any credentials). + if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("grpc.Dial: %v", err) + } + o = []option.ClientOption{option.WithGRPCConn(conn)} + } else { + o = []option.ClientOption{ + option.WithEndpoint(prodAddr), + option.WithScopes(ScopeDatastore), + option.WithUserAgent(userAgent), + } + } + // Warn if we see the legacy emulator environment variables. + if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" { + log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.") + } + if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" { + log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. 
Use DATASTORE_PROJECT_ID instead.")
+	}
+	if projectID == "" {
+		projectID = os.Getenv("DATASTORE_PROJECT_ID")
+	}
+	if projectID == "" {
+		return nil, errors.New("datastore: missing project/dataset id")
+	}
+	o = append(o, opts...)
+	conn, err := transport.DialGRPC(ctx, o...)
+	if err != nil {
+		return nil, fmt.Errorf("dialing: %v", err)
+	}
+	return &Client{
+		conn:    conn,
+		client:  newDatastoreClient(conn, projectID),
+		dataset: projectID,
+	}, nil
+
+}
+
+var (
+	// ErrInvalidEntityType is returned when functions like Get or Next are
+	// passed a dst or src argument of invalid type.
+	ErrInvalidEntityType = errors.New("datastore: invalid entity type")
+	// ErrInvalidKey is returned when an invalid key is presented.
+	ErrInvalidKey = errors.New("datastore: invalid key")
+	// ErrNoSuchEntity is returned when no entity was found for a given key.
+	ErrNoSuchEntity = errors.New("datastore: no such entity")
+)
+
+type multiArgType int
+
+const (
+	multiArgTypeInvalid multiArgType = iota
+	multiArgTypePropertyLoadSaver
+	multiArgTypeStruct
+	multiArgTypeStructPtr
+	multiArgTypeInterface
+)
+
+// nsKey is the type of the context.Context key to store the datastore
+// namespace.
+type nsKey struct{}
+
+// WithNamespace returns a new context that limits the scope of its parent
+// context with a Datastore namespace.
+func WithNamespace(parent context.Context, namespace string) context.Context {
+	return context.WithValue(parent, nsKey{}, namespace)
+}
+
+// ctxNamespace returns the active namespace for a context.
+// It defaults to "" if no namespace was specified.
+func ctxNamespace(ctx context.Context) string {
+	v, _ := ctx.Value(nsKey{}).(string)
+	return v
+}
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+// StructType is the type of the struct pointed to by the destination argument
+// passed to Get or to Iterator.Next.
+type ErrFieldMismatch struct {
+	StructType reflect.Type
+	FieldName  string
+	Reason     string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+	return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
+		e.FieldName, e.StructType, e.Reason)
+}
+
+// GeoPoint represents a location as latitude/longitude in degrees.
+type GeoPoint struct {
+	Lat, Lng float64
+}
+
+// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
+func (g GeoPoint) Valid() bool {
+	return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
+}
+
+func keyToProto(k *Key) *pb.Key {
+	if k == nil {
+		return nil
+	}
+
+	// TODO(jbd): Eliminate unnecessary allocations.
+	var path []*pb.Key_PathElement
+	for {
+		el := &pb.Key_PathElement{Kind: k.kind}
+		if k.id != 0 {
+			el.IdType = &pb.Key_PathElement_Id{k.id}
+		} else if k.name != "" {
+			el.IdType = &pb.Key_PathElement_Name{k.name}
+		}
+		path = append([]*pb.Key_PathElement{el}, path...)
+		if k.parent == nil {
+			break
+		}
+		k = k.parent
+	}
+	key := &pb.Key{Path: path}
+	if k.namespace != "" {
+		key.PartitionId = &pb.PartitionId{
+			NamespaceId: k.namespace,
+		}
+	}
+	return key
+}
+
+// protoToKey decodes a protocol buffer representation of a key into an
+// equivalent *Key object. If the key is invalid, protoToKey will return the
+// invalid key along with ErrInvalidKey.
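+// Path elements arrive ordered from root to leaf, so the loop threads each
+// previously built key in as the parent, leaving key pointing at the leaf.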
+func protoToKey(p *pb.Key) (*Key, error) {
+	var key *Key
+	var namespace string
+	if partition := p.PartitionId; partition != nil {
+		namespace = partition.NamespaceId
+	}
+	for _, el := range p.Path {
+		key = &Key{
+			namespace: namespace,
+			kind:      el.Kind,
+			id:        el.GetId(),
+			name:      el.GetName(),
+			parent:    key,
+		}
+	}
+	if !key.valid() { // Also detects key == nil.
+		return key, ErrInvalidKey
+	}
+	return key, nil
+}
+
+// multiKeyToProto is a batch version of keyToProto.
+func multiKeyToProto(keys []*Key) []*pb.Key {
+	ret := make([]*pb.Key, len(keys))
+	for i, k := range keys {
+		ret[i] = keyToProto(k)
+	}
+	return ret
+}
+
+// multiProtoToKey is a batch version of protoToKey.
+func multiProtoToKey(keys []*pb.Key) ([]*Key, error) {
+	hasErr := false
+	ret := make([]*Key, len(keys))
+	err := make(MultiError, len(keys))
+	for i, k := range keys {
+		ret[i], err[i] = protoToKey(k)
+		if err[i] != nil {
+			hasErr = true
+		}
+	}
+	if hasErr {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// multiValid is a batch version of Key.valid. It returns an error, not a
+// []bool.
+func multiValid(key []*Key) error {
+	invalid := false
+	for _, k := range key {
+		if !k.valid() {
+			invalid = true
+			break
+		}
+	}
+	if !invalid {
+		return nil
+	}
+	err := make(MultiError, len(key))
+	for i, k := range key {
+		if !k.valid() {
+			err[i] = ErrInvalidKey
+		}
+	}
+	return err
+}
+
+// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
+// type S, for some interface type I, or some non-interface non-pointer type P
+// such that P or *P implements PropertyLoadSaver.
+//
+// It returns what category the slice's elements are, and the reflect.Type
+// that represents S, I or P.
+//
+// As a special case, PropertyList is an invalid type for v.
+//
+// TODO(djd): multiArg is very confusing. Fold this logic into the
+// relevant Put/Get methods to make the logic less opaque.
+func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
+	if v.Kind() != reflect.Slice {
+		return multiArgTypeInvalid, nil
+	}
+	if v.Type() == typeOfPropertyList {
+		return multiArgTypeInvalid, nil
+	}
+	elemType = v.Type().Elem()
+	if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
+		return multiArgTypePropertyLoadSaver, elemType
+	}
+	switch elemType.Kind() {
+	case reflect.Struct:
+		return multiArgTypeStruct, elemType
+	case reflect.Interface:
+		return multiArgTypeInterface, elemType
+	case reflect.Ptr:
+		elemType = elemType.Elem()
+		if elemType.Kind() == reflect.Struct {
+			return multiArgTypeStructPtr, elemType
+		}
+	}
+	return multiArgTypeInvalid, nil
+}
+
+// Close closes the Client.
+func (c *Client) Close() {
+	c.conn.Close()
+}
+
+// Get loads the entity stored for key into dst, which must be a struct pointer
+// or implement PropertyLoadSaver. If there is no such entity for the key, Get
+// returns ErrNoSuchEntity.
+//
+// The values of dst's unmatched struct fields are not modified, and matching
+// slice-typed fields are not reset before appending to them. In particular, it
+// is recommended to pass a pointer to a zero valued struct on each Get call.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer.
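+//
+// A minimal usage sketch (illustrative; Task is a made-up type and NewKey is
+// the key constructor of this package vintage):
+//
+//	type Task struct {
+//		Description string
+//	}
+//	k := datastore.NewKey(ctx, "Task", "sampletask", 0, nil)
+//	var t Task
+//	if err := client.Get(ctx, k, &t); err != nil {
+//		// handle error; ErrNoSuchEntity means the key has no entity
+//	}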
+func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error {
+	if dst == nil { // get catches nil interfaces; we need to catch nil ptr here
+		return ErrInvalidEntityType
+	}
+	err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
+	if me, ok := err.(MultiError); ok {
+		return me[0]
+	}
+	return err
+}
+
+// GetMulti is a batch version of Get.
+//
+// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
+// type I, or some non-interface non-pointer type P such that P or *P
+// implements PropertyLoadSaver. If an []I, each element must be a valid dst
+// for Get: it must be a struct pointer or implement PropertyLoadSaver.
+//
+// As a special case, PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when []PropertyList was intended.
+func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error {
+	return c.get(ctx, keys, dst, nil)
+}
+
+func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error {
+	v := reflect.ValueOf(dst)
+	multiArgType, _ := checkMultiArg(v)
+
+	// Sanity checks
+	if multiArgType == multiArgTypeInvalid {
+		return errors.New("datastore: dst has invalid type")
+	}
+	if len(keys) != v.Len() {
+		return errors.New("datastore: keys and dst slices have different length")
+	}
+	if len(keys) == 0 {
+		return nil
+	}
+
+	// Go through keys, validate them, serialize them, and create a map from
+	// each key to its index.
+	multiErr, any := make(MultiError, len(keys)), false
+	keyMap := make(map[string]int)
+	pbKeys := make([]*pb.Key, len(keys))
+	for i, k := range keys {
+		if !k.valid() {
+			multiErr[i] = ErrInvalidKey
+			any = true
+		} else {
+			keyMap[k.String()] = i
+			pbKeys[i] = keyToProto(k)
+		}
+	}
+	if any {
+		return multiErr
+	}
+	req := &pb.LookupRequest{
+		ProjectId:   c.dataset,
+		Keys:        pbKeys,
+		ReadOptions: opts,
+	}
+	resp, err := c.client.Lookup(ctx, req)
+	if err != nil {
+		return err
+	}
+	if len(resp.Deferred) > 0 {
+		// TODO(jbd): Assess whether we should retry the deferred keys.
+		return errors.New("datastore: some entities temporarily unavailable")
+	}
+	if len(keys) != len(resp.Found)+len(resp.Missing) {
+		return errors.New("datastore: internal error: server returned the wrong number of entities")
+	}
+	for _, e := range resp.Found {
+		k, err := protoToKey(e.Entity.Key)
+		if err != nil {
+			return errors.New("datastore: internal error: server returned an invalid key")
+		}
+		index := keyMap[k.String()]
+		elem := v.Index(index)
+		if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+			elem = elem.Addr()
+		}
+		if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
+			elem.Set(reflect.New(elem.Type().Elem()))
+		}
+		if err := loadEntity(elem.Interface(), e.Entity); err != nil {
+			multiErr[index] = err
+			any = true
+		}
+	}
+	for _, e := range resp.Missing {
+		k, err := protoToKey(e.Entity.Key)
+		if err != nil {
+			return errors.New("datastore: internal error: server returned an invalid key")
+		}
+		multiErr[keyMap[k.String()]] = ErrNoSuchEntity
+		any = true
+	}
+	if any {
+		return multiErr
+	}
+	return nil
+}
+
+// Put saves the entity src into the datastore with key k. src must be a struct
+// pointer or implement PropertyLoadSaver; if a struct pointer then any
+// unexported fields of that struct will be skipped. If k is an incomplete key,
+// the returned key will be a unique key generated by the datastore.
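+//
+// A minimal usage sketch (illustrative; Task is a made-up type and
+// NewIncompleteKey is the key constructor of this package vintage):
+//
+//	k := datastore.NewIncompleteKey(ctx, "Task", nil)
+//	k, err := client.Put(ctx, k, &Task{Description: "write report"})
+//	if err != nil {
+//		// handle error
+//	}
+//	// k now carries the datastore-generated numeric ID.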
+func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) { + k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src}) + if err != nil { + if me, ok := err.(MultiError); ok { + return nil, me[0] + } + return nil, err + } + return k[0], nil +} + +// PutMulti is a batch version of Put. +// +// src must satisfy the same conditions as the dst argument to GetMulti. +func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) { + mutations, err := putMutations(keys, src) + if err != nil { + return nil, err + } + + // Make the request. + req := &pb.CommitRequest{ + ProjectId: c.dataset, + Mutations: mutations, + Mode: pb.CommitRequest_NON_TRANSACTIONAL, + } + resp, err := c.client.Commit(ctx, req) + if err != nil { + return nil, err + } + + // Copy any newly minted keys into the returned keys. + ret := make([]*Key, len(keys)) + for i, key := range keys { + if key.Incomplete() { + // This key is in the mutation results. + ret[i], err = protoToKey(resp.MutationResults[i].Key) + if err != nil { + return nil, errors.New("datastore: internal error: server returned an invalid key") + } + } else { + ret[i] = key + } + } + return ret, nil +} + +func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) { + v := reflect.ValueOf(src) + multiArgType, _ := checkMultiArg(v) + if multiArgType == multiArgTypeInvalid { + return nil, errors.New("datastore: src has invalid type") + } + if len(keys) != v.Len() { + return nil, errors.New("datastore: key and src slices have different length") + } + if len(keys) == 0 { + return nil, nil + } + if err := multiValid(keys); err != nil { + return nil, err + } + mutations := make([]*pb.Mutation, 0, len(keys)) + for i, k := range keys { + elem := v.Index(i) + // Two cases where we need to take the address: + // 1) multiArgTypePropertyLoadSaver => &elem implements PLS + // 2) multiArgTypeStruct => saveEntity needs *struct + if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { + elem = elem.Addr() + } + p, err := saveEntity(k, elem.Interface()) + if err != nil { + return nil, fmt.Errorf("datastore: Error while saving %v: %v", k.String(), err) + } + var mut *pb.Mutation + if k.Incomplete() { + mut = &pb.Mutation{Operation: &pb.Mutation_Insert{p}} + } else { + mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{p}} + } + mutations = append(mutations, mut) + } + return mutations, nil +} + +// Delete deletes the entity for the given key. +func (c *Client) Delete(ctx context.Context, key *Key) error { + err := c.DeleteMulti(ctx, []*Key{key}) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti is a batch version of Delete. 
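+// All keys must be complete: deleteMutations rejects any incomplete key
+// before the RPC is issued, so one bad key fails the whole batch.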
+func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error { + mutations, err := deleteMutations(keys) + if err != nil { + return err + } + + req := &pb.CommitRequest{ + ProjectId: c.dataset, + Mutations: mutations, + Mode: pb.CommitRequest_NON_TRANSACTIONAL, + } + _, err = c.client.Commit(ctx, req) + return err +} + +func deleteMutations(keys []*Key) ([]*pb.Mutation, error) { + mutations := make([]*pb.Mutation, 0, len(keys)) + for _, k := range keys { + if k.Incomplete() { + return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k) + } + mutations = append(mutations, &pb.Mutation{ + Operation: &pb.Mutation_Delete{keyToProto(k)}, + }) + } + return mutations, nil +} diff --git a/vendor/cloud.google.com/go/datastore/datastore_test.go b/vendor/cloud.google.com/go/datastore/datastore_test.go new file mode 100644 index 000000000..7815df4bb --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/datastore_test.go @@ -0,0 +1,1688 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +type ( + myBlob []byte + myByte byte + myString string +) + +func makeMyByteSlice(n int) []myByte { + b := make([]myByte, n) + for i := range b { + b[i] = myByte(i) + } + return b +} + +func makeInt8Slice(n int) []int8 { + b := make([]int8, n) + for i := range b { + b[i] = int8(i) + } + return b +} + +func makeUint8Slice(n int) []uint8 { + b := make([]uint8, n) + for i := range b { + b[i] = uint8(i) + } + return b +} + +func newKey(stringID string, parent *Key) *Key { + return &Key{ + kind: "kind", + name: stringID, + id: 0, + parent: parent, + } +} + +var ( + testKey0 = newKey("name0", nil) + testKey1a = newKey("name1", nil) + testKey1b = newKey("name1", nil) + testKey2a = newKey("name2", testKey0) + testKey2b = newKey("name2", testKey0) + testGeoPt0 = GeoPoint{Lat: 1.2, Lng: 3.4} + testGeoPt1 = GeoPoint{Lat: 5, Lng: 10} + testBadGeoPt = GeoPoint{Lat: 1000, Lng: 34} +) + +type B0 struct { + B []byte `datastore:",noindex"` +} + +type B1 struct { + B []int8 +} + +type B2 struct { + B myBlob `datastore:",noindex"` +} + +type B3 struct { + B []myByte `datastore:",noindex"` +} + +type B4 struct { + B [][]byte +} + +type C0 struct { + I int + C chan int +} + +type C1 struct { + I int + C *chan int +} + +type C2 struct { + I int + C []chan int +} + +type C3 struct { + C string +} + +type c4 struct { + C string +} + +type E struct{} + +type G0 struct { + G GeoPoint +} + +type G1 struct { + G []GeoPoint +} + +type K0 struct { + K *Key +} + +type K1 struct { + K []*Key +} + +type N0 struct { + X0 + Nonymous X0 + Ignore string `datastore:"-"` + Other string +} + +type N1 struct { + X0 + Nonymous []X0 + Ignore string `datastore:"-"` + Other string +} + +type N2 struct { + N1 `datastore:"red"` 
+ Green N1 `datastore:"green"` + Blue N1 + White N1 `datastore:"-"` +} + +type N3 struct { + C3 `datastore:"red"` +} + +type N4 struct { + c4 +} + +type N5 struct { + c4 `datastore:"red"` +} + +type O0 struct { + I int64 +} + +type O1 struct { + I int32 +} + +type U0 struct { + U uint +} + +type U1 struct { + U string +} + +type T struct { + T time.Time +} + +type X0 struct { + S string + I int + i int +} + +type X1 struct { + S myString + I int32 + J int64 +} + +type X2 struct { + Z string + i int +} + +type X3 struct { + S bool + I int +} + +type Y0 struct { + B bool + F []float64 + G []float64 +} + +type Y1 struct { + B bool + F float64 +} + +type Y2 struct { + B bool + F []int64 +} + +type Tagged struct { + A int `datastore:"a,noindex"` + B []int `datastore:"b"` + C int `datastore:",noindex"` + D int `datastore:""` + E int + I int `datastore:"-"` + J int `datastore:",noindex" json:"j"` + + Y0 `datastore:"-"` + Z chan int `datastore:"-,"` +} + +type InvalidTagged1 struct { + I int `datastore:"\t"` +} + +type InvalidTagged2 struct { + I int + J int `datastore:"I"` +} + +type Inner1 struct { + W int32 + X string +} + +type Inner2 struct { + Y float64 +} + +type Inner3 struct { + Z bool +} + +type Outer struct { + A int16 + I []Inner1 + J Inner2 + Inner3 +} + +type OuterEquivalent struct { + A int16 + IDotW []int32 `datastore:"I.W"` + IDotX []string `datastore:"I.X"` + JDotY float64 `datastore:"J.Y"` + Z bool +} + +type Dotted struct { + A DottedA `datastore:"A0.A1.A2"` +} + +type DottedA struct { + B DottedB `datastore:"B3"` +} + +type DottedB struct { + C int `datastore:"C4.C5"` +} + +type SliceOfSlices struct { + I int + S []struct { + J int + F []float64 + } +} + +type Recursive struct { + I int + R []Recursive +} + +type MutuallyRecursive0 struct { + I int + R []MutuallyRecursive1 +} + +type MutuallyRecursive1 struct { + I int + R []MutuallyRecursive0 +} + +type Doubler struct { + S string + I int64 + B bool +} + +func (d *Doubler) Load(props []Property) error { + return LoadStruct(d, props) +} + +func (d *Doubler) Save() ([]Property, error) { + // Save the default Property slice to an in-memory buffer (a PropertyList). + props, err := SaveStruct(d) + if err != nil { + return nil, err + } + var list PropertyList + if err := list.Load(props); err != nil { + return nil, err + } + + // Edit that PropertyList, and send it on. + for i := range list { + switch v := list[i].Value.(type) { + case string: + // + means string concatenation. + list[i].Value = v + v + case int64: + // + means integer addition. + list[i].Value = v + v + } + } + return list.Save() +} + +var _ PropertyLoadSaver = (*Doubler)(nil) + +type Deriver struct { + S, Derived, Ignored string +} + +func (e *Deriver) Load(props []Property) error { + for _, p := range props { + if p.Name != "S" { + continue + } + e.S = p.Value.(string) + e.Derived = "derived+" + e.S + } + return nil +} + +func (e *Deriver) Save() ([]Property, error) { + return []Property{ + { + Name: "S", + Value: e.S, + }, + }, nil +} + +var _ PropertyLoadSaver = (*Deriver)(nil) + +type BadMultiPropEntity struct{} + +func (e *BadMultiPropEntity) Load(props []Property) error { + return errors.New("unimplemented") +} + +func (e *BadMultiPropEntity) Save() ([]Property, error) { + // Write multiple properties with the same name "I". 
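+	// Duplicate names are rejected when saving (compare the "repeated
+	// property names" case below, which expects a "duplicate Property"
+	// error), which is the failure mode this deliberately bad saver exercises.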
+ var props []Property + for i := 0; i < 3; i++ { + props = append(props, Property{ + Name: "I", + Value: int64(i), + }) + } + return props, nil +} + +var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil) + +type testCase struct { + desc string + src interface{} + want interface{} + putErr string + getErr string +} + +var testCases = []testCase{ + { + "chan save fails", + &C0{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "*chan save fails", + &C1{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "[]chan save fails", + &C2{I: -1, C: make([]chan int, 8)}, + &E{}, + "unsupported struct field", + "", + }, + { + "chan load fails", + &C3{C: "not a chan"}, + &C0{}, + "", + "type mismatch", + }, + { + "*chan load fails", + &C3{C: "not a *chan"}, + &C1{}, + "", + "type mismatch", + }, + { + "[]chan load fails", + &C3{C: "not a []chan"}, + &C2{}, + "", + "type mismatch", + }, + { + "empty struct", + &E{}, + &E{}, + "", + "", + }, + { + "geopoint", + &G0{G: testGeoPt0}, + &G0{G: testGeoPt0}, + "", + "", + }, + { + "geopoint invalid", + &G0{G: testBadGeoPt}, + &G0{}, + "invalid GeoPoint value", + "", + }, + { + "geopoint as props", + &G0{G: testGeoPt0}, + &PropertyList{ + Property{Name: "G", Value: testGeoPt0, NoIndex: false}, + }, + "", + "", + }, + { + "geopoint slice", + &G1{G: []GeoPoint{testGeoPt0, testGeoPt1}}, + &G1{G: []GeoPoint{testGeoPt0, testGeoPt1}}, + "", + "", + }, + { + "key", + &K0{K: testKey1a}, + &K0{K: testKey1b}, + "", + "", + }, + { + "key with parent", + &K0{K: testKey2a}, + &K0{K: testKey2b}, + "", + "", + }, + { + "nil key", + &K0{}, + &K0{}, + "", + "", + }, + { + "all nil keys in slice", + &K1{[]*Key{nil, nil}}, + &K1{[]*Key{nil, nil}}, + "", + "", + }, + { + "some nil keys in slice", + &K1{[]*Key{testKey1a, nil, testKey2a}}, + &K1{[]*Key{testKey1b, nil, testKey2b}}, + "", + "", + }, + { + "overflow", + &O0{I: 1 << 48}, + &O1{}, + "", + "overflow", + }, + { + "time", + &T{T: time.Unix(1e9, 0)}, + &T{T: time.Unix(1e9, 0)}, + "", + "", + }, + { + "time as props", + &T{T: time.Unix(1e9, 0)}, + &PropertyList{ + Property{Name: "T", Value: time.Unix(1e9, 0), NoIndex: false}, + }, + "", + "", + }, + { + "uint save", + &U0{U: 1}, + &U0{}, + "unsupported struct field", + "", + }, + { + "uint load", + &U1{U: "not a uint"}, + &U0{}, + "", + "type mismatch", + }, + { + "zero", + &X0{}, + &X0{}, + "", + "", + }, + { + "basic", + &X0{S: "one", I: 2, i: 3}, + &X0{S: "one", I: 2}, + "", + "", + }, + { + "save string/int load myString/int32", + &X0{S: "one", I: 2, i: 3}, + &X1{S: "one", I: 2}, + "", + "", + }, + { + "missing fields", + &X0{S: "one", I: 2, i: 3}, + &X2{}, + "", + "no such struct field", + }, + { + "save string load bool", + &X0{S: "one", I: 2, i: 3}, + &X3{I: 2}, + "", + "type mismatch", + }, + { + "basic slice", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y0{B: true, F: []float64{7, 8, 9}}, + "", + "", + }, + { + "save []float64 load float64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y1{B: true}, + "", + "requires a slice", + }, + { + "save []float64 load []int64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y2{B: true}, + "", + "type mismatch", + }, + { + "single slice is too long", + &Y0{F: make([]float64, maxIndexedProperties+1)}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "two slices are too long", + &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "one slice and one scalar are too long", + &Y0{F: make([]float64, 
maxIndexedProperties), B: true}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "long blob", + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "long []int8 is too long", + &B1{B: makeInt8Slice(maxIndexedProperties + 1)}, + &B1{}, + "too many indexed properties", + "", + }, + { + "short []int8", + &B1{B: makeInt8Slice(3)}, + &B1{B: makeInt8Slice(3)}, + "", + "", + }, + { + "long myBlob", + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short myBlob", + &B2{B: makeUint8Slice(3)}, + &B2{B: makeUint8Slice(3)}, + "", + "", + }, + { + "long []myByte", + &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, + &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short []myByte", + &B3{B: makeMyByteSlice(3)}, + &B3{B: makeMyByteSlice(3)}, + "", + "", + }, + { + "slice of blobs", + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + "", + "", + }, + { + "[]byte must be noindex", + &PropertyList{ + Property{Name: "B", Value: makeUint8Slice(1501), NoIndex: false}, + }, + nil, + "[]byte property too long to index", + "", + }, + { + "string must be noindex", + &PropertyList{ + Property{Name: "B", Value: strings.Repeat("x", 1501), NoIndex: false}, + }, + nil, + "string property too long to index", + "", + }, + { + "slice of []byte must be noindex", + &PropertyList{ + Property{Name: "B", Value: []interface{}{ + []byte("short"), + makeUint8Slice(1501), + }, NoIndex: false}, + }, + nil, + "[]byte property too long to index", + "", + }, + { + "slice of string must be noindex", + &PropertyList{ + Property{Name: "B", Value: []interface{}{ + "short", + strings.Repeat("x", 1501), + }, NoIndex: false}, + }, + nil, + "string property too long to index", + "", + }, + { + "save tagged load props", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, + &PropertyList{ + // A and B are renamed to a and b; A and C are noindex, I is ignored. + // Order is sorted as per byName. 
+ Property{Name: "C", Value: int64(3), NoIndex: true}, + Property{Name: "D", Value: int64(4), NoIndex: false}, + Property{Name: "E", Value: int64(5), NoIndex: false}, + Property{Name: "J", Value: int64(7), NoIndex: true}, + Property{Name: "a", Value: int64(1), NoIndex: true}, + Property{Name: "b", Value: []interface{}{int64(21), int64(22), int64(23)}, NoIndex: false}, + }, + "", + "", + }, + { + "save tagged load tagged", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7}, + "", + "", + }, + { + "save props load tagged", + &PropertyList{ + Property{Name: "A", Value: int64(11), NoIndex: true}, + Property{Name: "a", Value: int64(12), NoIndex: true}, + }, + &Tagged{A: 12}, + "", + `cannot load field "A"`, + }, + { + "invalid tagged1", + &InvalidTagged1{I: 1}, + &InvalidTagged1{}, + "struct tag has invalid property name", + "", + }, + { + "invalid tagged2", + &InvalidTagged2{I: 1, J: 2}, + &InvalidTagged2{}, + "struct tag has repeated property name", + "", + }, + { + "doubler", + &Doubler{S: "s", I: 1, B: true}, + &Doubler{S: "ss", I: 2, B: true}, + "", + "", + }, + { + "save struct load props", + &X0{S: "s", I: 1}, + &PropertyList{ + Property{Name: "I", Value: int64(1), NoIndex: false}, + Property{Name: "S", Value: "s", NoIndex: false}, + }, + "", + "", + }, + { + "save props load struct", + &PropertyList{ + Property{Name: "I", Value: int64(1), NoIndex: false}, + Property{Name: "S", Value: "s", NoIndex: false}, + }, + &X0{S: "s", I: 1}, + "", + "", + }, + { + "nil-value props", + &PropertyList{ + Property{Name: "I", Value: nil, NoIndex: false}, + Property{Name: "B", Value: nil, NoIndex: false}, + Property{Name: "S", Value: nil, NoIndex: false}, + Property{Name: "F", Value: nil, NoIndex: false}, + Property{Name: "K", Value: nil, NoIndex: false}, + Property{Name: "T", Value: nil, NoIndex: false}, + Property{Name: "J", Value: []interface{}{nil, int64(7), nil}, NoIndex: false}, + }, + &struct { + I int64 + B bool + S string + F float64 + K *Key + T time.Time + J []int64 + }{ + J: []int64{0, 7, 0}, + }, + "", + "", + }, + { + "save outer load props", + &Outer{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, + Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + "", + "", + }, + { + "save props load outer-equivalent", + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, + Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + &OuterEquivalent{ + A: 1, + IDotW: []int32{10, 20, 30}, + IDotX: []string{"ten", "twenty", "thirty"}, + JDotY: 3.14, + Z: true, + }, + "", + "", + }, + { + "save outer-equivalent load outer", + &OuterEquivalent{ + A: 1, + IDotW: []int32{10, 20, 30}, + IDotX: []string{"ten", "twenty", "thirty"}, + JDotY: 3.14, + Z: true, + }, + &Outer{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + 
J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + "", + "", + }, + { + "dotted names save", + &Dotted{A: DottedA{B: DottedB{C: 88}}}, + &PropertyList{ + Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(88), NoIndex: false}, + }, + "", + "", + }, + { + "dotted names load", + &PropertyList{ + Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(99), NoIndex: false}, + }, + &Dotted{A: DottedA{B: DottedB{C: 99}}}, + "", + "", + }, + { + "save struct load deriver", + &X0{S: "s", I: 1}, + &Deriver{S: "s", Derived: "derived+s"}, + "", + "", + }, + { + "save deriver load struct", + &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"}, + &X0{S: "s"}, + "", + "", + }, + { + "zero time.Time", + &T{T: time.Time{}}, + &T{T: time.Time{}}, + "", + "", + }, + { + "time.Time near Unix zero time", + &T{T: time.Unix(0, 4e3)}, + &T{T: time.Unix(0, 4e3)}, + "", + "", + }, + { + "time.Time, far in the future", + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + "", + "", + }, + { + "time.Time, very far in the past", + &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "time.Time, very far in the future", + &T{T: time.Date(294248, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "structs", + &N0{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: X0{S: "four", I: 5, i: 6}, + Ignore: "ignore", + Other: "other", + }, + &N0{ + X0: X0{S: "one", I: 2}, + Nonymous: X0{S: "four", I: 5}, + Other: "other", + }, + "", + "", + }, + { + "slice of structs", + &N1{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: []X0{ + {S: "four", I: 5, i: 6}, + {S: "seven", I: 8, i: 9}, + {S: "ten", I: 11, i: 12}, + {S: "thirteen", I: 14, i: 15}, + }, + Ignore: "ignore", + Other: "other", + }, + &N1{ + X0: X0{S: "one", I: 2}, + Nonymous: []X0{ + {S: "four", I: 5}, + {S: "seven", I: 8}, + {S: "ten", I: 11}, + {S: "thirteen", I: 14}, + }, + Other: "other", + }, + "", + "", + }, + { + "structs with slices of structs", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + "", + "", + }, + { + "save structs load props", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &PropertyList{ + Property{Name: "Blue.I", Value: int64(0), NoIndex: false}, + Property{Name: "Blue.Nonymous.I", Value: []interface{}{int64(0), int64(0), int64(0), int64(0)}, NoIndex: false}, + Property{Name: "Blue.Nonymous.S", Value: []interface{}{"blu0", "blu1", "blu2", "blu3"}, NoIndex: false}, + Property{Name: "Blue.Other", Value: "", NoIndex: false}, + Property{Name: "Blue.S", Value: "bleu", NoIndex: false}, + 
Property{Name: "green.I", Value: int64(0), NoIndex: false}, + Property{Name: "green.Nonymous.I", Value: []interface{}{int64(0), int64(0), int64(0)}, NoIndex: false}, + Property{Name: "green.Nonymous.S", Value: []interface{}{"verde0", "verde1", "verde2"}, NoIndex: false}, + Property{Name: "green.Other", Value: "", NoIndex: false}, + Property{Name: "green.S", Value: "vert", NoIndex: false}, + Property{Name: "red.I", Value: int64(0), NoIndex: false}, + Property{Name: "red.Nonymous.I", Value: []interface{}{int64(0), int64(0)}, NoIndex: false}, + Property{Name: "red.Nonymous.S", Value: []interface{}{"rosso0", "rosso1"}, NoIndex: false}, + Property{Name: "red.Other", Value: "", NoIndex: false}, + Property{Name: "red.S", Value: "rouge", NoIndex: false}, + }, + "", + "", + }, + { + "anonymous field with tag", + &N3{ + C3: C3{C: "s"}, + }, + &PropertyList{ + Property{Name: "red.C", Value: "s", NoIndex: false}, + }, + "", + "", + }, + { + "unexported anonymous field", + &N4{ + c4: c4{C: "s"}, + }, + new(PropertyList), + "", + "", + }, + { + "unexported anonymous field with tag", + &N5{ + c4: c4{C: "s"}, + }, + new(PropertyList), + "", + "", + }, + { + "save props load structs with ragged fields", + &PropertyList{ + Property{Name: "red.S", Value: "rot", NoIndex: false}, + Property{Name: "green.Nonymous.I", Value: []interface{}{int64(10), int64(11), int64(12), int64(13)}, NoIndex: false}, + Property{Name: "Blue.Nonymous.I", Value: []interface{}{int64(20), int64(21)}, NoIndex: false}, + Property{Name: "Blue.Nonymous.S", Value: []interface{}{"blau0", "blau1", "blau2"}, NoIndex: false}, + }, + &N2{ + N1: N1{ + X0: X0{S: "rot"}, + }, + Green: N1{ + Nonymous: []X0{ + {I: 10}, + {I: 11}, + {I: 12}, + {I: 13}, + }, + }, + Blue: N1{ + Nonymous: []X0{ + {S: "blau0", I: 20}, + {S: "blau1", I: 21}, + {S: "blau2"}, + }, + }, + }, + "", + "", + }, + { + "save structs with noindex tags", + &struct { + A struct { + X string `datastore:",noindex"` + Y string + } `datastore:",noindex"` + B struct { + X string `datastore:",noindex"` + Y string + } + }{}, + &PropertyList{ + Property{Name: "A.X", Value: "", NoIndex: true}, + Property{Name: "A.Y", Value: "", NoIndex: true}, + Property{Name: "B.X", Value: "", NoIndex: true}, + Property{Name: "B.Y", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "embedded struct with name override", + &struct { + Inner1 `datastore:"foo"` + }{}, + &PropertyList{ + Property{Name: "foo.W", Value: int64(0), NoIndex: false}, + Property{Name: "foo.X", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "slice of slices", + &SliceOfSlices{}, + nil, + "flattening nested structs leads to a slice of slices", + "", + }, + { + "recursive struct", + &Recursive{}, + nil, + "recursive struct", + "", + }, + { + "mutually recursive struct", + &MutuallyRecursive0{}, + nil, + "recursive struct", + "", + }, + { + "non-exported struct fields", + &struct { + i, J int64 + }{i: 1, J: 2}, + &PropertyList{ + Property{Name: "J", Value: int64(2), NoIndex: false}, + }, + "", + "", + }, + { + "json.RawMessage", + &struct { + J json.RawMessage + }{ + J: json.RawMessage("rawr"), + }, + &PropertyList{ + Property{Name: "J", Value: []byte("rawr"), NoIndex: false}, + }, + "", + "", + }, + { + "json.RawMessage to myBlob", + &struct { + B json.RawMessage + }{ + B: json.RawMessage("rawr"), + }, + &B2{B: myBlob("rawr")}, + "", + "", + }, + { + "repeated property names", + &PropertyList{ + Property{Name: "A", Value: ""}, + Property{Name: "A", Value: ""}, + }, + nil, + "duplicate Property", + "", + }, +} + +// checkErr 
returns the empty string if either both want and err are zero, +// or if want is a non-empty substring of err's string representation. +func checkErr(want string, err error) string { + if err != nil { + got := err.Error() + if want == "" || strings.Index(got, want) == -1 { + return got + } + } else if want != "" { + return fmt.Sprintf("want error %q", want) + } + return "" +} + +func TestRoundTrip(t *testing.T) { + for _, tc := range testCases { + p, err := saveEntity(testKey0, tc.src) + if s := checkErr(tc.putErr, err); s != "" { + t.Errorf("%s: save: %s", tc.desc, s) + continue + } + if p == nil { + continue + } + var got interface{} + if _, ok := tc.want.(*PropertyList); ok { + got = new(PropertyList) + } else { + got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + } + err = loadEntity(got, p) + if s := checkErr(tc.getErr, err); s != "" { + t.Errorf("%s: load: %s", tc.desc, s) + continue + } + if pl, ok := got.(*PropertyList); ok { + // Sort by name to make sure we have a deterministic order. + sort.Stable(byName(*pl)) + } + equal := false + if gotT, ok := got.(*T); ok { + // Round tripping a time.Time can result in a different time.Location: Local instead of UTC. + // We therefore test equality explicitly, instead of relying on reflect.DeepEqual. + equal = gotT.T.Equal(tc.want.(*T).T) + } else { + equal = reflect.DeepEqual(got, tc.want) + } + if !equal { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, got, tc.want) + t.Logf("intermediate proto (%s):\n%s", tc.desc, proto.MarshalTextString(p)) + continue + } + } +} + +func TestQueryConstruction(t *testing.T) { + tests := []struct { + q, exp *Query + err string + }{ + { + q: NewQuery("Foo"), + exp: &Query{ + kind: "Foo", + limit: -1, + }, + }, + { + // Regular filtered query with standard spacing. + q: NewQuery("Foo").Filter("foo >", 7), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterThan, + Value: 7, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with no spacing. + q: NewQuery("Foo").Filter("foo=", 6), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: equal, + Value: 6, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with funky spacing. + q: NewQuery("Foo").Filter(" foo< ", 8), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: lessThan, + Value: 8, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with multicharacter op. + q: NewQuery("Foo").Filter("foo >=", 9), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterEq, + Value: 9, + }, + }, + limit: -1, + }, + }, + { + // Query with ordering. + q: NewQuery("Foo").Order("bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: ascending, + }, + }, + limit: -1, + }, + }, + { + // Query with reverse ordering, and funky spacing. + q: NewQuery("Foo").Order(" - bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: descending, + }, + }, + limit: -1, + }, + }, + { + // Query with an empty ordering. + q: NewQuery("Foo").Order(""), + err: "empty order", + }, + { + // Query with a + ordering. 
+ q: NewQuery("Foo").Order("+bar"), + err: "invalid order", + }, + } + for i, test := range tests { + if test.q.err != nil { + got := test.q.err.Error() + if !strings.Contains(got, test.err) { + t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err) + } + continue + } + if !reflect.DeepEqual(test.q, test.exp) { + t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp) + } + } +} + +func TestPutMultiTypes(t *testing.T) { + ctx := context.Background() + type S struct { + A int + B string + } + + testCases := []struct { + desc string + src interface{} + wantErr bool + }{ + // Test cases to check each of the valid input types for src. + // Each case has the same elements. + { + desc: "type []struct", + src: []S{ + {1, "one"}, {2, "two"}, + }, + }, + { + desc: "type []*struct", + src: []*S{ + {1, "one"}, {2, "two"}, + }, + }, + { + desc: "type []interface{} with PLS elems", + src: []interface{}{ + &PropertyList{Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, + &PropertyList{Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, + }, + }, + { + desc: "type []interface{} with struct ptr elems", + src: []interface{}{ + &S{1, "one"}, &S{2, "two"}, + }, + }, + { + desc: "type []PropertyLoadSaver{}", + src: []PropertyLoadSaver{ + &PropertyList{Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, + &PropertyList{Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, + }, + }, + { + desc: "type []P (non-pointer, *P implements PropertyLoadSaver)", + src: []PropertyList{ + {Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, + {Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, + }, + }, + // Test some invalid cases. + { + desc: "type []interface{} with struct elems", + src: []interface{}{ + S{1, "one"}, S{2, "two"}, + }, + wantErr: true, + }, + { + desc: "PropertyList", + src: PropertyList{ + Property{Name: "A", Value: 1}, + Property{Name: "B", Value: "one"}, + }, + wantErr: true, + }, + { + desc: "type []int", + src: []int{1, 2}, + wantErr: true, + }, + { + desc: "not a slice", + src: S{1, "one"}, + wantErr: true, + }, + } + + // Use the same keys and expected entities for all tests. + keys := []*Key{ + NewKey(ctx, "testKind", "first", 0, nil), + NewKey(ctx, "testKind", "second", 0, nil), + } + want := []*pb.Mutation{ + {Operation: &pb.Mutation_Upsert{&pb.Entity{ + Key: keyToProto(keys[0]), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_IntegerValue{1}}, + "B": {ValueType: &pb.Value_StringValue{"one"}}, + }, + }}}, + {Operation: &pb.Mutation_Upsert{&pb.Entity{ + Key: keyToProto(keys[1]), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_IntegerValue{2}}, + "B": {ValueType: &pb.Value_StringValue{"two"}}, + }, + }}}, + } + + for _, tt := range testCases { + // Set up a fake client which captures upserts. 
+ var got []*pb.Mutation
+ client := &Client{
+ client: &fakeClient{
+ commitFn: func(req *pb.CommitRequest) (*pb.CommitResponse, error) {
+ got = req.Mutations
+ return &pb.CommitResponse{}, nil
+ },
+ },
+ }
+
+ _, err := client.PutMulti(ctx, keys, tt.src)
+ if err != nil {
+ if !tt.wantErr {
+ t.Errorf("%s: error %v", tt.desc, err)
+ }
+ continue
+ }
+ if tt.wantErr {
+ t.Errorf("%s: wanted error, but none returned", tt.desc)
+ continue
+ }
+ if len(got) != len(want) {
+ t.Errorf("%s: got %d entities, want %d", tt.desc, len(got), len(want))
+ continue
+ }
+ for i, e := range got {
+ if !proto.Equal(e, want[i]) {
+ t.Errorf("%s: entity %d doesn't match\ngot: %v\nwant: %v", tt.desc, i, e, want[i])
+ }
+ }
+ }
+}
+
+func TestNoIndexOnSliceProperties(t *testing.T) {
+ // Check that ExcludeFromIndexes is set on the inner elements,
+ // rather than the top-level ArrayValue value.
+ ctx := context.Background()
+ pl := PropertyList{
+ Property{
+ Name: "repeated",
+ Value: []interface{}{
+ 123,
+ false,
+ "short",
+ strings.Repeat("a", 1503),
+ },
+ NoIndex: true,
+ },
+ }
+ key := NewKey(ctx, "dummy", "dummy", 0, nil)
+
+ entity, err := saveEntity(key, &pl)
+ if err != nil {
+ t.Fatalf("saveEntity: %v", err)
+ }
+
+ want := &pb.Value{
+ ValueType: &pb.Value_ArrayValue{&pb.ArrayValue{[]*pb.Value{
+ {ValueType: &pb.Value_IntegerValue{123}, ExcludeFromIndexes: true},
+ {ValueType: &pb.Value_BooleanValue{false}, ExcludeFromIndexes: true},
+ {ValueType: &pb.Value_StringValue{"short"}, ExcludeFromIndexes: true},
+ {ValueType: &pb.Value_StringValue{strings.Repeat("a", 1503)}, ExcludeFromIndexes: true},
+ }}},
+ }
+ if got := entity.Properties["repeated"]; !proto.Equal(got, want) {
+ t.Errorf("Entity proto differs\ngot: %v\nwant: %v", got, want)
+ }
+}
+
+type byName PropertyList
+
+func (s byName) Len() int { return len(s) }
+func (s byName) Less(i, j int) bool { return s[i].Name < s[j].Name }
+func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func TestValidGeoPoint(t *testing.T) {
+ testCases := []struct {
+ desc string
+ pt GeoPoint
+ want bool
+ }{
+ {
+ "valid",
+ GeoPoint{67.21, 13.37},
+ true,
+ },
+ {
+ "low lat",
+ GeoPoint{-90.01, 13.37},
+ false,
+ },
+ {
+ "high lat",
+ GeoPoint{90.01, 13.37},
+ false,
+ },
+ {
+ "high lng",
+ GeoPoint{67.21, 182},
+ false,
+ },
+ {
+ "low lng",
+ GeoPoint{67.21, -181},
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ if got := tc.pt.Valid(); got != tc.want {
+ t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want)
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/datastore/doc.go b/vendor/cloud.google.com/go/datastore/doc.go
new file mode 100644
index 000000000..600c1413c
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/doc.go
@@ -0,0 +1,320 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package datastore provides a client for Google Cloud Datastore.
+
+
+Basic Operations
+
+Entities are the unit of storage and are associated with a key.
A key +consists of an optional parent key, a string application ID, a string kind +(also known as an entity type), and either a StringID or an IntID. A +StringID is also known as an entity name or key name. + +It is valid to create a key with a zero StringID and a zero IntID; this is +called an incomplete key, and does not refer to any saved entity. Putting an +entity into the datastore under an incomplete key will cause a unique key +to be generated for that entity, with a non-zero IntID. + +An entity's contents are a mapping from case-sensitive field names to values. +Valid value types are: + - signed integers (int, int8, int16, int32 and int64), + - bool, + - string, + - float32 and float64, + - []byte (up to 1 megabyte in length), + - any type whose underlying type is one of the above predeclared types, + - *Key, + - GeoPoint, + - time.Time (stored with microsecond precision), + - structs whose fields are all valid value types, + - slices of any of the above. + +Slices of structs are valid, as are structs that contain slices. However, if +one struct contains another, then at most one of those can be repeated. This +disqualifies recursively defined struct types: any struct T that (directly or +indirectly) contains a []T. + +The Get and Put functions load and save an entity's contents. An entity's +contents are typically represented by a struct pointer. + +Example code: + + type Entity struct { + Value string + } + + func main() { + ctx := context.Background() + + // Create a datastore client. In a typical application, you would create + // a single client which is reused for every datastore operation. + dsClient, err := datastore.NewClient(ctx, "my-project") + if err != nil { + // Handle error. + } + + k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil) + e := new(Entity) + if err := dsClient.Get(ctx, k, e); err != nil { + // Handle error. + } + + old := e.Value + e.Value = "Hello World!" + + if _, err := dsClient.Put(ctx, k, e); err != nil { + // Handle error. + } + + fmt.Printf("Updated value from %q to %q\n", old, e.Value) + } + +GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and +Delete functions. They take a []*Key instead of a *Key, and may return a +datastore.MultiError when encountering partial failure. + + +Properties + +An entity's contents can be represented by a variety of types. These are +typically struct pointers, but can also be any type that implements the +PropertyLoadSaver interface. If using a struct pointer, you do not have to +explicitly implement the PropertyLoadSaver interface; the datastore will +automatically convert via reflection. If a struct pointer does implement that +interface then those methods will be used in preference to the default +behavior for struct pointers. Struct pointers are more strongly typed and are +easier to use; PropertyLoadSavers are more flexible. + +The actual types passed do not have to match between Get and Put calls or even +across different calls to datastore. It is valid to put a *PropertyList and +get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1. +Conceptually, any entity is saved as a sequence of properties, and is loaded +into the destination value on a property-by-property basis. When loading into +a struct pointer, an entity that cannot be completely represented (such as a +missing field) will result in an ErrFieldMismatch error but it is up to the +caller whether this error is fatal, recoverable or ignorable. 
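+
+As a small sketch of this flexibility (the Person kind, the key name and the
+Name property here are illustrative only; dsClient is the client created in
+the example above), an entity written from a PropertyList can be read back
+into a plain struct:
+
+ props := datastore.PropertyList{
+ {Name: "Name", Value: "Gopher"},
+ }
+ k := datastore.NewKey(ctx, "Person", "gopher", 0, nil)
+ if _, err := dsClient.Put(ctx, k, &props); err != nil {
+ // Handle error.
+ }
+
+ var p struct{ Name string }
+ if err := dsClient.Get(ctx, k, &p); err != nil {
+ // Handle error.
+ }
+
+If the stored entity later gained a property with no matching struct field,
+Get would return an ErrFieldMismatch which, as noted above, the caller may
+treat as fatal or ignore.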
+
+By default, for struct pointers, all properties are potentially indexed, and
+the property name is the same as the field name (and hence must start with an
+upper case letter). Fields may have a `datastore:"name,options"` tag. The tag
+name is the property name, which must be one or more valid Go identifiers
+joined by ".", but may start with a lower case letter. An empty tag name means
+to just use the field name. A "-" tag name means that the datastore will
+ignore that field. If options is "noindex" then the field will not be indexed.
+If options is "" then the comma may be omitted. There are no other
+recognized options.
+
+All fields are indexed by default. Strings or byte slices longer than 1500
+bytes cannot be indexed; fields used to store long strings and byte slices must
+be tagged with "noindex" or they will cause Put operations to fail.
+
+Example code:
+
+ // A and B are renamed to a and b.
+ // A, C and J are not indexed.
+ // D's tag is equivalent to having no tag at all (E).
+ // I is ignored entirely by the datastore.
+ // J has tag information for both the datastore and json packages.
+ type TaggedStruct struct {
+ A int `datastore:"a,noindex"`
+ B int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+ }
+
+
+Structured Properties
+
+If the struct pointed to contains other structs, then the nested or embedded
+structs are flattened. For example, given these definitions:
+
+ type Inner1 struct {
+ W int32
+ X string
+ }
+
+ type Inner2 struct {
+ Y float64
+ }
+
+ type Inner3 struct {
+ Z bool
+ }
+
+ type Outer struct {
+ A int16
+ I []Inner1
+ J Inner2
+ Inner3
+ }
+
+then an Outer's properties would be equivalent to those of:
+
+ type OuterEquivalent struct {
+ A int16
+ IDotW []int32 `datastore:"I.W"`
+ IDotX []string `datastore:"I.X"`
+ JDotY float64 `datastore:"J.Y"`
+ Z bool
+ }
+
+If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
+equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
+
+If an outer struct is tagged "noindex" then all of its implicit flattened
+fields are effectively "noindex".
+
+
+The PropertyLoadSaver Interface
+
+An entity's contents can also be represented by any type that implements the
+PropertyLoadSaver interface. This type may be a struct pointer, but it does
+not have to be. The datastore package will call Load when getting the entity's
+contents, and Save when putting the entity's contents.
+Possible uses include deriving non-stored fields, verifying fields, or indexing
+a field only if its value is positive.
+
+Example code:
+
+ type CustomPropsExample struct {
+ I, J int
+ // Sum is not stored, but should always be equal to I + J.
+ Sum int `datastore:"-"`
+ }
+
+ func (x *CustomPropsExample) Load(ps []datastore.Property) error {
+ // Load I and J as usual.
+ if err := datastore.LoadStruct(x, ps); err != nil {
+ return err
+ }
+ // Derive the Sum field.
+ x.Sum = x.I + x.J
+ return nil
+ }
+
+ func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
+ // Validate the Sum field.
+ if x.Sum != x.I + x.J {
+ return nil, errors.New("CustomPropsExample has inconsistent sum")
+ }
+ // Save I and J as usual. The code below is equivalent to calling
+ // "return datastore.SaveStruct(x)", but is done manually for
+ // demonstration purposes.
+ return []datastore.Property{
+ {
+ Name: "I",
+ Value: int64(x.I),
+ },
+ {
+ Name: "J",
+ Value: int64(x.J),
+ },
+ }, nil
+ }
+
+The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
+arbitrary entity's contents.
+
+
+Queries
+
+Queries retrieve entities based on their properties or key's ancestry. Running
+a query yields an iterator of results: either keys or (key, entity) pairs.
+Queries are re-usable and it is safe to call Query.Run from concurrent
+goroutines. Iterators are not safe for concurrent use.
+
+Queries are immutable, and are either created by calling NewQuery, or derived
+from an existing query by calling a method like Filter or Order that returns a
+new query value. A query is typically constructed by calling NewQuery followed
+by a chain of zero or more such methods. These methods are:
+ - Ancestor and Filter constrain the entities returned by running a query.
+ - Order affects the order in which they are returned.
+ - Project constrains the fields returned.
+ - Distinct de-duplicates projected entities.
+ - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
+ - Start, End, Offset and Limit define which sub-sequence of matching entities
+ to return. Start and End take cursors, Offset and Limit take integers. Start
+ and Offset affect the first result, End and Limit affect the last result.
+ If both Start and Offset are set, then the offset is relative to Start.
+ If both End and Limit are set, then the earliest constraint wins. Limit is
+ relative to Start+Offset, not relative to End. As a special case, a
+ negative limit means unlimited.
+
+Example code:
+
+ type Widget struct {
+ Description string
+ Price int
+ }
+
+ func printWidgets(ctx context.Context, client *datastore.Client) {
+ q := datastore.NewQuery("Widget").
+ Filter("Price <", 1000).
+ Order("-Price")
+ for t := client.Run(ctx, q); ; {
+ var x Widget
+ key, err := t.Next(&x)
+ if err == datastore.Done {
+ break
+ }
+ if err != nil {
+ // Handle error.
+ }
+ fmt.Printf("Key=%v\nWidget=%#v\n\n", key, x)
+ }
+ }
+
+
+Transactions
+
+Client.RunInTransaction runs a function in a transaction.
+
+Example code:
+
+ type Counter struct {
+ Count int
+ }
+
+ func incCount(ctx context.Context, client *datastore.Client) {
+ var count int
+ key := datastore.NewKey(ctx, "Counter", "singleton", 0, nil)
+ _, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
+ var x Counter
+ if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity {
+ return err
+ }
+ x.Count++
+ if _, err := tx.Put(key, &x); err != nil {
+ return err
+ }
+ count = x.Count
+ return nil
+ })
+ if err != nil {
+ // Handle error.
+ }
+ // The value of count is only valid once the transaction is successful
+ // (RunInTransaction has returned a nil error).
+ fmt.Printf("Count=%d\n", count)
+ }
+*/
+package datastore // import "cloud.google.com/go/datastore"
+
+// resourcePrefixHeader is the name of the metadata header used to indicate
+// the resource being operated on.
+const resourcePrefixHeader = "google-cloud-resource-prefix"
diff --git a/vendor/cloud.google.com/go/datastore/errors.go b/vendor/cloud.google.com/go/datastore/errors.go
new file mode 100644
index 000000000..3077f80d3
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/errors.go
@@ -0,0 +1,47 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides error functions for common API failure modes.
+
+package datastore
+
+import (
+ "fmt"
+)
+
+// MultiError is returned by batch operations when there are errors with
+// particular elements. Errors will be in a one-to-one correspondence with
+// the input elements; successful elements will have a nil entry.
+type MultiError []error
+
+func (m MultiError) Error() string {
+ s, n := "", 0
+ for _, e := range m {
+ if e != nil {
+ if n == 0 {
+ s = e.Error()
+ }
+ n++
+ }
+ }
+ switch n {
+ case 0:
+ return "(0 errors)"
+ case 1:
+ return s
+ case 2:
+ return s + " (and 1 other error)"
+ }
+ return fmt.Sprintf("%s (and %d other errors)", s, n-1)
+}
diff --git a/vendor/cloud.google.com/go/datastore/example_test.go b/vendor/cloud.google.com/go/datastore/example_test.go
new file mode 100644
index 000000000..ddef4ab5d
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/example_test.go
@@ -0,0 +1,235 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datastore_test
+
+import (
+ "log"
+ "time"
+
+ "cloud.google.com/go/datastore"
+ "golang.org/x/net/context"
+)
+
+// TODO(jbd): Document other authorization methods and refer to them here.
+func Example_auth() *datastore.Client {
+ ctx := context.Background()
+ // Use Google Application Default Credentials to authorize and authenticate the client.
+ // More information about Application Default Credentials and how to enable them is at
+ // https://developers.google.com/identity/protocols/application-default-credentials.
+ client, err := datastore.NewClient(ctx, "project-id")
+ if err != nil {
+ log.Fatal(err)
+ }
+ // Use the client (see other examples).
+ return client +} + +func ExampleGet() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + type Article struct { + Title string + Description string + Body string `datastore:",noindex"` + Author *datastore.Key + PublishedAt time.Time + } + key := datastore.NewKey(ctx, "Article", "articled1", 0, nil) + article := &Article{} + if err := client.Get(ctx, key, article); err != nil { + log.Fatal(err) + } +} + +func ExamplePut() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + type Article struct { + Title string + Description string + Body string `datastore:",noindex"` + Author *datastore.Key + PublishedAt time.Time + } + newKey := datastore.NewIncompleteKey(ctx, "Article", nil) + _, err = client.Put(ctx, newKey, &Article{ + Title: "The title of the article", + Description: "The description of the article...", + Body: "...", + Author: datastore.NewKey(ctx, "Author", "jbd", 0, nil), + PublishedAt: time.Now(), + }) + if err != nil { + log.Fatal(err) + } +} + +func ExampleDelete() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + key := datastore.NewKey(ctx, "Article", "articled1", 0, nil) + if err := client.Delete(ctx, key); err != nil { + log.Fatal(err) + } +} + +type Post struct { + Title string + PublishedAt time.Time + Comments int +} + +func ExampleGetMulti() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + keys := []*datastore.Key{ + datastore.NewKey(ctx, "Post", "post1", 0, nil), + datastore.NewKey(ctx, "Post", "post2", 0, nil), + datastore.NewKey(ctx, "Post", "post3", 0, nil), + } + posts := make([]Post, 3) + if err := client.GetMulti(ctx, keys, posts); err != nil { + log.Println(err) + } +} + +func ExamplePutMulti_slice() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + keys := []*datastore.Key{ + datastore.NewKey(ctx, "Post", "post1", 0, nil), + datastore.NewKey(ctx, "Post", "post2", 0, nil), + } + + // PutMulti with a Post slice. + posts := []*Post{ + {Title: "Post 1", PublishedAt: time.Now()}, + {Title: "Post 2", PublishedAt: time.Now()}, + } + if _, err := client.PutMulti(ctx, keys, posts); err != nil { + log.Fatal(err) + } +} + +func ExamplePutMulti_interfaceSlice() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + keys := []*datastore.Key{ + datastore.NewKey(ctx, "Post", "post1", 0, nil), + datastore.NewKey(ctx, "Post", "post2", 0, nil), + } + + // PutMulti with an empty interface slice. + posts := []interface{}{ + &Post{Title: "Post 1", PublishedAt: time.Now()}, + &Post{Title: "Post 2", PublishedAt: time.Now()}, + } + if _, err := client.PutMulti(ctx, keys, posts); err != nil { + log.Fatal(err) + } +} + +func ExampleQuery() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + // Count the number of the post entities. + q := datastore.NewQuery("Post") + n, err := client.Count(ctx, q) + if err != nil { + log.Fatal(err) + } + log.Printf("There are %d posts.", n) + + // List the posts published since yesterday. 
+ yesterday := time.Now().Add(-24 * time.Hour)
+ q = datastore.NewQuery("Post").Filter("PublishedAt >", yesterday)
+ it := client.Run(ctx, q)
+ // Use the iterator.
+ _ = it
+
+ // Order the posts by the number of comments they have received.
+ datastore.NewQuery("Post").Order("-Comments")
+
+ // Start listing from an offset and limit the results.
+ datastore.NewQuery("Post").Offset(20).Limit(10)
+}
+
+func ExampleTransaction() {
+ ctx := context.Background()
+ client, err := datastore.NewClient(ctx, "project-id")
+ if err != nil {
+ log.Fatal(err)
+ }
+ const retries = 3
+
+ // Increment a counter.
+ // See https://cloud.google.com/appengine/articles/sharding_counters for
+ // a more scalable solution.
+ type Counter struct {
+ Count int
+ }
+
+ key := datastore.NewKey(ctx, "counter", "CounterA", 0, nil)
+
+ for i := 0; i < retries; i++ {
+ tx, err := client.NewTransaction(ctx)
+ if err != nil {
+ break
+ }
+
+ var c Counter
+ if err := tx.Get(key, &c); err != nil && err != datastore.ErrNoSuchEntity {
+ break
+ }
+ c.Count++
+ if _, err := tx.Put(key, &c); err != nil {
+ break
+ }
+
+ // Attempt to commit the transaction. If there's a conflict, try again.
+ if _, err := tx.Commit(); err != datastore.ErrConcurrentTransaction {
+ break
+ }
+ }
+
+}
diff --git a/vendor/cloud.google.com/go/datastore/examples_test.go b/vendor/cloud.google.com/go/datastore/examples_test.go
new file mode 100644
index 000000000..6d9d87836
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/examples_test.go
@@ -0,0 +1,731 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datastore_test
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "cloud.google.com/go/datastore"
+ "golang.org/x/net/context"
+)
+
+type Task struct {
+ Category string
+ Done bool
+ Priority int
+ Description string `datastore:",noindex"`
+ PercentComplete float64
+ Created time.Time
+ Tags []string
+ Collaborators []string
+}
+
+func ExampleNewIncompleteKey() {
+ ctx := context.Background()
+ // [START incomplete_key]
+ taskKey := datastore.NewIncompleteKey(ctx, "Task", nil)
+ // [END incomplete_key]
+ _ = taskKey // Use the task key for datastore operations.
+}
+
+func ExampleNewKey() {
+ ctx := context.Background()
+ // [START named_key]
+ taskKey := datastore.NewKey(ctx, "Task", "sampletask", 0, nil)
+ // [END named_key]
+ _ = taskKey // Use the task key for datastore operations.
+}
+
+func ExampleNewKey_withParent() {
+ ctx := context.Background()
+ // [START key_with_parent]
+ parentKey := datastore.NewKey(ctx, "TaskList", "default", 0, nil)
+ taskKey := datastore.NewKey(ctx, "Task", "sampleTask", 0, parentKey)
+ // [END key_with_parent]
+ _ = taskKey // Use the task key for datastore operations.
+} + +func ExampleNewKey_withMultipleParents() { + ctx := context.Background() + // [START key_with_multilevel_parent] + userKey := datastore.NewKey(ctx, "User", "alice", 0, nil) + parentKey := datastore.NewKey(ctx, "TaskList", "default", 0, userKey) + taskKey := datastore.NewKey(ctx, "Task", "sampleTask", 0, parentKey) + // [END key_with_multilevel_parent] + _ = taskKey // Use the task key for datastore operations. +} + +func ExampleClient_Put() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + // [START entity_with_parent] + parentKey := datastore.NewKey(ctx, "TaskList", "default", 0, nil) + key := datastore.NewIncompleteKey(ctx, "Task", parentKey) + + task := Task{ + Category: "Personal", + Done: false, + Priority: 4, + Description: "Learn Cloud Datastore", + } + + // A complete key is assigned to the entity when it is Put. + var err error + key, err = client.Put(ctx, key, &task) + // [END entity_with_parent] + _ = err // Make sure you check err. +} + +func Example_properties() { + // [START properties] + type Task struct { + Category string + Done bool + Priority int + Description string `datastore:",noindex"` + PercentComplete float64 + Created time.Time + } + task := &Task{ + Category: "Personal", + Done: false, + Priority: 4, + Description: "Learn Cloud Datastore", + PercentComplete: 10.0, + Created: time.Now(), + } + // [END properties] + _ = task // Use the task in a datastore Put operation. +} + +func Example_sliceProperties() { + // [START array_value] + type Task struct { + Tags []string + Collaborators []string + } + task := &Task{ + Tags: []string{"fun", "programming"}, + Collaborators: []string{"alice", "bob"}, + } + // [END array_value] + _ = task // Use the task in a datastore Put operation. +} + +func Example_basicEntity() { + // [START basic_entity] + type Task struct { + Category string + Done bool + Priority float64 + Description string `datastore:",noindex"` + PercentComplete float64 + Created time.Time + } + task := &Task{ + Category: "Personal", + Done: false, + Priority: 4, + Description: "Learn Cloud Datastore", + PercentComplete: 10.0, + Created: time.Now(), + } + // [END basic_entity] + _ = task // Use the task in a datastore Put operation. +} + +func ExampleClient_Put_upsert() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + task := &Task{} // Populated with appropriate data. + key := datastore.NewIncompleteKey(ctx, "Task", nil) + // [START upsert] + key, err := client.Put(ctx, key, task) + // [END upsert] + _ = err // Make sure you check err. + _ = key // key is the complete key for the newly stored task +} + +func ExampleTransaction_insert() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + task := Task{} // Populated with appropriate data. + taskKey := datastore.NewKey(ctx, "Task", "sampleTask", 0, nil) + // [START insert] + _, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + // We first check that there is no entity stored with the given key. + var empty Task + if err := tx.Get(taskKey, &empty); err != datastore.ErrNoSuchEntity { + return err + } + // If there was no matching entity, store it now. + _, err := tx.Put(taskKey, &task) + return err + }) + // [END insert] + _ = err // Make sure you check err. 
+}
+
+func ExampleClient_Get() {
+ ctx := context.Background()
+ client, _ := datastore.NewClient(ctx, "my-proj")
+ taskKey := datastore.NewKey(ctx, "Task", "sampleTask", 0, nil)
+ // [START lookup]
+ var task Task
+ err := client.Get(ctx, taskKey, &task)
+ // [END lookup]
+ _ = err // Make sure you check err.
+}
+
+func ExampleTransaction_update() {
+ ctx := context.Background()
+ client, _ := datastore.NewClient(ctx, "my-proj")
+ taskKey := datastore.NewKey(ctx, "Task", "sampleTask", 0, nil)
+ // [START update]
+ tx, err := client.NewTransaction(ctx)
+ if err != nil {
+ log.Fatalf("client.NewTransaction: %v", err)
+ }
+ var task Task
+ if err := tx.Get(taskKey, &task); err != nil {
+ log.Fatalf("tx.Get: %v", err)
+ }
+ task.Priority = 5
+ if _, err := tx.Put(taskKey, &task); err != nil {
+ log.Fatalf("tx.Put: %v", err)
+ }
+ if _, err := tx.Commit(); err != nil {
+ log.Fatalf("tx.Commit: %v", err)
+ }
+ // [END update]
+}
+
+func ExampleClient_Delete() {
+ ctx := context.Background()
+ client, _ := datastore.NewClient(ctx, "my-proj")
+ key := datastore.NewKey(ctx, "Task", "sampletask", 0, nil)
+ // [START delete]
+ err := client.Delete(ctx, key)
+ // [END delete]
+ _ = err // Make sure you check err.
+}
+
+func ExampleClient_PutMulti() {
+ ctx := context.Background()
+ client, _ := datastore.NewClient(ctx, "my-proj")
+ // [START batch_upsert]
+ tasks := []*Task{
+ {
+ Category: "Personal",
+ Done: false,
+ Priority: 4,
+ Description: "Learn Cloud Datastore",
+ },
+ {
+ Category: "Personal",
+ Done: false,
+ Priority: 5,
+ Description: "Integrate Cloud Datastore",
+ },
+ }
+ keys := []*datastore.Key{
+ datastore.NewIncompleteKey(ctx, "Task", nil),
+ datastore.NewIncompleteKey(ctx, "Task", nil),
+ }
+
+ keys, err := client.PutMulti(ctx, keys, tasks)
+ // [END batch_upsert]
+ _ = err // Make sure you check err.
+ _ = keys // keys now has the complete keys for the newly stored tasks.
+}
+
+func ExampleClient_GetMulti() {
+ ctx := context.Background()
+ client, _ := datastore.NewClient(ctx, "my-proj")
+ var taskKeys []*datastore.Key // Populated with complete keys.
+ // [START batch_lookup]
+ tasks := make([]*Task, len(taskKeys))
+ err := client.GetMulti(ctx, taskKeys, tasks)
+ // [END batch_lookup]
+ _ = err // Make sure you check err.
+}
+
+func ExampleClient_DeleteMulti() {
+ ctx := context.Background()
+ client, _ := datastore.NewClient(ctx, "my-proj")
+ var taskKeys []*datastore.Key // Populated with complete keys.
+ // [START batch_delete]
+ err := client.DeleteMulti(ctx, taskKeys)
+ // [END batch_delete]
+ _ = err // Make sure you check err.
+}
+
+func ExampleQuery_basic() {
+ ctx := context.Background()
+ client, _ := datastore.NewClient(ctx, "my-proj")
+ // [START basic_query]
+ query := datastore.NewQuery("Task").
+ Filter("Done =", false).
+ Filter("Priority >=", 4).
+ Order("-Priority")
+ // [END basic_query]
+ // [START run_query]
+ it := client.Run(ctx, query)
+ for {
+ var task Task
+ _, err := it.Next(&task)
+ if err == datastore.Done {
+ break
+ }
+ if err != nil {
+ log.Fatalf("Error fetching next task: %v", err)
+ }
+ fmt.Printf("Task %q, Priority %d\n", task.Description, task.Priority)
+ }
+ // [END run_query]
+}
+
+func ExampleQuery_propertyFilter() {
+ // [START property_filter]
+ query := datastore.NewQuery("Task").Filter("Done =", false)
+ // [END property_filter]
+ _ = query // Use client.Run or client.GetAll to execute the query.
+} + +func ExampleQuery_compositeFilter() { + // [START composite_filter] + query := datastore.NewQuery("Task").Filter("Done =", false).Filter("Priority =", 4) + // [END composite_filter] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_keyFilter() { + ctx := context.Background() + // [START key_filter] + key := datastore.NewKey(ctx, "Task", "someTask", 0, nil) + query := datastore.NewQuery("Task").Filter("__key__ >", key) + // [END key_filter] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_sortAscending() { + // [START ascending_sort] + query := datastore.NewQuery("Task").Order("created") + // [END ascending_sort] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_sortDescending() { + // [START descending_sort] + query := datastore.NewQuery("Task").Order("-created") + // [END descending_sort] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_sortMulti() { + // [START multi_sort] + query := datastore.NewQuery("Task").Order("-priority").Order("created") + // [END multi_sort] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_kindless() { + var lastSeenKey *datastore.Key + // [START kindless_query] + query := datastore.NewQuery("").Filter("__key__ >", lastSeenKey) + // [END kindless_query] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_Ancestor() { + ctx := context.Background() + // [START ancestor_query] + ancestor := datastore.NewKey(ctx, "TaskList", "default", 0, nil) + query := datastore.NewQuery("Task").Ancestor(ancestor) + // [END ancestor_query] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_Project() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + // [START projection_query] + query := datastore.NewQuery("Task").Project("Priority", "PercentComplete") + // [END projection_query] + // [START run_query_projection] + var priorities []int + var percents []float64 + it := client.Run(ctx, query) + for { + var task Task + if _, err := it.Next(&task); err == datastore.Done { + break + } else if err != nil { + log.Fatal(err) + } + priorities = append(priorities, task.Priority) + percents = append(percents, task.PercentComplete) + } + // [END run_query_projection] +} + +func ExampleQuery_KeysOnly() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + // [START keys_only_query] + query := datastore.NewQuery("Task").KeysOnly() + // [END keys_only_query] + // [START run_keys_only_query] + keys, err := client.GetAll(ctx, query, nil) + // [END run_keys_only_query] + _ = err // Make sure you check err. + _ = keys // Keys contains keys for all stored tasks. +} + +func ExampleQuery_Distinct() { + // [START distinct_query] + query := datastore.NewQuery("Task"). + Project("Priority", "PercentComplete"). + Distinct(). + Order("Category").Order("Priority") + // [END distinct_query] + _ = query // Use client.Run or client.GetAll to execute the query. + + // [START distinct_on_query] + // DISTINCT ON not supported in Go API + // [END distinct_on_query] +} + +func ExampleQuery_Filter_arrayInequality() { + // [START array_value_inequality_range] + query := datastore.NewQuery("Task"). + Filter("Tag >", "learn"). 
+ Filter("Tag <", "math") + // [END array_value_inequality_range] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_Filter_arrayEquality() { + // [START array_value_equality] + query := datastore.NewQuery("Task"). + Filter("Tag =", "fun"). + Filter("Tag =", "programming") + // [END array_value_equality] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_Filter_inequality() { + // [START inequality_range] + query := datastore.NewQuery("Task"). + Filter("Created >", time.Date(1990, 1, 1, 0, 0, 0, 0, time.UTC)). + Filter("Created <", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + // [END inequality_range] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_Filter_invalidInequality() { + // [START inequality_invalid] + query := datastore.NewQuery("Task"). + Filter("Created >", time.Date(1990, 1, 1, 0, 0, 0, 0, time.UTC)). + Filter("Priority >", 3) + // [END inequality_invalid] + _ = query // The query is invalid. +} + +func ExampleQuery_Filter_mixed() { + // [START equal_and_inequality_range] + query := datastore.NewQuery("Task"). + Filter("Priority =", 4). + Filter("Done =", false). + Filter("Created >", time.Date(1990, 1, 1, 0, 0, 0, 0, time.UTC)). + Filter("Created <", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + // [END equal_and_inequality_range] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_inequalitySort() { + // [START inequality_sort] + query := datastore.NewQuery("Task"). + Filter("Priority >", 3). + Order("Priority"). + Order("Created") + // [END inequality_sort] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_invalidInequalitySortA() { + // [START inequality_sort_invalid_not_same] + query := datastore.NewQuery("Task"). + Filter("Priority >", 3). + Order("Created") + // [END inequality_sort_invalid_not_same] + _ = query // The query is invalid. +} + +func ExampleQuery_invalidInequalitySortB() { + // [START inequality_sort_invalid_not_first] + query := datastore.NewQuery("Task"). + Filter("Priority >", 3). + Order("Created"). + Order("Priority") + // [END inequality_sort_invalid_not_first] + _ = query // The query is invalid. +} + +func ExampleQuery_Limit() { + // [START limit] + query := datastore.NewQuery("Task").Limit(5) + // [END limit] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleIterator_Cursor() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + cursorStr := "" + // [START cursor_paging] + const pageSize = 5 + query := datastore.NewQuery("Tasks").Limit(pageSize) + if cursorStr != "" { + cursor, err := datastore.DecodeCursor(cursorStr) + if err != nil { + log.Fatalf("Bad cursor %q: %v", cursorStr, err) + } + query = query.Start(cursor) + } + + // Read the tasks. + var tasks []Task + var task Task + it := client.Run(ctx, query) + _, err := it.Next(&task) + for err == nil { + tasks = append(tasks, task) + _, err = it.Next(&task) + } + if err != datastore.Done { + log.Fatalf("Failed fetching results: %v", err) + } + + // Get the cursor for the next page of results. + nextCursor, err := it.Cursor() + // [END cursor_paging] + _ = err // Check the error. + _ = nextCursor // Use nextCursor.String as the next page's token. 
+} + +func ExampleQuery_EventualConsistency() { + ctx := context.Background() + // [START eventual_consistent_query] + ancestor := datastore.NewKey(ctx, "TaskList", "default", 0, nil) + query := datastore.NewQuery("Task").Ancestor(ancestor).EventualConsistency() + // [END eventual_consistent_query] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func ExampleQuery_unindexed() { + // [START unindexed_property_query] + query := datastore.NewQuery("Tasks").Filter("Description =", "A task description") + // [END unindexed_property_query] + _ = query // Use client.Run or client.GetAll to execute the query. +} + +func Example_explodingProperties() { + // [START exploding_properties] + task := &Task{ + Tags: []string{"fun", "programming", "learn"}, + Collaborators: []string{"alice", "bob", "charlie"}, + Created: time.Now(), + } + // [END exploding_properties] + _ = task // Use the task in a datastore Put operation. +} + +func Example_Transaction() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + var to, from *datastore.Key + // [START transactional_update] + type BankAccount struct { + Balance int + } + + const amount = 50 + keys := []*datastore.Key{to, from} + tx, err := client.NewTransaction(ctx) + if err != nil { + log.Fatalf("client.NewTransaction: %v", err) + } + accs := make([]BankAccount, 2) + if err := tx.GetMulti(keys, accs); err != nil { + tx.Rollback() + log.Fatalf("tx.GetMulti: %v", err) + } + accs[0].Balance += amount + accs[1].Balance -= amount + if _, err := tx.PutMulti(keys, accs); err != nil { + tx.Rollback() + log.Fatalf("tx.PutMulti: %v", err) + } + if _, err = tx.Commit(); err != nil { + log.Fatalf("tx.Commit: %v", err) + } + // [END transactional_update] +} + +func Example_Client_RunInTransaction() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + var to, from *datastore.Key + // [START transactional_retry] + type BankAccount struct { + Balance int + } + + const amount = 50 + _, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + keys := []*datastore.Key{to, from} + accs := make([]BankAccount, 2) + if err := tx.GetMulti(keys, accs); err != nil { + return err + } + accs[0].Balance += amount + accs[1].Balance -= amount + _, err := tx.PutMulti(keys, accs) + return err + }) + // [END transactional_retry] + _ = err // Check error. +} + +func ExampleTransaction_getOrCreate() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + key := datastore.NewKey(ctx, "Task", "sampletask", 0, nil) + // [START transactional_get_or_create] + _, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + var task Task + if err := tx.Get(key, &task); err != datastore.ErrNoSuchEntity { + return err + } + _, err := tx.Put(key, &Task{ + Category: "Personal", + Done: false, + Priority: 4, + Description: "Learn Cloud Datastore", + }) + return err + }) + // [END transactional_get_or_create] + _ = err // Check error. +} + +func ExampleTransaction_runQuery() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + // [START transactional_single_entity_group_read_only] + tx, err := client.NewTransaction(ctx) + if err != nil { + log.Fatalf("client.NewTransaction: %v", err) + } + defer tx.Rollback() // Transaction only used for read. 
+ + ancestor := datastore.NewKey(ctx, "TaskList", "default", 0, nil) + query := datastore.NewQuery("Task").Ancestor(ancestor).Transaction(tx) + var tasks []Task + _, err = client.GetAll(ctx, query, &tasks) + // [END transactional_single_entity_group_read_only] + _ = err // Check error. +} + +func Example_metadataNamespaces() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + // [START namespace_run_query] + const ( + startNamespace = "g" + endNamespace = "h" + ) + query := datastore.NewQuery("__namespace__"). + Filter("__key__ >=", startNamespace). + Filter("__key__ <", endNamespace). + KeysOnly() + keys, err := client.GetAll(ctx, query, nil) + if err != nil { + log.Fatalf("client.GetAll: %v", err) + } + + namespaces := make([]string, 0, len(keys)) + for _, k := range keys { + namespaces = append(namespaces, k.Name()) + } + // [END namespace_run_query] +} + +func Example_metadataKinds() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + // [START kind_run_query] + query := datastore.NewQuery("__kind__").KeysOnly() + keys, err := client.GetAll(ctx, query, nil) + if err != nil { + log.Fatalf("client.GetAll: %v", err) + } + + kinds := make([]string, 0, len(keys)) + for _, k := range keys { + kinds = append(kinds, k.Name()) + } + // [END kind_run_query] +} + +func Example_metadataProperties() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + // [START property_run_query] + query := datastore.NewQuery("__property__").KeysOnly() + keys, err := client.GetAll(ctx, query, nil) + if err != nil { + log.Fatalf("client.GetAll: %v", err) + } + + props := make(map[string][]string) // Map from kind to slice of properties. + for _, k := range keys { + prop := k.Name() + kind := k.Parent().Name() + props[kind] = append(props[kind], prop) + } + // [END property_run_query] +} + +func Example_metadataPropertiesForKind() { + ctx := context.Background() + client, _ := datastore.NewClient(ctx, "my-proj") + // [START property_by_kind_run_query] + kindKey := datastore.NewKey(ctx, "__kind__", "Task", 0, nil) + query := datastore.NewQuery("__property__").Ancestor(kindKey) + + type Prop struct { + Repr []string `datastore:"property_representation"` + } + + var props []Prop + keys, err := client.GetAll(ctx, query, &props) + // [END property_by_kind_run_query] + _ = err // Check error. + _ = keys // Use keys to find property names, and props for their representations. +} diff --git a/vendor/cloud.google.com/go/datastore/integration_test.go b/vendor/cloud.google.com/go/datastore/integration_test.go new file mode 100644 index 000000000..9e286d298 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/integration_test.go @@ -0,0 +1,993 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +// TODO(djd): Make test entity clean up more robust: some test entities may +// be left behind if tests are aborted, the transport fails, etc. + +// suffix is a timestamp-based suffix which is appended to key names, +// particularly for the root keys of entity groups. This reduces flakiness +// when the tests are run in parallel. +var suffix = fmt.Sprintf("-t%d", time.Now().UnixNano()) + +func newClient(ctx context.Context, t *testing.T) *Client { + ts := testutil.TokenSource(ctx, ScopeDatastore) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts)) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + return client +} + +func TestBasics(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx, _ := context.WithTimeout(context.Background(), time.Second*20) + client := newClient(ctx, t) + defer client.Close() + + type X struct { + I int + S string + T time.Time + } + + x0 := X{66, "99", time.Now().Truncate(time.Millisecond)} + k, err := client.Put(ctx, NewIncompleteKey(ctx, "BasicsX", nil), &x0) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + x1 := X{} + err = client.Get(ctx, k, &x1) + if err != nil { + t.Errorf("client.Get: %v", err) + } + err = client.Delete(ctx, k) + if err != nil { + t.Errorf("client.Delete: %v", err) + } + if !reflect.DeepEqual(x0, x1) { + t.Errorf("compare: x0=%v, x1=%v", x0, x1) + } +} + +func TestListValues(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + p0 := PropertyList{ + {Name: "L", Value: []interface{}{int64(12), "string", true}}, + } + k, err := client.Put(ctx, NewIncompleteKey(ctx, "ListValue", nil), &p0) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + var p1 PropertyList + if err := client.Get(ctx, k, &p1); err != nil { + t.Errorf("client.Get: %v", err) + } + if !reflect.DeepEqual(p0, p1) { + t.Errorf("compare:\np0=%v\np1=%#v", p0, p1) + } + if err = client.Delete(ctx, k); err != nil { + t.Errorf("client.Delete: %v", err) + } +} + +func TestGetMulti(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + type X struct { + I int + } + p := NewKey(ctx, "X", "x"+suffix, 0, nil) + + cases := []struct { + key *Key + put bool + }{ + {key: NewKey(ctx, "X", "item1", 0, p), put: true}, + {key: NewKey(ctx, "X", "item2", 0, p), put: false}, + {key: NewKey(ctx, "X", "item3", 0, p), put: false}, + {key: NewKey(ctx, "X", "item4", 0, p), put: true}, + } + + var src, dst []*X + var srcKeys, dstKeys []*Key + for _, c := range cases { + dst = append(dst, &X{}) + dstKeys = append(dstKeys, c.key) + if c.put { + src = append(src, &X{}) + srcKeys = append(srcKeys, c.key) + } + } + if _, err := client.PutMulti(ctx, srcKeys, src); err != nil { + t.Error(err) + } + err := client.GetMulti(ctx, dstKeys, dst) + if err == nil { + t.Errorf("client.GetMulti got %v, expected error", err) + } + e, ok := err.(MultiError) + if !ok { + t.Errorf("client.GetMulti got %T, expected MultiError", err) + } + for i, err := range e { + got, 
want := err, (error)(nil) + if !cases[i].put { + got, want = err, ErrNoSuchEntity + } + if got != want { + t.Errorf("MultiError[%d] == %v, want %v", i, got, want) + } + } +} + +type Z struct { + S string + T string `datastore:",noindex"` + P []byte + K []byte `datastore:",noindex"` +} + +func (z Z) String() string { + var lens []string + v := reflect.ValueOf(z) + for i := 0; i < v.NumField(); i++ { + if l := v.Field(i).Len(); l > 0 { + lens = append(lens, fmt.Sprintf("len(%s)=%d", v.Type().Field(i).Name, l)) + } + } + return fmt.Sprintf("Z{ %s }", strings.Join(lens, ",")) +} + +func TestUnindexableValues(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + x1500 := strings.Repeat("x", 1500) + x1501 := strings.Repeat("x", 1501) + testCases := []struct { + in Z + wantErr bool + }{ + {in: Z{S: x1500}, wantErr: false}, + {in: Z{S: x1501}, wantErr: true}, + {in: Z{T: x1500}, wantErr: false}, + {in: Z{T: x1501}, wantErr: false}, + {in: Z{P: []byte(x1500)}, wantErr: false}, + {in: Z{P: []byte(x1501)}, wantErr: true}, + {in: Z{K: []byte(x1500)}, wantErr: false}, + {in: Z{K: []byte(x1501)}, wantErr: false}, + } + for _, tt := range testCases { + _, err := client.Put(ctx, NewIncompleteKey(ctx, "BasicsZ", nil), &tt.in) + if (err != nil) != tt.wantErr { + t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr) + } + } +} + +func TestNilKey(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + testCases := []struct { + in K0 + wantErr bool + }{ + {in: K0{K: testKey0}, wantErr: false}, + {in: K0{}, wantErr: false}, + } + for _, tt := range testCases { + _, err := client.Put(ctx, NewIncompleteKey(ctx, "NilKey", nil), &tt.in) + if (err != nil) != tt.wantErr { + t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr) + } + } +} + +type SQChild struct { + I, J int + T, U int64 +} + +type SQTestCase struct { + desc string + q *Query + wantCount int + wantSum int +} + +func testSmallQueries(t *testing.T, ctx context.Context, client *Client, parent *Key, children []*SQChild, + testCases []SQTestCase, extraTests ...func()) { + keys := make([]*Key, len(children)) + for i := range keys { + keys[i] = NewIncompleteKey(ctx, "SQChild", parent) + } + keys, err := client.PutMulti(ctx, keys, children) + if err != nil { + t.Fatalf("client.PutMulti: %v", err) + } + defer func() { + err := client.DeleteMulti(ctx, keys) + if err != nil { + t.Errorf("client.DeleteMulti: %v", err) + } + }() + + for _, tc := range testCases { + count, err := client.Count(ctx, tc.q) + if err != nil { + t.Errorf("Count %q: %v", tc.desc, err) + continue + } + if count != tc.wantCount { + t.Errorf("Count %q: got %d want %d", tc.desc, count, tc.wantCount) + continue + } + } + + for _, tc := range testCases { + var got []SQChild + _, err := client.GetAll(ctx, tc.q, &got) + if err != nil { + t.Errorf("client.GetAll %q: %v", tc.desc, err) + continue + } + sum := 0 + for _, c := range got { + sum += c.I + c.J + } + if sum != tc.wantSum { + t.Errorf("sum %q: got %d want %d", tc.desc, sum, tc.wantSum) + continue + } + } + for _, x := range extraTests { + x() + } +} + +func TestFilters(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + 
parent := NewKey(ctx, "SQParent", "TestFilters"+suffix, 0, nil) + now := time.Now().Truncate(time.Millisecond).Unix() + children := []*SQChild{ + {I: 0, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 7, T: now, U: now}, + } + baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now) + testSmallQueries(t, ctx, client, parent, children, []SQTestCase{ + { + "I>1", + baseQuery.Filter("I>", 1), + 6, + 2 + 3 + 4 + 5 + 6 + 7, + }, + { + "I>2 AND I<=5", + baseQuery.Filter("I>", 2).Filter("I<=", 5), + 3, + 3 + 4 + 5, + }, + { + "I>=3 AND I<3", + baseQuery.Filter("I>=", 3).Filter("I<", 3), + 0, + 0, + }, + { + "I=4", + baseQuery.Filter("I=", 4), + 1, + 4, + }, + }, func() { + got := []*SQChild{} + want := []*SQChild{ + {I: 0, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 7, T: now, U: now}, + } + _, err := client.GetAll(ctx, baseQuery.Order("I"), &got) + if err != nil { + t.Errorf("client.GetAll: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("compare: got=%v, want=%v", got, want) + } + }, func() { + got := []*SQChild{} + want := []*SQChild{ + {I: 7, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 0, T: now, U: now}, + } + _, err := client.GetAll(ctx, baseQuery.Order("-I"), &got) + if err != nil { + t.Errorf("client.GetAll: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("compare: got=%v, want=%v", got, want) + } + }) +} + +func TestLargeQuery(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + parent := NewKey(ctx, "LQParent", "TestFilters"+suffix, 0, nil) + now := time.Now().Truncate(time.Millisecond).Unix() + + // Make a large number of children entities. + const n = 800 + children := make([]*SQChild, 0, n) + keys := make([]*Key, 0, n) + for i := 0; i < n; i++ { + children = append(children, &SQChild{I: i, T: now, U: now}) + keys = append(keys, NewIncompleteKey(ctx, "SQChild", parent)) + } + + // Store using PutMulti in batches. + const batchSize = 500 + for i := 0; i < n; i = i + 500 { + j := i + batchSize + if j > n { + j = n + } + fullKeys, err := client.PutMulti(ctx, keys[i:j], children[i:j]) + if err != nil { + t.Fatalf("PutMulti(%d, %d): %v", i, j, err) + } + defer func() { + err := client.DeleteMulti(ctx, fullKeys) + if err != nil { + t.Errorf("client.DeleteMulti: %v", err) + } + }() + } + + q := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Order("I") + + // Wait group to allow us to run query tests in parallel below. + var wg sync.WaitGroup + + // Check we get the expected count and results for various limits/offsets. + queryTests := []struct { + limit, offset, want int + }{ + // Just limit. + {limit: 0, want: 0}, + {limit: 100, want: 100}, + {limit: 501, want: 501}, + {limit: n, want: n}, + {limit: n * 2, want: n}, + {limit: -1, want: n}, + // Just offset. + {limit: -1, offset: 100, want: n - 100}, + {limit: -1, offset: 500, want: n - 500}, + {limit: -1, offset: n, want: 0}, + // Limit and offset. 
+ {limit: 100, offset: 100, want: 100}, + {limit: 1000, offset: 100, want: n - 100}, + {limit: 500, offset: 500, want: n - 500}, + } + for _, tt := range queryTests { + q := q.Limit(tt.limit).Offset(tt.offset) + wg.Add(1) + + go func(limit, offset, want int) { + defer wg.Done() + // Check Count returns the expected number of results. + count, err := client.Count(ctx, q) + if err != nil { + t.Errorf("client.Count(limit=%d offset=%d): %v", limit, offset, err) + return + } + if count != want { + t.Errorf("Count(limit=%d offset=%d) returned %d, want %d", limit, offset, count, want) + } + + var got []SQChild + _, err = client.GetAll(ctx, q, &got) + if err != nil { + t.Errorf("client.GetAll(limit=%d offset=%d): %v", limit, offset, err) + return + } + if len(got) != want { + t.Errorf("GetAll(limit=%d offset=%d) returned %d, want %d", limit, offset, len(got), want) + } + for i, child := range got { + if got, want := child.I, i+offset; got != want { + t.Errorf("GetAll(limit=%d offset=%d) got[%d].I == %d; want %d", limit, offset, i, got, want) + break + } + } + }(tt.limit, tt.offset, tt.want) + } + + // Also check iterator cursor behaviour. + cursorTests := []struct { + limit, offset int // Query limit and offset. + count int // The number of times to call "next" + want int // The I value of the desired element, -1 for "Done". + }{ + // No limits. + {count: 0, limit: -1, want: 0}, + {count: 5, limit: -1, want: 5}, + {count: 500, limit: -1, want: 500}, + {count: 1000, limit: -1, want: -1}, // No more results. + // Limits. + {count: 5, limit: 5, want: 5}, + {count: 500, limit: 5, want: 5}, + {count: 1000, limit: 1000, want: -1}, // No more results. + // Offsets. + {count: 0, offset: 5, limit: -1, want: 5}, + {count: 5, offset: 5, limit: -1, want: 10}, + {count: 200, offset: 500, limit: -1, want: 700}, + {count: 200, offset: 1000, limit: -1, want: -1}, // No more results. + } + for _, tt := range cursorTests { + wg.Add(1) + + go func(count, limit, offset, want int) { + defer wg.Done() + + // Run iterator through count calls to Next. + it := client.Run(ctx, q.Limit(limit).Offset(offset).KeysOnly()) + for i := 0; i < count; i++ { + _, err := it.Next(nil) + if err == Done { + break + } + if err != nil { + t.Errorf("count=%d, limit=%d, offset=%d: it.Next failed at i=%d", count, limit, offset, i) + return + } + } + + // Grab the cursor. + cursor, err := it.Cursor() + if err != nil { + t.Errorf("count=%d, limit=%d, offset=%d: it.Cursor: %v", count, limit, offset, err) + return + } + + // Make a request for the next element. + it = client.Run(ctx, q.Limit(1).Start(cursor)) + var entity SQChild + _, err = it.Next(&entity) + switch { + case want == -1: + if err != Done { + t.Errorf("count=%d, limit=%d, offset=%d: it.Next from cursor %v, want Done", count, limit, offset, err) + } + case err != nil: + t.Errorf("count=%d, limit=%d, offset=%d: it.Next from cursor: %v, want nil", count, limit, offset, err) + case entity.I != want: + t.Errorf("count=%d, limit=%d, offset=%d: got.I = %d, want %d", count, limit, offset, entity.I, want) + } + }(tt.count, tt.limit, tt.offset, tt.want) + } + + wg.Wait() +} + +func TestEventualConsistency(t *testing.T) { + // TODO(jba): either make this actually test eventual consistency, or + // delete it. Currently it behaves the same with or without the + // EventualConsistency call. 
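+	// One possibility (a sketch, not from the original TODO): ancestor queries
+	// like the one below are strongly consistent by default, so staleness would
+	// only be observable by dropping the Ancestor clause and racing a global
+	// (non-ancestor) query against a fresh Put.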
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	ctx := context.Background()
+	client := newClient(ctx, t)
+	defer client.Close()
+
+	parent := NewKey(ctx, "SQParent", "TestEventualConsistency"+suffix, 0, nil)
+	now := time.Now().Truncate(time.Millisecond).Unix()
+	children := []*SQChild{
+		{I: 0, T: now, U: now},
+		{I: 1, T: now, U: now},
+		{I: 2, T: now, U: now},
+	}
+	query := NewQuery("SQChild").Ancestor(parent).Filter("T =", now).EventualConsistency()
+	testSmallQueries(t, ctx, client, parent, children, nil, func() {
+		got, err := client.Count(ctx, query)
+		if err != nil {
+			t.Fatalf("Count: %v", err)
+		}
+		if got < 0 || 3 < got {
+			t.Errorf("Count: got %d, want [0,3]", got)
+		}
+	})
+}
+
+func TestProjection(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	ctx := context.Background()
+	client := newClient(ctx, t)
+	defer client.Close()
+
+	parent := NewKey(ctx, "SQParent", "TestProjection"+suffix, 0, nil)
+	now := time.Now().Truncate(time.Millisecond).Unix()
+	children := []*SQChild{
+		{I: 1 << 0, J: 100, T: now, U: now},
+		{I: 1 << 1, J: 100, T: now, U: now},
+		{I: 1 << 2, J: 200, T: now, U: now},
+		{I: 1 << 3, J: 300, T: now, U: now},
+		{I: 1 << 4, J: 300, T: now, U: now},
+	}
+	baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Filter("J>", 150)
+	testSmallQueries(t, ctx, client, parent, children, []SQTestCase{
+		{
+			"project",
+			baseQuery.Project("J"),
+			3,
+			200 + 300 + 300,
+		},
+		{
+			"distinct",
+			baseQuery.Project("J").Distinct(),
+			2,
+			200 + 300,
+		},
+		{
+			"project on meaningful (GD_WHEN) field",
+			baseQuery.Project("U"),
+			3,
+			0,
+		},
+	})
+}
+
+func TestAllocateIDs(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	ctx := context.Background()
+	client := newClient(ctx, t)
+	defer client.Close()
+
+	keys := make([]*Key, 5)
+	for i := range keys {
+		keys[i] = NewIncompleteKey(ctx, "AllocID", nil)
+	}
+	keys, err := client.AllocateIDs(ctx, keys)
+	if err != nil {
+		t.Errorf("AllocateIDs failed: %v", err)
+	}
+	if got := len(keys); got != 5 {
+		t.Errorf("AllocateIDs returned %d keys, want 5", got)
+	}
+	for _, k := range keys {
+		if k.Incomplete() {
+			t.Errorf("Unexpected incomplete key found: %v", k)
+		}
+	}
+}
+
+func TestGetAllWithFieldMismatch(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	ctx := context.Background()
+	client := newClient(ctx, t)
+	defer client.Close()
+
+	type Fat struct {
+		X, Y int
+	}
+	type Thin struct {
+		X int
+	}
+
+	// Ancestor queries (those within an entity group) are strongly consistent
+	// by default, which keeps this test from being flaky.
+	// See https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Data_consistency
+	// for more information.
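+	// Write Fat entities, then read them back as Thin: the missing Y field
+	// should surface as an ErrFieldMismatch while X still loads.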
+ parent := NewKey(ctx, "SQParent", "TestGetAllWithFieldMismatch"+suffix, 0, nil) + putKeys := make([]*Key, 3) + for i := range putKeys { + putKeys[i] = NewKey(ctx, "GetAllThing", "", int64(10+i), parent) + _, err := client.Put(ctx, putKeys[i], &Fat{X: 20 + i, Y: 30 + i}) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + } + + var got []Thin + want := []Thin{ + {X: 20}, + {X: 21}, + {X: 22}, + } + getKeys, err := client.GetAll(ctx, NewQuery("GetAllThing").Ancestor(parent), &got) + if len(getKeys) != 3 && !reflect.DeepEqual(getKeys, putKeys) { + t.Errorf("client.GetAll: keys differ\ngetKeys=%v\nputKeys=%v", getKeys, putKeys) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("client.GetAll: entities differ\ngot =%v\nwant=%v", got, want) + } + if _, ok := err.(*ErrFieldMismatch); !ok { + t.Errorf("client.GetAll: got err=%v, want ErrFieldMismatch", err) + } +} + +func TestKindlessQueries(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + type Dee struct { + I int + Why string + } + type Dum struct { + I int + Pling string + } + + parent := NewKey(ctx, "Tweedle", "tweedle"+suffix, 0, nil) + + keys := []*Key{ + NewKey(ctx, "Dee", "dee0", 0, parent), + NewKey(ctx, "Dum", "dum1", 0, parent), + NewKey(ctx, "Dum", "dum2", 0, parent), + NewKey(ctx, "Dum", "dum3", 0, parent), + } + src := []interface{}{ + &Dee{1, "binary0001"}, + &Dum{2, "binary0010"}, + &Dum{4, "binary0100"}, + &Dum{8, "binary1000"}, + } + keys, err := client.PutMulti(ctx, keys, src) + if err != nil { + t.Fatalf("put: %v", err) + } + + testCases := []struct { + desc string + query *Query + want []int + wantErr string + }{ + { + desc: "Dee", + query: NewQuery("Dee"), + want: []int{1}, + }, + { + desc: "Doh", + query: NewQuery("Doh"), + want: nil}, + { + desc: "Dum", + query: NewQuery("Dum"), + want: []int{2, 4, 8}, + }, + { + desc: "", + query: NewQuery(""), + want: []int{1, 2, 4, 8}, + }, + { + desc: "Kindless filter", + query: NewQuery("").Filter("__key__ =", keys[2]), + want: []int{4}, + }, + { + desc: "Kindless order", + query: NewQuery("").Order("__key__"), + want: []int{1, 2, 4, 8}, + }, + { + desc: "Kindless bad filter", + query: NewQuery("").Filter("I =", 4), + wantErr: "kind is required", + }, + { + desc: "Kindless bad order", + query: NewQuery("").Order("-__key__"), + wantErr: "kind is required for all orders except __key__ ascending", + }, + } +loop: + for _, tc := range testCases { + q := tc.query.Ancestor(parent) + gotCount, err := client.Count(ctx, q) + if err != nil { + if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("count %q: err %v, want err %q", tc.desc, err, tc.wantErr) + } + continue + } + if tc.wantErr != "" { + t.Errorf("count %q: want err %q", tc.desc, tc.wantErr) + continue + } + if gotCount != len(tc.want) { + t.Errorf("count %q: got %d want %d", tc.desc, gotCount, len(tc.want)) + continue + } + var got []int + for iter := client.Run(ctx, q); ; { + var dst struct { + I int + Why, Pling string + } + _, err := iter.Next(&dst) + if err == Done { + break + } + if err != nil { + t.Errorf("iter.Next %q: %v", tc.desc, err) + continue loop + } + got = append(got, dst.I) + } + sort.Ints(got) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("elems %q: got %+v want %+v", tc.desc, got, tc.want) + continue + } + } +} + +func TestTransaction(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := 
context.Background() + client := newClient(ctx, t) + defer client.Close() + + type Counter struct { + N int + T time.Time + } + + bangErr := errors.New("bang") + tests := []struct { + desc string + causeConflict []bool + retErr []error + want int + wantErr error + }{ + { + desc: "3 attempts, no conflicts", + causeConflict: []bool{false}, + retErr: []error{nil}, + want: 11, + }, + { + desc: "1 attempt, user error", + causeConflict: []bool{false}, + retErr: []error{bangErr}, + wantErr: bangErr, + }, + { + desc: "2 attempts, 1 conflict", + causeConflict: []bool{true, false}, + retErr: []error{nil, nil}, + want: 13, // Each conflict increments by 2. + }, + { + desc: "3 attempts, 3 conflicts", + causeConflict: []bool{true, true, true}, + retErr: []error{nil, nil, nil}, + wantErr: ErrConcurrentTransaction, + }, + } + + for i, tt := range tests { + // Put a new counter. + c := &Counter{N: 10, T: time.Now()} + key, err := client.Put(ctx, NewIncompleteKey(ctx, "TransCounter", nil), c) + if err != nil { + t.Errorf("%s: client.Put: %v", tt.desc, err) + continue + } + defer client.Delete(ctx, key) + + // Increment the counter in a transaction. + // The test case can manually cause a conflict or return an + // error at each attempt. + var attempts int + _, err = client.RunInTransaction(ctx, func(tx *Transaction) error { + attempts++ + if attempts > len(tt.causeConflict) { + return fmt.Errorf("too many attempts. Got %d, max %d", attempts, len(tt.causeConflict)) + } + + var c Counter + if err := tx.Get(key, &c); err != nil { + return err + } + c.N++ + if _, err := tx.Put(key, &c); err != nil { + return err + } + + if tt.causeConflict[attempts-1] { + c.N += 1 + if _, err := client.Put(ctx, key, &c); err != nil { + return err + } + } + + return tt.retErr[attempts-1] + }, MaxAttempts(i)) + + // Check the error returned by RunInTransaction. + if err != tt.wantErr { + t.Errorf("%s: got err %v, want %v", tt.desc, err, tt.wantErr) + continue + } + if err != nil { + continue + } + + // Check the final value of the counter. + if err := client.Get(ctx, key, c); err != nil { + t.Errorf("%s: client.Get: %v", tt.desc, err) + continue + } + if c.N != tt.want { + t.Errorf("%s: counter N=%d, want N=%d", tt.desc, c.N, tt.want) + } + } +} + +func TestNilPointers(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + type X struct { + S string + } + + src := []*X{{"zero"}, {"one"}} + keys := []*Key{NewIncompleteKey(ctx, "NilX", nil), NewIncompleteKey(ctx, "NilX", nil)} + keys, err := client.PutMulti(ctx, keys, src) + if err != nil { + t.Fatalf("PutMulti: %v", err) + } + + // It's okay to store into a slice of nil *X. + xs := make([]*X, 2) + if err := client.GetMulti(ctx, keys, xs); err != nil { + t.Errorf("GetMulti: %v", err) + } else if !reflect.DeepEqual(xs, src) { + t.Errorf("GetMulti fetched %v, want %v", xs, src) + } + + // It isn't okay to store into a single nil *X. 
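+	// Get needs a non-nil struct pointer (or a PropertyLoadSaver) to write
+	// into, so a nil *X is rejected with ErrInvalidEntityType.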
+	var x0 *X
+	if err, want := client.Get(ctx, keys[0], x0), ErrInvalidEntityType; err != want {
+		t.Errorf("Get: err %v; want %v", err, want)
+	}
+
+	if err := client.DeleteMulti(ctx, keys); err != nil {
+		t.Errorf("Delete: %v", err)
+	}
+}
+
+func TestNestedRepeatedElementNoIndex(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	ctx := context.Background()
+	client := newClient(ctx, t)
+	defer client.Close()
+
+	type Inner struct {
+		Name  string
+		Value string `datastore:",noindex"`
+	}
+	type Outer struct {
+		Config []Inner
+	}
+	m := &Outer{
+		Config: []Inner{
+			{Name: "short", Value: "a"},
+			{Name: "long", Value: strings.Repeat("a", 2000)},
+		},
+	}
+
+	key := NewKey(ctx, "Nested", "Nested"+suffix, 0, nil)
+	if _, err := client.Put(ctx, key, m); err != nil {
+		t.Fatalf("client.Put: %v", err)
+	}
+	if err := client.Delete(ctx, key); err != nil {
+		t.Fatalf("client.Delete: %v", err)
+	}
+}
diff --git a/vendor/cloud.google.com/go/datastore/key.go b/vendor/cloud.google.com/go/datastore/key.go
new file mode 100644
index 000000000..ab3f084ca
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/key.go
@@ -0,0 +1,279 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datastore
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/gob"
+	"errors"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	pb "google.golang.org/genproto/googleapis/datastore/v1"
+)
+
+// Key represents the datastore key for a stored entity, and is immutable.
+type Key struct {
+	kind   string
+	id     int64
+	name   string
+	parent *Key
+
+	namespace string
+}
+
+func (k *Key) Kind() string {
+	return k.kind
+}
+
+func (k *Key) ID() int64 {
+	return k.id
+}
+
+func (k *Key) Name() string {
+	return k.name
+}
+
+func (k *Key) Parent() *Key {
+	return k.parent
+}
+
+func (k *Key) SetParent(v *Key) {
+	if v.Incomplete() {
+		panic("can't set an incomplete key as parent")
+	}
+	k.parent = v
+}
+
+func (k *Key) Namespace() string {
+	return k.namespace
+}
+
+// Incomplete returns whether the key does not refer to a stored entity.
+func (k *Key) Incomplete() bool {
+	return k.name == "" && k.id == 0
+}
+
+// valid returns whether the key is valid.
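+// A key is valid when it is non-nil, every key in its ancestor chain has a
+// non-empty kind and at most one of {name, id} set, and each parent is
+// complete and lives in the same namespace as its child.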
+func (k *Key) valid() bool { + if k == nil { + return false + } + for ; k != nil; k = k.parent { + if k.kind == "" { + return false + } + if k.name != "" && k.id != 0 { + return false + } + if k.parent != nil { + if k.parent.Incomplete() { + return false + } + if k.parent.namespace != k.namespace { + return false + } + } + } + return true +} + +func (k *Key) Equal(o *Key) bool { + for { + if k == nil || o == nil { + return k == o // if either is nil, both must be nil + } + if k.namespace != o.namespace || k.name != o.name || k.id != o.id || k.kind != o.kind { + return false + } + if k.parent == nil && o.parent == nil { + return true + } + k = k.parent + o = o.parent + } +} + +// marshal marshals the key's string representation to the buffer. +func (k *Key) marshal(b *bytes.Buffer) { + if k.parent != nil { + k.parent.marshal(b) + } + b.WriteByte('/') + b.WriteString(k.kind) + b.WriteByte(',') + if k.name != "" { + b.WriteString(k.name) + } else { + b.WriteString(strconv.FormatInt(k.id, 10)) + } +} + +// String returns a string representation of the key. +func (k *Key) String() string { + if k == nil { + return "" + } + b := bytes.NewBuffer(make([]byte, 0, 512)) + k.marshal(b) + return b.String() +} + +// Note: Fields not renamed compared to appengine gobKey struct +// This ensures gobs created by appengine can be read here, and vice/versa +type gobKey struct { + Kind string + StringID string + IntID int64 + Parent *gobKey + AppID string + Namespace string +} + +func keyToGobKey(k *Key) *gobKey { + if k == nil { + return nil + } + return &gobKey{ + Kind: k.kind, + StringID: k.name, + IntID: k.id, + Parent: keyToGobKey(k.parent), + Namespace: k.namespace, + } +} + +func gobKeyToKey(gk *gobKey) *Key { + if gk == nil { + return nil + } + return &Key{ + kind: gk.Kind, + name: gk.StringID, + id: gk.IntID, + parent: gobKeyToKey(gk.Parent), + namespace: gk.Namespace, + } +} + +func (k *Key) GobEncode() ([]byte, error) { + buf := new(bytes.Buffer) + if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (k *Key) GobDecode(buf []byte) error { + gk := new(gobKey) + if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil { + return err + } + *k = *gobKeyToKey(gk) + return nil +} + +func (k *Key) MarshalJSON() ([]byte, error) { + return []byte(`"` + k.Encode() + `"`), nil +} + +func (k *Key) UnmarshalJSON(buf []byte) error { + if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' { + return errors.New("datastore: bad JSON key") + } + k2, err := DecodeKey(string(buf[1 : len(buf)-1])) + if err != nil { + return err + } + *k = *k2 + return nil +} + +// Encode returns an opaque representation of the key +// suitable for use in HTML and URLs. +// This is compatible with the Python and Java runtimes. +func (k *Key) Encode() string { + pKey := keyToProto(k) + + b, err := proto.Marshal(pKey) + if err != nil { + panic(err) + } + + // Trailing padding is stripped. + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// DecodeKey decodes a key from the opaque representation returned by Encode. +func DecodeKey(encoded string) (*Key, error) { + // Re-add padding. 
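+	// Encode strips the trailing '=' padding, so pad the string back to a
+	// multiple of 4 bytes before URL-safe base64 decoding.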
+ if m := len(encoded) % 4; m != 0 { + encoded += strings.Repeat("=", 4-m) + } + + b, err := base64.URLEncoding.DecodeString(encoded) + if err != nil { + return nil, err + } + + pKey := new(pb.Key) + if err := proto.Unmarshal(b, pKey); err != nil { + return nil, err + } + return protoToKey(pKey) +} + +// NewIncompleteKey creates a new incomplete key. +// kind cannot be empty. +func NewIncompleteKey(ctx context.Context, kind string, parent *Key) *Key { + return NewKey(ctx, kind, "", 0, parent) +} + +// NewKey creates a new key. +// kind cannot be empty. +// At least one of name and id must be zero. If both are zero, the key returned +// is incomplete. +// parent must either be a complete key or nil. +func NewKey(ctx context.Context, kind, name string, id int64, parent *Key) *Key { + return &Key{ + kind: kind, + name: name, + id: id, + parent: parent, + namespace: ctxNamespace(ctx), + } +} + +// AllocateIDs accepts a slice of incomplete keys and returns a +// slice of complete keys that are guaranteed to be valid in the datastore +func (c *Client) AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) { + if keys == nil { + return nil, nil + } + + req := &pb.AllocateIdsRequest{ + ProjectId: c.dataset, + Keys: multiKeyToProto(keys), + } + resp, err := c.client.AllocateIds(ctx, req) + if err != nil { + return nil, err + } + + return multiProtoToKey(resp.Keys) +} diff --git a/vendor/cloud.google.com/go/datastore/key_test.go b/vendor/cloud.google.com/go/datastore/key_test.go new file mode 100644 index 000000000..0c0a175a8 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/key_test.go @@ -0,0 +1,242 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "testing" + + "golang.org/x/net/context" +) + +func TestNamespace(t *testing.T) { + c := context.Background() + k := NewIncompleteKey(c, "foo", nil) + if got, want := k.Namespace(), ""; got != want { + t.Errorf("No namespace, k.Namespace() = %q, want %q", got, want) + } + + c = WithNamespace(c, "gopherspace") + k = NewIncompleteKey(c, "foo", nil) + if got, want := k.Namespace(), "gopherspace"; got != want { + t.Errorf("No namespace, k.Namespace() = %q, want %q", got, want) + } +} + +func TestParent(t *testing.T) { + c := context.Background() + k := NewIncompleteKey(c, "foo", nil) + par := NewKey(c, "foomum", "", 1248, nil) + k.SetParent(par) + if got := k.Parent(); got != par { + t.Errorf("k.Parent() = %v; want %v", got, par) + } +} + +func TestEqual(t *testing.T) { + c := context.Background() + cN := WithNamespace(c, "gopherspace") + + testCases := []struct { + x, y *Key + equal bool + }{ + { + x: nil, + y: nil, + equal: true, + }, + { + x: NewKey(c, "kindA", "", 0, nil), + y: NewIncompleteKey(c, "kindA", nil), + equal: true, + }, + { + x: NewKey(c, "kindA", "nameA", 0, nil), + y: NewKey(c, "kindA", "nameA", 0, nil), + equal: true, + }, + { + x: NewKey(cN, "kindA", "nameA", 0, nil), + y: NewKey(cN, "kindA", "nameA", 0, nil), + equal: true, + }, + { + x: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)), + y: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)), + equal: true, + }, + { + x: NewKey(c, "kindA", "nameA", 0, nil), + y: NewKey(c, "kindB", "nameA", 0, nil), + equal: false, + }, + { + x: NewKey(c, "kindA", "nameA", 0, nil), + y: NewKey(c, "kindA", "nameB", 0, nil), + equal: false, + }, + { + x: NewKey(c, "kindA", "nameA", 0, nil), + y: NewKey(c, "kindA", "", 1337, nil), + equal: false, + }, + { + x: NewKey(c, "kindA", "nameA", 0, nil), + y: NewKey(cN, "kindA", "nameA", 0, nil), + equal: false, + }, + { + x: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)), + y: NewKey(c, "kindA", "", 1337, NewKey(c, "kindY", "nameX", 0, nil)), + equal: false, + }, + { + x: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)), + y: NewKey(c, "kindA", "", 1337, nil), + equal: false, + }, + } + + for _, tt := range testCases { + if got := tt.x.Equal(tt.y); got != tt.equal { + t.Errorf("Equal(%v, %v) = %t; want %t", tt.x, tt.y, got, tt.equal) + } + if got := tt.y.Equal(tt.x); got != tt.equal { + t.Errorf("Equal(%v, %v) = %t; want %t", tt.y, tt.x, got, tt.equal) + } + } +} + +func TestEncoding(t *testing.T) { + c := context.Background() + cN := WithNamespace(c, "gopherspace") + + testCases := []struct { + k *Key + valid bool + }{ + { + k: nil, + valid: false, + }, + { + k: NewKey(c, "", "", 0, nil), + valid: false, + }, + { + k: NewKey(c, "kindA", "", 0, nil), + valid: true, + }, + { + k: NewKey(cN, "kindA", "", 0, nil), + valid: true, + }, + { + k: NewKey(c, "kindA", "nameA", 0, nil), + valid: true, + }, + { + k: NewKey(c, "kindA", "", 1337, nil), + valid: true, + }, + { + k: NewKey(c, "kindA", "nameA", 1337, nil), + valid: false, + }, + { + k: NewKey(c, "kindA", "", 0, NewKey(c, "kindB", "nameB", 0, nil)), + valid: true, + }, + { + k: NewKey(c, "kindA", "", 0, NewKey(c, "kindB", "", 0, nil)), + valid: false, + }, + { + k: NewKey(c, "kindA", "", 0, NewKey(cN, "kindB", "nameB", 0, nil)), + valid: false, + }, + } + + for _, tt := range testCases { + if got := tt.k.valid(); got != tt.valid { + t.Errorf("valid(%v) = %t; want %t", tt.k, got, tt.valid) + } + + // Check 
encoding/decoding for valid keys. + if !tt.valid { + continue + } + enc := tt.k.Encode() + dec, err := DecodeKey(enc) + if err != nil { + t.Errorf("DecodeKey(%q) from %v: %v", enc, tt.k, err) + continue + } + if !tt.k.Equal(dec) { + t.Logf("Proto: %s", keyToProto(tt.k)) + t.Errorf("Decoded key %v not equal to %v", dec, tt.k) + } + + b, err := json.Marshal(tt.k) + if err != nil { + t.Errorf("json.Marshal(%v): %v", tt.k, err) + continue + } + key := &Key{} + if err := json.Unmarshal(b, key); err != nil { + t.Errorf("json.Unmarshal(%s) for key %v: %v", b, tt.k, err) + continue + } + if !tt.k.Equal(key) { + t.Errorf("JSON decoded key %v not equal to %v", dec, tt.k) + } + + buf := &bytes.Buffer{} + gobEnc := gob.NewEncoder(buf) + if err := gobEnc.Encode(tt.k); err != nil { + t.Errorf("gobEnc.Encode(%v): %v", tt.k, err) + continue + } + gobDec := gob.NewDecoder(buf) + key = &Key{} + if err := gobDec.Decode(key); err != nil { + t.Errorf("gobDec.Decode() for key %v: %v", tt.k, err) + } + if !tt.k.Equal(key) { + t.Errorf("gob decoded key %v not equal to %v", dec, tt.k) + } + } +} + +func TestInvalidKeyDecode(t *testing.T) { + // Check that decoding an invalid key returns an err and doesn't panic. + enc := NewKey(context.Background(), "Kind", "Foo", 0, nil).Encode() + + invalid := []string{ + "", + "Laboratorio", + enc + "Junk", + enc[:len(enc)-4], + } + for _, enc := range invalid { + key, err := DecodeKey(enc) + if err == nil || key != nil { + t.Errorf("DecodeKey(%q) = %v, %v; want nil, error", enc, key, err) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/load.go b/vendor/cloud.google.com/go/datastore/load.go new file mode 100644 index 000000000..f7b4947eb --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/load.go @@ -0,0 +1,383 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + "reflect" + "strings" + "time" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +var ( + typeOfByteSlice = reflect.TypeOf([]byte(nil)) + typeOfTime = reflect.TypeOf(time.Time{}) + typeOfGeoPoint = reflect.TypeOf(GeoPoint{}) + typeOfKeyPtr = reflect.TypeOf(&Key{}) + typeOfEntityPtr = reflect.TypeOf(&Entity{}) +) + +// typeMismatchReason returns a string explaining why the property p could not +// be stored in an entity field of type v.Type(). +func typeMismatchReason(p Property, v reflect.Value) string { + entityType := "empty" + switch p.Value.(type) { + case int64: + entityType = "int" + case bool: + entityType = "bool" + case string: + entityType = "string" + case float64: + entityType = "float" + case *Key: + entityType = "*datastore.Key" + case GeoPoint: + entityType = "GeoPoint" + case time.Time: + entityType = "time.Time" + case []byte: + entityType = "[]byte" + } + + return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) +} + +type propertyLoader struct { + // m holds the number of times a substruct field like "Foo.Bar.Baz" has + // been seen so far. 
The map is constructed lazily. + m map[string]int +} + +func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, prev map[string]struct{}) string { + sl, ok := p.Value.([]interface{}) + if !ok { + return l.loadOneElement(codec, structValue, p, prev) + } + for _, val := range sl { + p.Value = val + if errStr := l.loadOneElement(codec, structValue, p, prev); errStr != "" { + return errStr + } + } + return "" +} + +// loadOneElement loads the value of Property p into structValue based on the provided +// codec. codec is used to find the field in structValue into which p should be loaded. +// prev is the set of property names already seen for structValue. +func (l *propertyLoader) loadOneElement(codec *structCodec, structValue reflect.Value, p Property, prev map[string]struct{}) string { + var sliceOk bool + var sliceIndex int + var v reflect.Value + + name := p.Name + for name != "" { + // First we try to find a field with name matching + // the value of 'name' exactly. + decoder, ok := codec.fields[name] + if ok { + name = "" + } else { + // Now try for legacy flattened nested field (named eg. "A.B.C.D"). + + parent := name + child := "" + + // Cut off the last field (delimited by ".") and find its parent + // in the codec. + // eg. for name "A.B.C.D", split off "A.B.C" and try to + // find a field in the codec with this name. + // Loop again with "A.B", etc. + for !ok { + i := strings.LastIndex(parent, ".") + if i < 0 { + return "no such struct field" + } + if i == len(name)-1 { + return "field name cannot end with '.'" + } + parent, child = name[:i], name[i+1:] + decoder, ok = codec.fields[parent] + } + + name = child + } + + v = initField(structValue, decoder.path) + if !v.IsValid() { + return "no such struct field" + } + if !v.CanSet() { + return "cannot set struct field" + } + + if decoder.structCodec != nil { + codec = decoder.structCodec + structValue = v + } + + // If the element is a slice, we need to accommodate it. + if v.Kind() == reflect.Slice { + if l.m == nil { + l.m = make(map[string]int) + } + sliceIndex = l.m[p.Name] + l.m[p.Name] = sliceIndex + 1 + for v.Len() <= sliceIndex { + v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem())) + } + structValue = v.Index(sliceIndex) + sliceOk = true + } + } + + var slice reflect.Value + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + slice = v + v = reflect.New(v.Type().Elem()).Elem() + } else if _, ok := prev[p.Name]; ok && !sliceOk { + // Zero the field back out that was set previously, turns out + // it's a slice and we don't know what to do with it + v.Set(reflect.Zero(v.Type())) + return "multiple-valued property requires a slice field type" + } + + prev[p.Name] = struct{}{} + + if errReason := setVal(v, p); errReason != "" { + // Set the slice back to its zero value. + if slice.IsValid() { + slice.Set(reflect.Zero(slice.Type())) + } + return errReason + } + + if slice.IsValid() { + slice.Index(sliceIndex).Set(v) + } + + return "" +} + +// setVal sets 'v' to the value of the Property 'p'. 
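+// It returns an empty string on success, or a human-readable reason on
+// failure, which the caller wraps in an ErrFieldMismatch.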
+func setVal(v reflect.Value, p Property) string { + pValue := p.Value + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x, ok := pValue.(int64) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.OverflowInt(x) { + return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) + } + v.SetInt(x) + case reflect.Bool: + x, ok := pValue.(bool) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.SetBool(x) + case reflect.String: + x, ok := pValue.(string) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.SetString(x) + case reflect.Float32, reflect.Float64: + x, ok := pValue.(float64) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.OverflowFloat(x) { + return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) + } + v.SetFloat(x) + case reflect.Ptr: + x, ok := pValue.(*Key) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if _, ok := v.Interface().(*Key); !ok { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + case reflect.Struct: + switch v.Type() { + case typeOfTime: + x, ok := pValue.(time.Time) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + case typeOfGeoPoint: + x, ok := pValue.(GeoPoint) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + default: + ent, ok := pValue.(*Entity) + if !ok { + return typeMismatchReason(p, v) + } + + // Recursively load nested struct + pls, err := newStructPLS(v.Addr().Interface()) + if err != nil { + return err.Error() + } + + // if ent has a Key value and our struct has a Key field, + // load the Entity's Key value into the Key field on the struct. + if ent.Key != nil && pls.codec.keyField != -1 { + pls.v.Field(pls.codec.keyField).Set(reflect.ValueOf(ent.Key)) + } + + err = pls.Load(ent.Properties) + if err != nil { + return err.Error() + } + } + case reflect.Slice: + x, ok := pValue.([]byte) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.Type().Elem().Kind() != reflect.Uint8 { + return typeMismatchReason(p, v) + } + v.SetBytes(x) + default: + return typeMismatchReason(p, v) + } + return "" +} + +// initField is similar to reflect's Value.FieldByIndex, in that it +// returns the nested struct field corresponding to index, but it +// initialises any nil pointers encountered when traversing the structure. +func initField(val reflect.Value, index []int) reflect.Value { + for _, i := range index[:len(index)-1] { + val = val.Field(i) + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + } + return val.Field(index[len(index)-1]) +} + +// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer. +func loadEntity(dst interface{}, src *pb.Entity) (err error) { + ent, err := protoToEntity(src) + if err != nil { + return err + } + if e, ok := dst.(PropertyLoadSaver); ok { + return e.Load(ent.Properties) + } + return LoadStruct(dst, ent.Properties) +} + +func (s structPLS) Load(props []Property) error { + var fieldName, errReason string + var l propertyLoader + + prev := make(map[string]struct{}) + for _, p := range props { + if errStr := l.load(s.codec, s.v, p, prev); errStr != "" { + // We don't return early, as we try to load as many properties as possible. + // It is valid to load an entity into a struct that cannot fully represent it. 
+			// That case returns an error, but the caller is free to ignore it.
+			fieldName, errReason = p.Name, errStr
+		}
+	}
+	if errReason != "" {
+		return &ErrFieldMismatch{
+			StructType: s.v.Type(),
+			FieldName:  fieldName,
+			Reason:     errReason,
+		}
+	}
+	return nil
+}
+
+func protoToEntity(src *pb.Entity) (*Entity, error) {
+	props := make([]Property, 0, len(src.Properties))
+	for name, val := range src.Properties {
+		v, err := propToValue(val)
+		if err != nil {
+			return nil, err
+		}
+		props = append(props, Property{
+			Name:    name,
+			Value:   v,
+			NoIndex: val.ExcludeFromIndexes,
+		})
+	}
+	var key *Key
+	if src.Key != nil {
+		// Ignore any error, since nested entity values
+		// are allowed to have an invalid key.
+		key, _ = protoToKey(src.Key)
+	}
+
+	return &Entity{key, props}, nil
+}
+
+// propToValue returns a Go value that represents the PropertyValue. For
+// example, a TimestampValue becomes a time.Time.
+func propToValue(v *pb.Value) (interface{}, error) {
+	switch v := v.ValueType.(type) {
+	case *pb.Value_NullValue:
+		return nil, nil
+	case *pb.Value_BooleanValue:
+		return v.BooleanValue, nil
+	case *pb.Value_IntegerValue:
+		return v.IntegerValue, nil
+	case *pb.Value_DoubleValue:
+		return v.DoubleValue, nil
+	case *pb.Value_TimestampValue:
+		return time.Unix(v.TimestampValue.Seconds, int64(v.TimestampValue.Nanos)), nil
+	case *pb.Value_KeyValue:
+		return protoToKey(v.KeyValue)
+	case *pb.Value_StringValue:
+		return v.StringValue, nil
+	case *pb.Value_BlobValue:
+		return []byte(v.BlobValue), nil
+	case *pb.Value_GeoPointValue:
+		return GeoPoint{Lat: v.GeoPointValue.Latitude, Lng: v.GeoPointValue.Longitude}, nil
+	case *pb.Value_EntityValue:
+		return protoToEntity(v.EntityValue)
+	case *pb.Value_ArrayValue:
+		arr := make([]interface{}, 0, len(v.ArrayValue.Values))
+		for _, v := range v.ArrayValue.Values {
+			vv, err := propToValue(v)
+			if err != nil {
+				return nil, err
+			}
+			arr = append(arr, vv)
+		}
+		return arr, nil
+	default:
+		return nil, nil
+	}
+}
diff --git a/vendor/cloud.google.com/go/datastore/load_test.go b/vendor/cloud.google.com/go/datastore/load_test.go
new file mode 100644
index 000000000..fa1ae0126
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/load_test.go
@@ -0,0 +1,414 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package datastore + +import ( + "reflect" + "testing" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +type Simple struct { + I int64 +} + +type SimpleWithTag struct { + I int64 `datastore:"II"` +} + +type NestedSimpleWithTag struct { + A SimpleWithTag `datastore:"AA"` +} + +type NestedSliceOfSimple struct { + A []Simple +} + +type SimpleTwoFields struct { + S string + SS string +} + +type NestedSimpleAnonymous struct { + Simple + X string +} + +type NestedSimple struct { + A Simple + I int +} + +type NestedSimple1 struct { + A Simple + X string +} + +type NestedSimple2X struct { + AA NestedSimple + A SimpleTwoFields + S string +} + +type BDotB struct { + B string `datastore:"B.B"` +} + +type ABDotB struct { + A BDotB +} + +type MultiAnonymous struct { + Simple + SimpleTwoFields + X string +} + +func TestLoadEntityNestedLegacy(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + want interface{} + }{ + { + "nested", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{"two"}}, + "A.I": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + &NestedSimple1{ + A: Simple{I: 2}, + X: "two", + }, + }, + { + "nested with tag", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "AA.II": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + &NestedSimpleWithTag{ + A: SimpleWithTag{I: 2}, + }, + }, + { + "nested with anonymous struct field", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{"two"}}, + "I": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + &NestedSimpleAnonymous{ + Simple: Simple{I: 2}, + X: "two", + }, + }, + { + "nested with dotted field tag", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "A.B.B": {ValueType: &pb.Value_StringValue{"bb"}}, + }, + }, + &ABDotB{ + A: BDotB{ + B: "bb", + }, + }, + }, + { + "nested with multiple anonymous fields", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{3}}, + "S": {ValueType: &pb.Value_StringValue{"S"}}, + "SS": {ValueType: &pb.Value_StringValue{"s"}}, + "X": {ValueType: &pb.Value_StringValue{"s"}}, + }, + }, + &MultiAnonymous{ + Simple: Simple{I: 3}, + SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"}, + X: "s", + }, + }, + } + + for _, tc := range testCases { + dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + err := loadEntity(dst, tc.src) + if err != nil { + t.Errorf("loadEntity: %s: %v", tc.desc, err) + continue + } + + if !reflect.DeepEqual(tc.want, dst) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want) + } + } +} + +type WithKey struct { + X string + I int + K *Key `datastore:"__key__"` +} + +type NestedWithKey struct { + Y string + N WithKey +} + +var ( + incompleteKey = newKey("", nil) + invalidKey = newKey("s", incompleteKey) +) + +func TestLoadEntityNested(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + want interface{} + }{ + { + "nested basic", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{3}}, + }, + }, + }}, + "I": {ValueType: &pb.Value_IntegerValue{10}}, + }, + }, + &NestedSimple{ + A: Simple{I: 3}, + I: 10, + }, + }, + { + "nested with struct tags", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "AA": {ValueType: 
&pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "II": {ValueType: &pb.Value_IntegerValue{1}}, + }, + }, + }}, + }, + }, + &NestedSimpleWithTag{ + A: SimpleWithTag{I: 1}, + }, + }, + { + "nested 2x", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "AA": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{3}}, + }, + }, + }}, + "I": {ValueType: &pb.Value_IntegerValue{1}}, + }, + }, + }}, + "A": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "S": {ValueType: &pb.Value_StringValue{"S"}}, + "SS": {ValueType: &pb.Value_StringValue{"s"}}, + }, + }, + }}, + "S": {ValueType: &pb.Value_StringValue{"SS"}}, + }, + }, + &NestedSimple2X{ + AA: NestedSimple{ + A: Simple{I: 3}, + I: 1, + }, + A: SimpleTwoFields{S: "S", SS: "s"}, + S: "SS", + }, + }, + { + "nested anonymous", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{3}}, + "X": {ValueType: &pb.Value_StringValue{"SomeX"}}, + }, + }, + &NestedSimpleAnonymous{ + Simple: Simple{I: 3}, + X: "SomeX", + }, + }, + { + "nested simple with slice", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_ArrayValue{ + &pb.ArrayValue{ + []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{3}}, + }, + }, + }}, + {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{4}}, + }, + }, + }}, + }, + }, + }}, + }, + }, + + &NestedSliceOfSimple{ + A: []Simple{Simple{I: 3}, Simple{I: 4}}, + }, + }, + { + "nested with multiple anonymous fields", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{3}}, + "S": {ValueType: &pb.Value_StringValue{"S"}}, + "SS": {ValueType: &pb.Value_StringValue{"s"}}, + "X": {ValueType: &pb.Value_StringValue{"ss"}}, + }, + }, + &MultiAnonymous{ + Simple: Simple{I: 3}, + SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"}, + X: "ss", + }, + }, + { + "nested with dotted field tag", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "B.B": {ValueType: &pb.Value_StringValue{"bb"}}, + }, + }, + }}, + }, + }, + &ABDotB{ + A: BDotB{ + B: "bb", + }, + }, + }, + { + "nested entity with key", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{"yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Key: keyToProto(testKey1a), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{"two"}}, + "I": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + }}, + }, + }, + &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: testKey1a, + }, + }, + }, + { + "nested entity with invalid key", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{"yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Key: keyToProto(invalidKey), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{"two"}}, + "I": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + }}, + }, + }, + &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: invalidKey, + }, + }, + }, + } + + for _, 
tc := range testCases { + dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + err := loadEntity(dst, tc.src) + if err != nil { + t.Errorf("loadEntity: %s: %v", tc.desc, err) + continue + } + + if !reflect.DeepEqual(tc.want, dst) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/prop.go b/vendor/cloud.google.com/go/datastore/prop.go new file mode 100644 index 000000000..60c6969ac --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/prop.go @@ -0,0 +1,324 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + "reflect" + "strings" + "sync" + "unicode" +) + +// Entities with more than this many indexed properties will not be saved. +const maxIndexedProperties = 20000 + +// []byte fields more than 1 megabyte long will not be loaded or saved. +const maxBlobLen = 1 << 20 + +// Property is a name/value pair plus some metadata. A datastore entity's +// contents are loaded and saved as a sequence of Properties. Each property +// name must be unique within an entity. +type Property struct { + // Name is the property name. + Name string + // Value is the property value. The valid types are: + // - int64 + // - bool + // - string + // - float64 + // - *Key + // - time.Time + // - GeoPoint + // - []byte (up to 1 megabyte in length) + // - *Entity (representing a nested struct) + // Value can also be: + // - []interface{} where each element is one of the above types + // This set is smaller than the set of valid struct field types that the + // datastore can load and save. A Value's type must be explicitly on + // the list above; it is not sufficient for the underlying type to be + // on that list. For example, a Value of "type myInt64 int64" is + // invalid. Smaller-width integers and floats are also invalid. Again, + // this is more restrictive than the set of valid struct field types. + // + // A Value will have an opaque type when loading entities from an index, + // such as via a projection query. Load entities into a struct instead + // of a PropertyLoadSaver when using a projection query. + // + // A Value may also be the nil interface value; this is equivalent to + // Python's None but not directly representable by a Go struct. Loading + // a nil-valued property into a struct will set that field to the zero + // value. + Value interface{} + // NoIndex is whether the datastore cannot index this property. + // If NoIndex is set to false, []byte and string values are limited to + // 1500 bytes. + NoIndex bool +} + +// An Entity is the value type for a nested struct. +// This type is only used for a Property's Value. +type Entity struct { + Key *Key + Properties []Property +} + +// PropertyLoadSaver can be converted from and to a slice of Properties. 
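+// Implementing it lets a type take control of its own (de)serialization;
+// PropertyList below is the simplest implementation. A minimal sketch of a
+// custom implementation (UnixTime is a hypothetical type, not part of this
+// package):
+//
+//	type UnixTime struct{ T int64 }
+//
+//	func (u *UnixTime) Load(ps []Property) error {
+//		for _, p := range ps {
+//			if p.Name == "T" {
+//				u.T, _ = p.Value.(int64)
+//			}
+//		}
+//		return nil
+//	}
+//
+//	func (u *UnixTime) Save() ([]Property, error) {
+//		return []Property{{Name: "T", Value: u.T}}, nil
+//	}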
+type PropertyLoadSaver interface { + Load([]Property) error + Save() ([]Property, error) +} + +// PropertyList converts a []Property to implement PropertyLoadSaver. +type PropertyList []Property + +var ( + typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem() + typeOfPropertyList = reflect.TypeOf(PropertyList(nil)) +) + +// Load loads all of the provided properties into l. +// It does not first reset *l to an empty slice. +func (l *PropertyList) Load(p []Property) error { + *l = append(*l, p...) + return nil +} + +// Save saves all of l's properties as a slice of Properties. +func (l *PropertyList) Save() ([]Property, error) { + return *l, nil +} + +// validPropertyName returns whether name consists of one or more valid Go +// identifiers joined by ".". +func validPropertyName(name string) bool { + if name == "" { + return false + } + for _, s := range strings.Split(name, ".") { + if s == "" { + return false + } + first := true + for _, c := range s { + if first { + first = false + if c != '_' && !unicode.IsLetter(c) { + return false + } + } else { + if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + } + return true +} + +// structCodec describes how to convert a struct to and from a sequence of +// properties. +type structCodec struct { + // fields gives the field codec for the structTag with the given name. + fields map[string]fieldCodec + // hasSlice is whether a struct or any of its nested or embedded structs + // has a slice-typed field (other than []byte). + hasSlice bool + // keyField is the index of a *Key field with structTag __key__. + // This field is not relevant for the top level struct, only for + // nested structs. + keyField int + // complete is whether the structCodec is complete. An incomplete + // structCodec may be encountered when walking a recursive struct. + complete bool +} + +// fieldCodec is a struct field's index and, if that struct field's type is +// itself a struct, that substruct's structCodec. +type fieldCodec struct { + // path is the index path to the field + path []int + noIndex bool + // structCodec is the codec fot the struct field at index 'path', + // or nil if the field is not a struct. + structCodec *structCodec +} + +// structCodecs collects the structCodecs that have already been calculated. +var ( + structCodecsMutex sync.Mutex + structCodecs = make(map[reflect.Type]*structCodec) +) + +// getStructCodec returns the structCodec for the given struct type. +func getStructCodec(t reflect.Type) (*structCodec, error) { + structCodecsMutex.Lock() + defer structCodecsMutex.Unlock() + return getStructCodecLocked(t) +} + +// getStructCodecLocked implements getStructCodec. The structCodecsMutex must +// be held when calling this function. +func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) { + c, ok := structCodecs[t] + if ok { + return c, nil + } + c = &structCodec{ + fields: make(map[string]fieldCodec), + // We initialize keyField to -1 so that the zero-value is not + // misinterpreted as index 0. + keyField: -1, + } + + // Add c to the structCodecs map before we are sure it is good. If t is + // a recursive type, it needs to find the incomplete entry for itself in + // the map. + structCodecs[t] = c + defer func() { + if retErr != nil { + delete(structCodecs, t) + } + }() + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + // Skip unexported fields. + // Note that if f is an anonymous, unexported struct field, + // we will not promote its fields. 
We will skip f entirely. + if f.PkgPath != "" { + continue + } + + name, opts := f.Tag.Get("datastore"), "" + if i := strings.Index(name, ","); i != -1 { + name, opts = name[:i], name[i+1:] + } + switch { + case name == "": + if !f.Anonymous { + name = f.Name + } + case name == "-": + continue + case name == "__key__": + if f.Type != typeOfKeyPtr { + return nil, fmt.Errorf("datastore: __key__ field on struct %v is not a *datastore.Key", t) + } + c.keyField = i + continue + case !validPropertyName(name): + return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name) + } + + substructType, fIsSlice := reflect.Type(nil), false + switch f.Type.Kind() { + case reflect.Struct: + substructType = f.Type + case reflect.Slice: + if f.Type.Elem().Kind() == reflect.Struct { + substructType = f.Type.Elem() + } + fIsSlice = f.Type != typeOfByteSlice + c.hasSlice = c.hasSlice || fIsSlice + } + + var sub *structCodec + if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint { + var err error + sub, err = getStructCodecLocked(substructType) + if err != nil { + return nil, err + } + if !sub.complete { + return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name) + } + if fIsSlice && sub.hasSlice { + return nil, fmt.Errorf( + "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name) + } + c.hasSlice = c.hasSlice || sub.hasSlice + + // If name is empty at this point, f is an anonymous struct field. + // In this case, we promote the substruct's fields up to this level + // in the linked list of struct codecs. + if name == "" { + for subname, subfield := range sub.fields { + if _, ok := c.fields[subname]; ok { + return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", subname) + } + c.fields[subname] = fieldCodec{ + path: append([]int{i}, subfield.path...), + noIndex: subfield.noIndex || opts == "noindex", + structCodec: subfield.structCodec, + } + } + continue + } + } + + if _, ok := c.fields[name]; ok { + return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name) + } + c.fields[name] = fieldCodec{ + path: []int{i}, + noIndex: opts == "noindex", + structCodec: sub, + } + } + c.complete = true + return c, nil +} + +// structPLS adapts a struct to be a PropertyLoadSaver. +type structPLS struct { + v reflect.Value + codec *structCodec +} + +// newStructPLS returns a structPLS, which implements the +// PropertyLoadSaver interface, for the struct pointer p. +func newStructPLS(p interface{}) (*structPLS, error) { + v := reflect.ValueOf(p) + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return nil, ErrInvalidEntityType + } + v = v.Elem() + codec, err := getStructCodec(v.Type()) + if err != nil { + return nil, err + } + return &structPLS{v, codec}, nil +} + +// LoadStruct loads the properties from p to dst. +// dst must be a struct pointer. +func LoadStruct(dst interface{}, p []Property) error { + x, err := newStructPLS(dst) + if err != nil { + return err + } + return x.Load(p) +} + +// SaveStruct returns the properties from src as a slice of Properties. +// src must be a struct pointer. 
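The tag grammar that the codec above parses is easiest to see by example. A hedged sketch of the forms getStructCodecLocked accepts, before the SaveStruct implementation below (Widget and Part are hypothetical types):

    type Part struct {
        N int
    }

    type Widget struct {
        Label   string `datastore:"label"`        // stored under the name "label"
        Blob    []byte `datastore:"blob,noindex"` // stored but not indexed
        Skipped string `datastore:"-"`            // never stored
        K       *Key   `datastore:"__key__"`      // must be *Key; meaningful for nested structs
        Inner   Part   // untagged struct: flattened as "Inner.N"
    }

    // roundTrip exercises both directions of the codec.
    func roundTrip(w *Widget) error {
        props, err := SaveStruct(w) // struct -> []Property
        if err != nil {
            return err
        }
        return LoadStruct(w, props) // []Property -> struct
    }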
+func SaveStruct(src interface{}) ([]Property, error) { + x, err := newStructPLS(src) + if err != nil { + return nil, err + } + return x.Save() +} diff --git a/vendor/cloud.google.com/go/datastore/query.go b/vendor/cloud.google.com/go/datastore/query.go new file mode 100644 index 000000000..dc0e3bccb --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/query.go @@ -0,0 +1,743 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "strconv" + "strings" + + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +type operator int + +const ( + lessThan operator = iota + 1 + lessEq + equal + greaterEq + greaterThan + + keyFieldName = "__key__" +) + +var operatorToProto = map[operator]pb.PropertyFilter_Operator{ + lessThan: pb.PropertyFilter_LESS_THAN, + lessEq: pb.PropertyFilter_LESS_THAN_OR_EQUAL, + equal: pb.PropertyFilter_EQUAL, + greaterEq: pb.PropertyFilter_GREATER_THAN_OR_EQUAL, + greaterThan: pb.PropertyFilter_GREATER_THAN, +} + +// filter is a conditional filter on query results. +type filter struct { + FieldName string + Op operator + Value interface{} +} + +type sortDirection bool + +const ( + ascending sortDirection = false + descending sortDirection = true +) + +var sortDirectionToProto = map[sortDirection]pb.PropertyOrder_Direction{ + ascending: pb.PropertyOrder_ASCENDING, + descending: pb.PropertyOrder_DESCENDING, +} + +// order is a sort order on query results. +type order struct { + FieldName string + Direction sortDirection +} + +// NewQuery creates a new Query for a specific entity kind. +// +// An empty kind means to return all entities, including entities created and +// managed by other App Engine features, and is called a kindless query. +// Kindless queries cannot include filters or sort orders on property values. +func NewQuery(kind string) *Query { + return &Query{ + kind: kind, + limit: -1, + } +} + +// Query represents a datastore query. +type Query struct { + kind string + ancestor *Key + filter []filter + order []order + projection []string + + distinct bool + keysOnly bool + eventual bool + limit int32 + offset int32 + start []byte + end []byte + + trans *Transaction + + err error +} + +func (q *Query) clone() *Query { + x := *q + // Copy the contents of the slice-typed fields to a new backing store. + if len(q.filter) > 0 { + x.filter = make([]filter, len(q.filter)) + copy(x.filter, q.filter) + } + if len(q.order) > 0 { + x.order = make([]order, len(q.order)) + copy(x.order, q.order) + } + return &x +} + +// Ancestor returns a derivative query with an ancestor filter. +// The ancestor should not be nil. 
+func (q *Query) Ancestor(ancestor *Key) *Query { + q = q.clone() + if ancestor == nil { + q.err = errors.New("datastore: nil query ancestor") + return q + } + q.ancestor = ancestor + return q +} + +// EventualConsistency returns a derivative query that returns eventually +// consistent results. +// It only has an effect on ancestor queries. +func (q *Query) EventualConsistency() *Query { + q = q.clone() + q.eventual = true + return q +} + +// Transaction returns a derivative query that is associated with the given +// transaction. +// +// All reads performed as part of the transaction will come from a single +// consistent snapshot. Furthermore, if the transaction is set to a +// serializable isolation level, another transaction cannot concurrently modify +// the data that is read or modified by this transaction. +func (q *Query) Transaction(t *Transaction) *Query { + q = q.clone() + q.trans = t + return q +} + +// Filter returns a derivative query with a field-based filter. +// The filterStr argument must be a field name followed by optional space, +// followed by an operator, one of ">", "<", ">=", "<=", or "=". +// Fields are compared against the provided value using the operator. +// Multiple filters are AND'ed together. +// Field names which contain spaces, quote marks, or operator characters +// should be passed as quoted Go string literals as returned by strconv.Quote +// or the fmt package's %q verb. +func (q *Query) Filter(filterStr string, value interface{}) *Query { + q = q.clone() + filterStr = strings.TrimSpace(filterStr) + if filterStr == "" { + q.err = fmt.Errorf("datastore: invalid filter %q", filterStr) + return q + } + f := filter{ + FieldName: strings.TrimRight(filterStr, " ><=!"), + Value: value, + } + switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { + case "<=": + f.Op = lessEq + case ">=": + f.Op = greaterEq + case "<": + f.Op = lessThan + case ">": + f.Op = greaterThan + case "=": + f.Op = equal + default: + q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) + return q + } + var err error + f.FieldName, err = unquote(f.FieldName) + if err != nil { + q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", f.FieldName) + return q + } + q.filter = append(q.filter, f) + return q +} + +// Order returns a derivative query with a field-based sort order. Orders are +// applied in the order they are added. The default order is ascending; to sort +// in descending order prefix the fieldName with a minus sign (-). +// Field names which contain spaces, quote marks, or the minus sign +// should be passed as quoted Go string literals as returned by strconv.Quote +// or the fmt package's %q verb. +func (q *Query) Order(fieldName string) *Query { + q = q.clone() + fieldName, dir := strings.TrimSpace(fieldName), ascending + if strings.HasPrefix(fieldName, "-") { + fieldName, dir = strings.TrimSpace(fieldName[1:]), descending + } else if strings.HasPrefix(fieldName, "+") { + q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) + return q + } + fieldName, err := unquote(fieldName) + if err != nil { + q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", fieldName) + return q + } + if fieldName == "" { + q.err = errors.New("datastore: empty order") + return q + } + q.order = append(q.order, order{ + Direction: dir, + FieldName: fieldName, + }) + return q +} + +// unquote optionally interprets s as a double-quoted or backquoted Go +// string literal if it begins with the relevant character. 
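Putting Filter, Order, and the quoting rules that unquote handles together, a hedged usage sketch (the field names are hypothetical, and the strconv import is assumed):

    func tallGophers() *Query {
        q := NewQuery("Gopher").
            Filter("Height >=", 10). // field name, optional space, operator
            Filter("Height <", 100). // multiple filters are AND'ed together
            Order("-Height")         // a leading minus sorts descending
        // Names containing spaces or operator characters must be quoted:
        return q.Filter(strconv.Quote("max height")+" =", 42)
    }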
+func unquote(s string) (string, error) { + if s == "" || (s[0] != '`' && s[0] != '"') { + return s, nil + } + return strconv.Unquote(s) +} + +// Project returns a derivative query that yields only the given fields. It +// cannot be used with KeysOnly. +func (q *Query) Project(fieldNames ...string) *Query { + q = q.clone() + q.projection = append([]string(nil), fieldNames...) + return q +} + +// Distinct returns a derivative query that yields de-duplicated entities with +// respect to the set of projected fields. It is only used for projection +// queries. +func (q *Query) Distinct() *Query { + q = q.clone() + q.distinct = true + return q +} + +// KeysOnly returns a derivative query that yields only keys, not keys and +// entities. It cannot be used with projection queries. +func (q *Query) KeysOnly() *Query { + q = q.clone() + q.keysOnly = true + return q +} + +// Limit returns a derivative query that has a limit on the number of results +// returned. A negative value means unlimited. +func (q *Query) Limit(limit int) *Query { + q = q.clone() + if limit < math.MinInt32 || limit > math.MaxInt32 { + q.err = errors.New("datastore: query limit overflow") + return q + } + q.limit = int32(limit) + return q +} + +// Offset returns a derivative query that has an offset of how many keys to +// skip over before returning results. A negative value is invalid. +func (q *Query) Offset(offset int) *Query { + q = q.clone() + if offset < 0 { + q.err = errors.New("datastore: negative query offset") + return q + } + if offset > math.MaxInt32 { + q.err = errors.New("datastore: query offset overflow") + return q + } + q.offset = int32(offset) + return q +} + +// Start returns a derivative query with the given start point. +func (q *Query) Start(c Cursor) *Query { + q = q.clone() + q.start = c.cc + return q +} + +// End returns a derivative query with the given end point. +func (q *Query) End(c Cursor) *Query { + q = q.clone() + q.end = c.cc + return q +} + +// toProto converts the query to a protocol buffer. 
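How these derivative-query options combine in practice; note that toProto below rejects a query that is both a projection and keys-only. A hypothetical sketch:

    func shapedQueries() (heights, keys *Query) {
        // Projection queries read only the named fields, from the index.
        heights = NewQuery("Gopher").Project("Height").Distinct()
        // Keys-only queries skip entity loading entirely; combining this
        // with Project would fail when the query is converted below.
        keys = NewQuery("Gopher").KeysOnly().Offset(20).Limit(100)
        return heights, keys
    }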
+func (q *Query) toProto(req *pb.RunQueryRequest) error { + if len(q.projection) != 0 && q.keysOnly { + return errors.New("datastore: query cannot both project and be keys-only") + } + dst := &pb.Query{} + if q.kind != "" { + dst.Kind = []*pb.KindExpression{{Name: q.kind}} + } + if q.projection != nil { + for _, propertyName := range q.projection { + dst.Projection = append(dst.Projection, &pb.Projection{Property: &pb.PropertyReference{Name: propertyName}}) + } + + if q.distinct { + for _, propertyName := range q.projection { + dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName}) + } + } + } + if q.keysOnly { + dst.Projection = []*pb.Projection{{Property: &pb.PropertyReference{Name: keyFieldName}}} + } + + var filters []*pb.Filter + for _, qf := range q.filter { + if qf.FieldName == "" { + return errors.New("datastore: empty query filter field name") + } + v, err := interfaceToProto(reflect.ValueOf(qf.Value).Interface(), false) + if err != nil { + return fmt.Errorf("datastore: bad query filter value type: %v", err) + } + op, ok := operatorToProto[qf.Op] + if !ok { + return errors.New("datastore: unknown query filter operator") + } + xf := &pb.PropertyFilter{ + Op: op, + Property: &pb.PropertyReference{Name: qf.FieldName}, + Value: v, + } + filters = append(filters, &pb.Filter{ + FilterType: &pb.Filter_PropertyFilter{xf}, + }) + } + + if q.ancestor != nil { + filters = append(filters, &pb.Filter{ + FilterType: &pb.Filter_PropertyFilter{&pb.PropertyFilter{ + Property: &pb.PropertyReference{Name: "__key__"}, + Op: pb.PropertyFilter_HAS_ANCESTOR, + Value: &pb.Value{ValueType: &pb.Value_KeyValue{keyToProto(q.ancestor)}}, + }}}) + } + + if len(filters) == 1 { + dst.Filter = filters[0] + } else if len(filters) > 1 { + dst.Filter = &pb.Filter{FilterType: &pb.Filter_CompositeFilter{&pb.CompositeFilter{ + Op: pb.CompositeFilter_AND, + Filters: filters, + }}} + } + + for _, qo := range q.order { + if qo.FieldName == "" { + return errors.New("datastore: empty query order field name") + } + xo := &pb.PropertyOrder{ + Property: &pb.PropertyReference{Name: qo.FieldName}, + Direction: sortDirectionToProto[qo.Direction], + } + dst.Order = append(dst.Order, xo) + } + if q.limit >= 0 { + dst.Limit = &wrapperspb.Int32Value{q.limit} + } + dst.Offset = q.offset + dst.StartCursor = q.start + dst.EndCursor = q.end + + if t := q.trans; t != nil { + if t.id == nil { + return errExpiredTransaction + } + if q.eventual { + return errors.New("datastore: cannot use EventualConsistency query in a transaction") + } + req.ReadOptions = &pb.ReadOptions{ + ConsistencyType: &pb.ReadOptions_Transaction{t.id}, + } + } + + if q.eventual { + req.ReadOptions = &pb.ReadOptions{&pb.ReadOptions_ReadConsistency_{pb.ReadOptions_EVENTUAL}} + } + + req.QueryType = &pb.RunQueryRequest_Query{dst} + return nil +} + +// Count returns the number of results for the given query. +// +// The running time and number of API calls made by Count scale linearly +// with the sum of the query's offset and limit. Unless the result count is +// expected to be small, it is best to specify a limit; otherwise Count will +// continue until it finishes counting or the provided context expires. +func (c *Client) Count(ctx context.Context, q *Query) (int, error) { + // Check that the query is well-formed. + if q.err != nil { + return 0, q.err + } + + // Create a copy of the query, with keysOnly true (if we're not a projection, + // since the two are incompatible).
+ newQ := q.clone() + newQ.keysOnly = len(newQ.projection) == 0 + + // Create an iterator and use it to walk through the batches of results + // directly. + it := c.Run(ctx, newQ) + n := 0 + for { + err := it.nextBatch() + if err == Done { + return n, nil + } + if err != nil { + return 0, err + } + n += len(it.results) + } +} + +// GetAll runs the provided query in the given context and returns all keys +// that match that query, as well as appending the values to dst. +// +// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non- +// interface, non-pointer type P such that P or *P implements PropertyLoadSaver. +// +// As a special case, *PropertyList is an invalid type for dst, even though a +// PropertyList is a slice of structs. It is treated as invalid to avoid being +// mistakenly passed when *[]PropertyList was intended. +// +// The keys returned by GetAll will be in a 1-1 correspondence with the entities +// added to dst. +// +// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys. +// +// The running time and number of API calls made by GetAll scale linearly +// with the sum of the query's offset and limit. Unless the result count is +// expected to be small, it is best to specify a limit; otherwise GetAll will +// continue until it finishes collecting results or the provided context +// expires. +func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) ([]*Key, error) { + var ( + dv reflect.Value + mat multiArgType + elemType reflect.Type + errFieldMismatch error + ) + if !q.keysOnly { + dv = reflect.ValueOf(dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return nil, ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType = checkMultiArg(dv) + if mat == multiArgTypeInvalid || mat == multiArgTypeInterface { + return nil, ErrInvalidEntityType + } + } + + var keys []*Key + for t := c.Run(ctx, q); ; { + k, e, err := t.next() + if err == Done { + break + } + if err != nil { + return keys, err + } + if !q.keysOnly { + ev := reflect.New(elemType) + if elemType.Kind() == reflect.Map { + // This is a special case. The zero values of a map type are + // not immediately useful; they have to be make'd. + // + // Funcs and channels are similar, in that a zero value is not useful, + // but even a freshly make'd channel isn't useful: there's no fixed + // channel buffer size that is always going to be large enough, and + // there's no goroutine to drain the other end. Theoretically, these + // types could be supported, for example by sniffing for a constructor + // method or requiring prior registration, but for now it's not a + // frequent enough concern to be worth it. Programmers can work around + // it by explicitly using Iterator.Next instead of the Query.GetAll + // convenience method. + x := reflect.MakeMap(elemType) + ev.Elem().Set(x) + } + if err = loadEntity(ev.Interface(), e); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return keys, err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + } + keys = append(keys, k) + } + return keys, errFieldMismatch +} + +// Run runs the given query in the given context.
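The canonical iteration pattern over Run and Next, sketched under the assumption of a Gopher struct like the one in this package's tests (the context import here is golang.org/x/net/context, as elsewhere in this file):

    func listGophers(ctx context.Context, c *Client) ([]Gopher, error) {
        var out []Gopher
        it := c.Run(ctx, NewQuery("Gopher"))
        for {
            var g Gopher
            _, err := it.Next(&g) // loads the entity unless the query is keys-only
            if err == Done {      // Done marks the normal end of iteration
                return out, nil
            }
            if err != nil {
                return nil, err
            }
            out = append(out, g)
        }
    }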
+func (c *Client) Run(ctx context.Context, q *Query) *Iterator { + if q.err != nil { + return &Iterator{err: q.err} + } + t := &Iterator{ + ctx: ctx, + client: c, + limit: q.limit, + offset: q.offset, + keysOnly: q.keysOnly, + pageCursor: q.start, + entityCursor: q.start, + req: &pb.RunQueryRequest{ + ProjectId: c.dataset, + }, + } + if ns := ctxNamespace(ctx); ns != "" { + t.req.PartitionId = &pb.PartitionId{ + NamespaceId: ns, + } + } + if err := q.toProto(t.req); err != nil { + t.err = err + } + return t +} + +// Iterator is the result of running a query. +type Iterator struct { + ctx context.Context + client *Client + err error + + // results is the list of EntityResults still to be iterated over from the + // most recent API call. It will be nil if no requests have yet been issued. + results []*pb.EntityResult + // req is the request to send. It may be modified and used multiple times. + req *pb.RunQueryRequest + + // limit is the limit on the number of results this iterator should return. + // The zero value is used to prevent further fetches from the server. + // A negative value means unlimited. + limit int32 + // offset is the number of results that still need to be skipped. + offset int32 + // keysOnly records whether the query was keys-only (skip entity loading). + keysOnly bool + + // pageCursor is the compiled cursor for the next batch/page of result. + // TODO(djd): Can we delete this in favour of paging with the last + // entityCursor from each batch? + pageCursor []byte + // entityCursor is the compiled cursor of the next result. + entityCursor []byte +} + +// Done is returned when a query iteration has completed. +var Done = errors.New("datastore: query has no more results") + +// Next returns the key of the next result. When there are no more results, +// Done is returned as the error. +// +// If the query is not keys only and dst is non-nil, it also loads the entity +// stored for that key into the struct pointer or PropertyLoadSaver dst, with +// the same semantics and possible errors as for the Get function. +func (t *Iterator) Next(dst interface{}) (*Key, error) { + k, e, err := t.next() + if err != nil { + return nil, err + } + if dst != nil && !t.keysOnly { + err = loadEntity(dst, e) + } + return k, err +} + +func (t *Iterator) next() (*Key, *pb.Entity, error) { + // Fetch additional batches while there are no more results. + for t.err == nil && len(t.results) == 0 { + t.err = t.nextBatch() + } + if t.err != nil { + return nil, nil, t.err + } + + // Extract the next result, update cursors, and parse the entity's key. + e := t.results[0] + t.results = t.results[1:] + t.entityCursor = e.Cursor + if len(t.results) == 0 { + t.entityCursor = t.pageCursor // At the end of the batch. + } + if e.Entity.Key == nil { + return nil, nil, errors.New("datastore: internal error: server did not return a key") + } + k, err := protoToKey(e.Entity.Key) + if err != nil || k.Incomplete() { + return nil, nil, errors.New("datastore: internal error: server returned an invalid key") + } + + return k, e.Entity, nil +} + +// nextBatch makes a single call to the server for a batch of results. +func (t *Iterator) nextBatch() error { + if t.limit == 0 { + return Done // Short-circuits the zero-item response. + } + + // Adjust the query with the latest start cursor, limit and offset. + q := t.req.GetQuery() + q.StartCursor = t.pageCursor + q.Offset = t.offset + if t.limit >= 0 { + q.Limit = &wrapperspb.Int32Value{t.limit} + } else { + q.Limit = nil + } + + // Run the query. 
+ resp, err := t.client.client.RunQuery(t.ctx, t.req) + if err != nil { + return err + } + + // Adjust any offset from skipped results. + skip := resp.Batch.SkippedResults + if skip < 0 { + return errors.New("datastore: internal error: negative number of skipped_results") + } + t.offset -= skip + if t.offset < 0 { + return errors.New("datastore: internal error: query skipped too many results") + } + if t.offset > 0 && len(resp.Batch.EntityResults) > 0 { + return errors.New("datastore: internal error: query returned results before requested offset") + } + + // Adjust the limit. + if t.limit >= 0 { + t.limit -= int32(len(resp.Batch.EntityResults)) + if t.limit < 0 { + return errors.New("datastore: internal error: query returned more results than the limit") + } + } + + // If there are no more results available, set limit to zero to prevent + // further fetches. Otherwise, check that there is a next page cursor available. + if resp.Batch.MoreResults != pb.QueryResultBatch_NOT_FINISHED { + t.limit = 0 + } else if resp.Batch.EndCursor == nil { + return errors.New("datastore: internal error: server did not return a cursor") + } + + // Update cursors. + // If any results were skipped, use the SkippedCursor as the next entity cursor. + if skip > 0 { + t.entityCursor = resp.Batch.SkippedCursor + } else { + t.entityCursor = q.StartCursor + } + t.pageCursor = resp.Batch.EndCursor + + t.results = resp.Batch.EntityResults + return nil +} + +// Cursor returns a cursor for the iterator's current location. +func (t *Iterator) Cursor() (Cursor, error) { + // If there is still an offset, we need to skip those results first. + for t.err == nil && t.offset > 0 { + t.err = t.nextBatch() + } + + if t.err != nil && t.err != Done { + return Cursor{}, t.err + } + + return Cursor{t.entityCursor}, nil +} + +// Cursor is an iterator's position. It can be converted to and from an opaque +// string. A cursor can be used from different HTTP requests, but only with a +// query with the same kind, ancestor, filter and order constraints. +// +// The zero Cursor can be used to indicate that there is no start and/or end +// constraint for a query. +type Cursor struct { + cc []byte +} + +// String returns a base-64 string representation of a cursor. +func (c Cursor) String() string { + if c.cc == nil { + return "" + } + + return strings.TrimRight(base64.URLEncoding.EncodeToString(c.cc), "=") +} + +// DecodeCursor decodes a cursor from its base-64 string representation. +func DecodeCursor(s string) (Cursor, error) { + if s == "" { + return Cursor{}, nil + } + if n := len(s) % 4; n != 0 { + s += strings.Repeat("=", 4-n) + } + b, err := base64.URLEncoding.DecodeString(s) + if err != nil { + return Cursor{}, err + } + return Cursor{b}, nil +} diff --git a/vendor/cloud.google.com/go/datastore/query_test.go b/vendor/cloud.google.com/go/datastore/query_test.go new file mode 100644 index 000000000..6a5ae3173 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/query_test.go @@ -0,0 +1,538 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + "fmt" + "reflect" + "sort" + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" +) + +var ( + key1 = &pb.Key{ + Path: []*pb.Key_PathElement{ + { + Kind: "Gopher", + IdType: &pb.Key_PathElement_Id{6}, + }, + }, + } + key2 = &pb.Key{ + Path: []*pb.Key_PathElement{ + { + Kind: "Gopher", + IdType: &pb.Key_PathElement_Id{6}, + }, + { + Kind: "Gopher", + IdType: &pb.Key_PathElement_Id{8}, + }, + }, + } +) + +type fakeClient struct { + pb.DatastoreClient + queryFn func(*pb.RunQueryRequest) (*pb.RunQueryResponse, error) + commitFn func(*pb.CommitRequest) (*pb.CommitResponse, error) +} + +func (c *fakeClient) RunQuery(_ context.Context, req *pb.RunQueryRequest, _ ...grpc.CallOption) (*pb.RunQueryResponse, error) { + return c.queryFn(req) +} + +func (c *fakeClient) Commit(_ context.Context, req *pb.CommitRequest, _ ...grpc.CallOption) (*pb.CommitResponse, error) { + return c.commitFn(req) +} + +func fakeRunQuery(in *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { + expectedIn := &pb.RunQueryRequest{ + QueryType: &pb.RunQueryRequest_Query{&pb.Query{ + Kind: []*pb.KindExpression{{Name: "Gopher"}}, + }}, + } + if !proto.Equal(in, expectedIn) { + return nil, fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn) + } + return &pb.RunQueryResponse{ + Batch: &pb.QueryResultBatch{ + MoreResults: pb.QueryResultBatch_NO_MORE_RESULTS, + EntityResultType: pb.EntityResult_FULL, + EntityResults: []*pb.EntityResult{ + { + Entity: &pb.Entity{ + Key: key1, + Properties: map[string]*pb.Value{ + "Name": {ValueType: &pb.Value_StringValue{"George"}}, + "Height": {ValueType: &pb.Value_IntegerValue{32}}, + }, + }, + }, + { + Entity: &pb.Entity{ + Key: key2, + Properties: map[string]*pb.Value{ + "Name": {ValueType: &pb.Value_StringValue{"Rufus"}}, + // No height for Rufus. + }, + }, + }, + }, + }, + }, nil +} + +type StructThatImplementsPLS struct{} + +func (StructThatImplementsPLS) Load(p []Property) error { return nil } +func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = StructThatImplementsPLS{} + +type StructPtrThatImplementsPLS struct{} + +func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil } +func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{} + +type PropertyMap map[string]Property + +func (m PropertyMap) Load(props []Property) error { + for _, p := range props { + m[p.Name] = p + } + return nil +} + +func (m PropertyMap) Save() ([]Property, error) { + props := make([]Property, 0, len(m)) + for _, p := range m { + props = append(props, p) + } + return props, nil +} + +var _ PropertyLoadSaver = PropertyMap{} + +type Gopher struct { + Name string + Height int +} + +// typeOfEmptyInterface is the type of interface{}, but we can't use +// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an +// interface{}. +var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem() + +func TestCheckMultiArg(t *testing.T) { + testCases := []struct { + v interface{} + mat multiArgType + elemType reflect.Type + }{ + // Invalid cases. 
+ {nil, multiArgTypeInvalid, nil}, + {Gopher{}, multiArgTypeInvalid, nil}, + {&Gopher{}, multiArgTypeInvalid, nil}, + {PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case. + {PropertyMap{}, multiArgTypeInvalid, nil}, + {[]*PropertyList(nil), multiArgTypeInvalid, nil}, + {[]*PropertyMap(nil), multiArgTypeInvalid, nil}, + {[]**Gopher(nil), multiArgTypeInvalid, nil}, + {[]*interface{}(nil), multiArgTypeInvalid, nil}, + // Valid cases. + { + []PropertyList(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyList{}), + }, + { + []PropertyMap(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyMap{}), + }, + { + []StructThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructThatImplementsPLS{}), + }, + { + []StructPtrThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructPtrThatImplementsPLS{}), + }, + { + []Gopher(nil), + multiArgTypeStruct, + reflect.TypeOf(Gopher{}), + }, + { + []*Gopher(nil), + multiArgTypeStructPtr, + reflect.TypeOf(Gopher{}), + }, + { + []interface{}(nil), + multiArgTypeInterface, + typeOfEmptyInterface, + }, + } + for _, tc := range testCases { + mat, elemType := checkMultiArg(reflect.ValueOf(tc.v)) + if mat != tc.mat || elemType != tc.elemType { + t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v", + tc.v, mat, elemType, tc.mat, tc.elemType) + } + } +} + +func TestSimpleQuery(t *testing.T) { + struct1 := Gopher{Name: "George", Height: 32} + struct2 := Gopher{Name: "Rufus"} + pList1 := PropertyList{ + { + Name: "Height", + Value: int64(32), + }, + { + Name: "Name", + Value: "George", + }, + } + pList2 := PropertyList{ + { + Name: "Name", + Value: "Rufus", + }, + } + pMap1 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "George", + }, + "Height": Property{ + Name: "Height", + Value: int64(32), + }, + } + pMap2 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "Rufus", + }, + } + + testCases := []struct { + dst interface{} + want interface{} + }{ + // The destination must have type *[]P, *[]S or *[]*S, for some non-interface + // type P such that *P implements PropertyLoadSaver, or for some struct type S. + {new([]Gopher), &[]Gopher{struct1, struct2}}, + {new([]*Gopher), &[]*Gopher{&struct1, &struct2}}, + {new([]PropertyList), &[]PropertyList{pList1, pList2}}, + {new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}}, + + // Any other destination type is invalid. + {0, nil}, + {Gopher{}, nil}, + {PropertyList{}, nil}, + {PropertyMap{}, nil}, + {[]int{}, nil}, + {[]Gopher{}, nil}, + {[]PropertyList{}, nil}, + {new(int), nil}, + {new(Gopher), nil}, + {new(PropertyList), nil}, // This is a special case. 
+ {new(PropertyMap), nil}, + {new([]int), nil}, + {new([]map[int]int), nil}, + {new([]map[string]Property), nil}, + {new([]map[string]interface{}), nil}, + {new([]*int), nil}, + {new([]*map[int]int), nil}, + {new([]*map[string]Property), nil}, + {new([]*map[string]interface{}), nil}, + {new([]**Gopher), nil}, + {new([]*PropertyList), nil}, + {new([]*PropertyMap), nil}, + } + for _, tc := range testCases { + nCall := 0 + client := &Client{ + client: &fakeClient{ + queryFn: func(req *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { + nCall++ + return fakeRunQuery(req) + }, + }, + } + ctx := context.Background() + + var ( + expectedErr error + expectedNCall int + ) + if tc.want == nil { + expectedErr = ErrInvalidEntityType + } else { + expectedNCall = 1 + } + keys, err := client.GetAll(ctx, NewQuery("Gopher"), tc.dst) + if err != expectedErr { + t.Errorf("dst type %T: got error %v, want %v", tc.dst, err, expectedErr) + continue + } + if nCall != expectedNCall { + t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall) + continue + } + if err != nil { + continue + } + + key1 := NewKey(ctx, "Gopher", "", 6, nil) + expectedKeys := []*Key{ + key1, + NewKey(ctx, "Gopher", "", 8, key1), + } + if l1, l2 := len(keys), len(expectedKeys); l1 != l2 { + t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2) + continue + } + for i, key := range keys { + if !keysEqual(key, expectedKeys[i]) { + t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i]) + continue + } + } + + // Make sure we sort any PropertyList items (the order is not deterministic). + if pLists, ok := tc.dst.(*[]PropertyList); ok { + for _, p := range *pLists { + sort.Sort(byName(p)) + } + } + + if !reflect.DeepEqual(tc.dst, tc.want) { + t.Errorf("dst type %T: Entities\ngot %+v\nwant %+v", tc.dst, tc.dst, tc.want) + continue + } + } +} + +// keysEqual is like (*Key).Equal, but ignores the App ID. +func keysEqual(a, b *Key) bool { + for a != nil && b != nil { + if a.Kind() != b.Kind() || a.Name() != b.Name() || a.ID() != b.ID() { + return false + } + a, b = a.Parent(), b.Parent() + } + return a == b +} + +func TestQueriesAreImmutable(t *testing.T) { + // Test that deriving q2 from q1 does not modify q1. + q0 := NewQuery("foo") + q1 := NewQuery("foo") + q2 := q1.Offset(2) + if !reflect.DeepEqual(q0, q1) { + t.Errorf("q0 and q1 were not equal") + } + if reflect.DeepEqual(q1, q2) { + t.Errorf("q1 and q2 were equal") + } + + // Test that deriving from q4 twice does not conflict, even though + // q4 has a long list of order clauses. This tests that the arrays + // backed by a query's slice of orders are not shared. + f := func() *Query { + q := NewQuery("bar") + // 47 is an ugly number that is unlikely to be near a re-allocation + // point in repeated append calls. For example, it's not near a power + // of 2 or a multiple of 10. + for i := 0; i < 47; i++ { + q = q.Order(fmt.Sprintf("x%d", i)) + } + return q + } + q3 := f().Order("y") + q4 := f() + q5 := q4.Order("y") + q6 := q4.Order("z") + if !reflect.DeepEqual(q3, q5) { + t.Errorf("q3 and q5 were not equal") + } + if reflect.DeepEqual(q5, q6) { + t.Errorf("q5 and q6 were equal") + } +} + +func TestFilterParser(t *testing.T) { + testCases := []struct { + filterStr string + wantOK bool + wantFieldName string + wantOp operator + }{ + // Supported ops. 
+ {"x<", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {" x < ", true, "x", lessThan}, + {"x <=", true, "x", lessEq}, + {"x =", true, "x", equal}, + {"x >=", true, "x", greaterEq}, + {"x >", true, "x", greaterThan}, + {"in >", true, "in", greaterThan}, + {"in>", true, "in", greaterThan}, + // Valid but (currently) unsupported ops. + {"x!=", false, "", 0}, + {"x !=", false, "", 0}, + {" x != ", false, "", 0}, + {"x IN", false, "", 0}, + {"x in", false, "", 0}, + // Invalid ops. + {"x EQ", false, "", 0}, + {"x lt", false, "", 0}, + {"x <>", false, "", 0}, + {"x >>", false, "", 0}, + {"x ==", false, "", 0}, + {"x =<", false, "", 0}, + {"x =>", false, "", 0}, + {"x !", false, "", 0}, + {"x ", false, "", 0}, + {"x", false, "", 0}, + // Quoted and interesting field names. + {"x > y =", true, "x > y", equal}, + {"` x ` =", true, " x ", equal}, + {`" x " =`, true, " x ", equal}, + {`" \"x " =`, true, ` "x `, equal}, + {`" x =`, false, "", 0}, + {`" x ="`, false, "", 0}, + {"` x \" =", false, "", 0}, + } + for _, tc := range testCases { + q := NewQuery("foo").Filter(tc.filterStr, 42) + if ok := q.err == nil; ok != tc.wantOK { + t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK) + continue + } + if !tc.wantOK { + continue + } + if len(q.filter) != 1 { + t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1) + continue + } + got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42} + if got != want { + t.Errorf("%q: got %v, want %v", tc.filterStr, got, want) + continue + } + } +} + +func TestNamespaceQuery(t *testing.T) { + gotNamespace := make(chan string, 1) + ctx := context.Background() + client := &Client{ + client: &fakeClient{ + queryFn: func(req *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { + if part := req.PartitionId; part != nil { + gotNamespace <- part.NamespaceId + } else { + gotNamespace <- "" + } + return nil, errors.New("not implemented") + }, + }, + } + + var gs []Gopher + + client.GetAll(ctx, NewQuery("gopher"), &gs) + if got, want := <-gotNamespace, ""; got != want { + t.Errorf("GetAll: got namespace %q, want %q", got, want) + } + client.Count(ctx, NewQuery("gopher")) + if got, want := <-gotNamespace, ""; got != want { + t.Errorf("Count: got namespace %q, want %q", got, want) + } + + const ns = "not_default" + ctx = WithNamespace(ctx, ns) + + client.GetAll(ctx, NewQuery("gopher"), &gs) + if got, want := <-gotNamespace, ns; got != want { + t.Errorf("GetAll: got namespace %q, want %q", got, want) + } + client.Count(ctx, NewQuery("gopher")) + if got, want := <-gotNamespace, ns; got != want { + t.Errorf("Count: got namespace %q, want %q", got, want) + } +} + +func TestReadOptions(t *testing.T) { + tid := []byte{1} + for _, test := range []struct { + q *Query + want *pb.ReadOptions + }{ + { + q: NewQuery(""), + want: nil, + }, + { + q: NewQuery("").Transaction(nil), + want: nil, + }, + { + q: NewQuery("").Transaction(&Transaction{id: tid}), + want: &pb.ReadOptions{&pb.ReadOptions_Transaction{tid}}, + }, + { + q: NewQuery("").EventualConsistency(), + want: &pb.ReadOptions{&pb.ReadOptions_ReadConsistency_{pb.ReadOptions_EVENTUAL}}, + }, + } { + req := &pb.RunQueryRequest{} + if err := test.q.toProto(req); err != nil { + t.Fatalf("%+v: got %v, want no error", test.q, err) + } + if got := req.ReadOptions; !proto.Equal(got, test.want) { + t.Errorf("%+v:\ngot %+v\nwant %+v", test.q, got, test.want) + } + } + // Test errors. 
+ for _, q := range []*Query{ + NewQuery("").Transaction(&Transaction{id: nil}), + NewQuery("").Transaction(&Transaction{id: tid}).EventualConsistency(), + } { + req := &pb.RunQueryRequest{} + if err := q.toProto(req); err == nil { + t.Errorf("%+v: got nil, wanted error", q) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/save.go b/vendor/cloud.google.com/go/datastore/save.go new file mode 100644 index 000000000..fbff8bc75 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/save.go @@ -0,0 +1,251 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + "fmt" + "reflect" + "time" + + timepb "github.com/golang/protobuf/ptypes/timestamp" + pb "google.golang.org/genproto/googleapis/datastore/v1" + llpb "google.golang.org/genproto/googleapis/type/latlng" +) + +// saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer. +func saveEntity(key *Key, src interface{}) (*pb.Entity, error) { + var err error + var props []Property + if e, ok := src.(PropertyLoadSaver); ok { + props, err = e.Save() + } else { + props, err = SaveStruct(src) + } + if err != nil { + return nil, err + } + return propertiesToProto(key, props) +} + +// TODO(djd): Convert this and below to return ([]Property, error). +func saveStructProperty(props *[]Property, name string, noIndex bool, v reflect.Value) error { + p := Property{ + Name: name, + NoIndex: noIndex, + } + + switch x := v.Interface().(type) { + case *Key, time.Time, GeoPoint: + p.Value = x + default: + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.Value = v.Int() + case reflect.Bool: + p.Value = v.Bool() + case reflect.String: + p.Value = v.String() + case reflect.Float32, reflect.Float64: + p.Value = v.Float() + case reflect.Slice: + if v.Type().Elem().Kind() == reflect.Uint8 { + p.Value = v.Bytes() + } else { + return saveSliceProperty(props, name, noIndex, v) + } + case reflect.Struct: + if !v.CanAddr() { + return fmt.Errorf("datastore: unsupported struct field: value is unaddressable") + } + sub, err := newStructPLS(v.Addr().Interface()) + if err != nil { + return fmt.Errorf("datastore: unsupported struct field: %v", err) + } + return sub.save(props, name+".", noIndex) + } + } + if p.Value == nil { + return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type()) + } + *props = append(*props, p) + return nil +} + +func saveSliceProperty(props *[]Property, name string, noIndex bool, v reflect.Value) error { + // Easy case: if the slice is empty, we're done. + if v.Len() == 0 { + return nil + } + // Work out the properties generated by the first element in the slice. This will + // usually be a single property, but will be more if this is a slice of structs. 
+ var headProps []Property + if err := saveStructProperty(&headProps, name, noIndex, v.Index(0)); err != nil { + return err + } + + // Convert the first element's properties into slice properties, and + // keep track of the values in a map. + values := make(map[string][]interface{}, len(headProps)) + for _, p := range headProps { + values[p.Name] = append(make([]interface{}, 0, v.Len()), p.Value) + } + + // Find the elements for the subsequent elements. + for i := 1; i < v.Len(); i++ { + elemProps := make([]Property, 0, len(headProps)) + if err := saveStructProperty(&elemProps, name, noIndex, v.Index(i)); err != nil { + return err + } + for _, p := range elemProps { + v, ok := values[p.Name] + if !ok { + return fmt.Errorf("datastore: unexpected property %q in elem %d of slice", p.Name, i) + } + values[p.Name] = append(v, p.Value) + } + } + + // Convert to the final properties. + for _, p := range headProps { + p.Value = values[p.Name] + *props = append(*props, p) + } + return nil +} + +func (s structPLS) Save() ([]Property, error) { + var props []Property + if err := s.save(&props, "", false); err != nil { + return nil, err + } + return props, nil +} + +func (s structPLS) save(props *[]Property, prefix string, noIndex bool) error { + for name, f := range s.codec.fields { + name = prefix + name + v := s.v.FieldByIndex(f.path) + if !v.IsValid() || !v.CanSet() { + continue + } + noIndex1 := noIndex || f.noIndex + if err := saveStructProperty(props, name, noIndex1, v); err != nil { + return err + } + } + return nil +} + +func propertiesToProto(key *Key, props []Property) (*pb.Entity, error) { + e := &pb.Entity{ + Key: keyToProto(key), + Properties: map[string]*pb.Value{}, + } + indexedProps := 0 + for _, p := range props { + val, err := interfaceToProto(p.Value, p.NoIndex) + if err != nil { + return nil, fmt.Errorf("datastore: %v for a Property with Name %q", err, p.Name) + } + if !p.NoIndex { + rVal := reflect.ValueOf(p.Value) + if rVal.Kind() == reflect.Slice && rVal.Type().Elem().Kind() != reflect.Uint8 { + indexedProps += rVal.Len() + } else { + indexedProps++ + } + } + if indexedProps > maxIndexedProperties { + return nil, errors.New("datastore: too many indexed properties") + } + + if _, ok := e.Properties[p.Name]; ok { + return nil, fmt.Errorf("datastore: duplicate Property with Name %q", p.Name) + } + e.Properties[p.Name] = val + } + return e, nil +} + +func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) { + val := &pb.Value{ExcludeFromIndexes: noIndex} + switch v := iv.(type) { + case int: + val.ValueType = &pb.Value_IntegerValue{int64(v)} + case int32: + val.ValueType = &pb.Value_IntegerValue{int64(v)} + case int64: + val.ValueType = &pb.Value_IntegerValue{v} + case bool: + val.ValueType = &pb.Value_BooleanValue{v} + case string: + if len(v) > 1500 && !noIndex { + return nil, errors.New("string property too long to index") + } + val.ValueType = &pb.Value_StringValue{v} + case float32: + val.ValueType = &pb.Value_DoubleValue{float64(v)} + case float64: + val.ValueType = &pb.Value_DoubleValue{v} + case *Key: + if v == nil { + val.ValueType = &pb.Value_NullValue{} + } else { + val.ValueType = &pb.Value_KeyValue{keyToProto(v)} + } + case GeoPoint: + if !v.Valid() { + return nil, errors.New("invalid GeoPoint value") + } + val.ValueType = &pb.Value_GeoPointValue{&llpb.LatLng{ + Latitude: v.Lat, + Longitude: v.Lng, + }} + case time.Time: + if v.Before(minTime) || v.After(maxTime) { + return nil, errors.New("time value out of range") + } + val.ValueType = 
&pb.Value_TimestampValue{&timepb.Timestamp{ + Seconds: v.Unix(), + Nanos: int32(v.Nanosecond()), + }} + case []byte: + if len(v) > 1500 && !noIndex { + return nil, errors.New("[]byte property too long to index") + } + val.ValueType = &pb.Value_BlobValue{v} + case []interface{}: + arr := make([]*pb.Value, 0, len(v)) + for i, v := range v { + elem, err := interfaceToProto(v, noIndex) + if err != nil { + return nil, fmt.Errorf("%v at index %d", err, i) + } + arr = append(arr, elem) + } + val.ValueType = &pb.Value_ArrayValue{&pb.ArrayValue{arr}} + // ArrayValues have ExcludeFromIndexes set on the individual items, rather + // than the top-level value. + val.ExcludeFromIndexes = false + default: + if iv != nil { + return nil, fmt.Errorf("invalid Value type %T", iv) + } + val.ValueType = &pb.Value_NullValue{} + } + // TODO(jbd): Support EntityValue. + return val, nil +} diff --git a/vendor/cloud.google.com/go/datastore/save_test.go b/vendor/cloud.google.com/go/datastore/save_test.go new file mode 100644 index 000000000..e5e8344f6 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/save_test.go @@ -0,0 +1,34 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "testing" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +func TestInterfaceToProtoNilKey(t *testing.T) { + var iv *Key + pv, err := interfaceToProto(iv, false) + if err != nil { + t.Fatalf("nil key: interfaceToProto: %v", err) + } + + _, ok := pv.ValueType.(*pb.Value_NullValue) + if !ok { + t.Errorf("nil key: type:\ngot: %T\nwant: %T", pv.ValueType, &pb.Value_NullValue{}) + } +} diff --git a/vendor/cloud.google.com/go/datastore/testdata/index.yaml b/vendor/cloud.google.com/go/datastore/testdata/index.yaml new file mode 100644 index 000000000..47bc9de86 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/testdata/index.yaml @@ -0,0 +1,41 @@ +indexes: + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: I + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: I + direction: desc + +- kind: SQChild + ancestor: yes + properties: + - name: I + - name: T + - name: U + +- kind: SQChild + ancestor: yes + properties: + - name: I + - name: T + - name: U + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: J + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: J + - name: U \ No newline at end of file diff --git a/vendor/cloud.google.com/go/datastore/time.go b/vendor/cloud.google.com/go/datastore/time.go new file mode 100644 index 000000000..e7f6a1931 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/time.go @@ -0,0 +1,36 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "math" + "time" +) + +var ( + minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3) + maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3) +) + +func toUnixMicro(t time.Time) int64 { + // We cannot use t.UnixNano() / 1e3 because we want to handle times more than + // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot + // be represented in the numerator of a single int64 divide. + return t.Unix()*1e6 + int64(t.Nanosecond()/1e3) +} + +func fromUnixMicro(t int64) time.Time { + return time.Unix(t/1e6, (t%1e6)*1e3) +} diff --git a/vendor/cloud.google.com/go/datastore/time_test.go b/vendor/cloud.google.com/go/datastore/time_test.go new file mode 100644 index 000000000..5cc846c4c --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/time_test.go @@ -0,0 +1,75 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "testing" + "time" +) + +func TestUnixMicro(t *testing.T) { + // Test that all these time.Time values survive a round trip to unix micros. + testCases := []time.Time{ + {}, + time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), + time.Unix(-1e6, -1000), + time.Unix(-1e6, 0), + time.Unix(-1e6, +1000), + time.Unix(-60, -1000), + time.Unix(-60, 0), + time.Unix(-60, +1000), + time.Unix(-1, -1000), + time.Unix(-1, 0), + time.Unix(-1, +1000), + time.Unix(0, -3000), + time.Unix(0, -2000), + time.Unix(0, -1000), + time.Unix(0, 0), + time.Unix(0, +1000), + time.Unix(0, +2000), + time.Unix(+60, -1000), + time.Unix(+60, 0), + time.Unix(+60, +1000), + time.Unix(+1e6, -1000), + time.Unix(+1e6, 0), + time.Unix(+1e6, +1000), + time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC), + time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC), + time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), + time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC), + } + for _, tc := range testCases { + got := fromUnixMicro(toUnixMicro(tc)) + if !got.Equal(tc) { + t.Errorf("got %q, want %q", got, tc) + } + } + + // Test that a time.Time that isn't an integral number of microseconds + // is not perfectly reconstructed after a round trip. 
+ t0 := time.Unix(0, 123) + t1 := fromUnixMicro(toUnixMicro(t0)) + if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 { + t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond()) + } +} diff --git a/vendor/cloud.google.com/go/datastore/transaction.go b/vendor/cloud.google.com/go/datastore/transaction.go new file mode 100644 index 000000000..dff8c8bcc --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/transaction.go @@ -0,0 +1,308 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +// ErrConcurrentTransaction is returned when a transaction is rolled back due +// to a conflict with a concurrent transaction. +var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction") + +var errExpiredTransaction = errors.New("datastore: transaction expired") + +type transactionSettings struct { + attempts int +} + +// newTransactionSettings creates a transactionSettings with a given TransactionOption slice. +// Unconfigured options will be set to default values. +func newTransactionSettings(opts []TransactionOption) *transactionSettings { + s := &transactionSettings{attempts: 3} + for _, o := range opts { + o.apply(s) + } + return s +} + +// TransactionOption configures the way a transaction is executed. +type TransactionOption interface { + apply(*transactionSettings) +} + +// MaxAttempts returns a TransactionOption that overrides the default 3 attempt times. +func MaxAttempts(attempts int) TransactionOption { + return maxAttempts(attempts) +} + +type maxAttempts int + +func (w maxAttempts) apply(s *transactionSettings) { + if w > 0 { + s.attempts = int(w) + } +} + +// Transaction represents a set of datastore operations to be committed atomically. +// +// Operations are enqueued by calling the Put and Delete methods on Transaction +// (or their Multi-equivalents). These operations are only committed when the +// Commit method is invoked. To ensure consistency, reads must be performed by +// using Transaction's Get method or by using the Transaction method when +// building a query. +// +// A Transaction must be committed or rolled back exactly once. +type Transaction struct { + id []byte + client *Client + ctx context.Context + mutations []*pb.Mutation // The mutations to apply. + pending map[int]*PendingKey // Map from mutation index to incomplete keys pending transaction completion. +} + +// NewTransaction starts a new transaction. 
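A hedged sketch of the retry-based usage the functions below support, with a hypothetical Account type; returning a non-nil error from the closure rolls the transaction back (the context import is golang.org/x/net/context, as in this file):

    type Account struct{ Balance int64 }

    func transfer(ctx context.Context, c *Client, from, to *Key) error {
        _, err := c.RunInTransaction(ctx, func(tx *Transaction) error {
            var a, b Account
            if err := tx.Get(from, &a); err != nil {
                return err // non-nil error aborts and rolls back
            }
            if err := tx.Get(to, &b); err != nil {
                return err
            }
            a.Balance -= 10
            b.Balance += 10
            if _, err := tx.Put(from, &a); err != nil {
                return err
            }
            _, err := tx.Put(to, &b)
            return err // nil lets RunInTransaction attempt the Commit
        })
        return err // ErrConcurrentTransaction once the attempts are exhausted
    }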
+func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) { + for _, o := range opts { + if _, ok := o.(maxAttempts); ok { + return nil, errors.New("datastore: NewTransaction does not accept MaxAttempts option") + } + } + req := &pb.BeginTransactionRequest{ + ProjectId: c.dataset, + } + resp, err := c.client.BeginTransaction(ctx, req) + if err != nil { + return nil, err + } + + return &Transaction{ + id: resp.Transaction, + ctx: ctx, + client: c, + mutations: nil, + pending: make(map[int]*PendingKey), + }, nil +} + +// RunInTransaction runs f in a transaction. f is invoked with a Transaction +// that f should use for all the transaction's datastore operations. +// +// f must not call Commit or Rollback on the provided Transaction. +// +// If f returns nil, RunInTransaction commits the transaction, +// returning the Commit and a nil error if it succeeds. If the commit fails due +// to a conflicting transaction, RunInTransaction retries f with a new +// Transaction. It gives up and returns ErrConcurrentTransaction after three +// failed attempts (or as configured with MaxAttempts). +// +// If f returns non-nil, then the transaction will be rolled back and +// RunInTransaction will return the same error. The function f is not retried. +// +// Note that when f returns, the transaction is not committed. Calling code +// must not assume that any of f's changes have been committed until +// RunInTransaction returns nil. +// +// Since f may be called multiple times, f should usually be idempotent. +// Note that Transaction.Get is not idempotent when unmarshaling slice fields. +func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (*Commit, error) { + settings := newTransactionSettings(opts) + for n := 0; n < settings.attempts; n++ { + tx, err := c.NewTransaction(ctx) + if err != nil { + return nil, err + } + if err := f(tx); err != nil { + tx.Rollback() + return nil, err + } + if cmt, err := tx.Commit(); err != ErrConcurrentTransaction { + return cmt, err + } + } + return nil, ErrConcurrentTransaction +} + +// Commit applies the enqueued operations atomically. +func (t *Transaction) Commit() (*Commit, error) { + if t.id == nil { + return nil, errExpiredTransaction + } + req := &pb.CommitRequest{ + ProjectId: t.client.dataset, + TransactionSelector: &pb.CommitRequest_Transaction{t.id}, + Mutations: t.mutations, + Mode: pb.CommitRequest_TRANSACTIONAL, + } + t.id = nil + resp, err := t.client.client.Commit(t.ctx, req) + if err != nil { + if grpc.Code(err) == codes.Aborted { + return nil, ErrConcurrentTransaction + } + return nil, err + } + + // Copy any newly minted keys into the returned keys. + commit := &Commit{} + for i, p := range t.pending { + if i >= len(resp.MutationResults) || resp.MutationResults[i].Key == nil { + return nil, errors.New("datastore: internal error: server returned the wrong mutation results") + } + key, err := protoToKey(resp.MutationResults[i].Key) + if err != nil { + return nil, errors.New("datastore: internal error: server returned an invalid key") + } + p.key = key + p.commit = commit + } + + return commit, nil +} + +// Rollback abandons a pending transaction. 
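For incomplete keys, the datastore-assigned key is only known after Commit; a minimal sketch of resolving it through the PendingKey (the function name is hypothetical):

    func insertNew(ctx context.Context, c *Client, incomplete *Key, src interface{}) (*Key, error) {
        tx, err := c.NewTransaction(ctx)
        if err != nil {
            return nil, err
        }
        pending, err := tx.Put(incomplete, src)
        if err != nil {
            tx.Rollback() // abandon the transaction on error
            return nil, err
        }
        commit, err := tx.Commit()
        if err != nil {
            return nil, err
        }
        return commit.Key(pending), nil // resolve the PendingKey to the final Key
    }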
+func (t *Transaction) Rollback() error { + if t.id == nil { + return errExpiredTransaction + } + id := t.id + t.id = nil + _, err := t.client.client.Rollback(t.ctx, &pb.RollbackRequest{ + ProjectId: t.client.dataset, + Transaction: id, + }) + return err +} + +// Get is the transaction-specific version of the package function Get. +// All reads performed during the transaction will come from a single consistent +// snapshot. Furthermore, if the transaction is set to a serializable isolation +// level, another transaction cannot concurrently modify the data that is read +// or modified by this transaction. +func (t *Transaction) Get(key *Key, dst interface{}) error { + opts := &pb.ReadOptions{ + ConsistencyType: &pb.ReadOptions_Transaction{t.id}, + } + err := t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// GetMulti is a batch version of Get. +func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error { + if t.id == nil { + return errExpiredTransaction + } + opts := &pb.ReadOptions{ + ConsistencyType: &pb.ReadOptions_Transaction{t.id}, + } + return t.client.get(t.ctx, keys, dst, opts) +} + +// Put is the transaction-specific version of the package function Put. +// +// Put returns a PendingKey which can be resolved into a Key using the +// return value from a successful Commit. If key is an incomplete key, the +// returned pending key will resolve to a unique key generated by the +// datastore. +func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) { + h, err := t.PutMulti([]*Key{key}, []interface{}{src}) + if err != nil { + if me, ok := err.(MultiError); ok { + return nil, me[0] + } + return nil, err + } + return h[0], nil +} + +// PutMulti is a batch version of Put. One PendingKey is returned for each +// element of src in the same order. +func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) { + if t.id == nil { + return nil, errExpiredTransaction + } + mutations, err := putMutations(keys, src) + if err != nil { + return nil, err + } + origin := len(t.mutations) + t.mutations = append(t.mutations, mutations...) + + // Prepare the returned handles, pre-populating where possible. + ret := make([]*PendingKey, len(keys)) + for i, key := range keys { + p := &PendingKey{} + if key.Incomplete() { + // This key will be in the final commit result. + t.pending[origin+i] = p + } else { + p.key = key + } + ret[i] = p + } + + return ret, nil +} + +// Delete is the transaction-specific version of the package function Delete. +// Delete enqueues the deletion of the entity for the given key, to be +// committed atomically upon calling Commit. +func (t *Transaction) Delete(key *Key) error { + err := t.DeleteMulti([]*Key{key}) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti is a batch version of Delete. +func (t *Transaction) DeleteMulti(keys []*Key) error { + if t.id == nil { + return errExpiredTransaction + } + mutations, err := deleteMutations(keys) + if err != nil { + return err + } + t.mutations = append(t.mutations, mutations...) + return nil +} + +// Commit represents the result of a committed transaction. +type Commit struct{} + +// Key resolves a pending key handle into a final key. +func (c *Commit) Key(p *PendingKey) *Key { + if c != p.commit { + panic("PendingKey was not created by corresponding transaction") + } + return p.key +} + +// PendingKey represents the key for newly-inserted entity. 
It can be +// resolved into a Key by calling the Key method of Commit. +type PendingKey struct { + key *Key + commit *Commit +} diff --git a/vendor/cloud.google.com/go/errors/errors.go b/vendor/cloud.google.com/go/errors/errors.go new file mode 100644 index 000000000..cbe575634 --- /dev/null +++ b/vendor/cloud.google.com/go/errors/errors.go @@ -0,0 +1,315 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package errors is a Google Stackdriver Error Reporting library. +// +// This package is still experimental and subject to change. +// +// See https://cloud.google.com/error-reporting/ for more information. +// +// To initialize a client, use the NewClient function. Generally you will want +// to do this on program initialization. The NewClient function takes as +// arguments a context, the project name, a service name, and a version string. +// The service name and version string identify the running program, and are +// included in error reports. The version string can be left empty. +// +// import "cloud.google.com/go/errors" +// ... +// errorsClient, err = errors.NewClient(ctx, projectID, "myservice", "v1.0") +// +// The client can recover panics in your program and report them as errors. +// To use this functionality, defer its Catch method, as you would any other +// function for recovering panics. +// +// func foo(ctx context.Context, ...) { +// defer errorsClient.Catch(ctx) +// ... +// } +// +// Catch writes an error report containing the recovered value and a stack trace +// to the log named "errorreports" using a Stackdriver Logging client. +// +// There are various options you can add to the call to Catch that modify how +// panics are handled. +// +// WithMessage and WithMessagef add a custom message after the recovered value, +// using fmt.Sprint and fmt.Sprintf respectively. +// +// defer errorsClient.Catch(ctx, errors.WithMessagef("x=%d", x)) +// +// WithRequest fills in various fields in the error report with information +// about an http.Request that's being handled. +// +// defer errorsClient.Catch(ctx, errors.WithRequest(httpReq)) +// +// By default, after recovering a panic, Catch will panic again with the +// recovered value. You can turn off this behavior with the Repanic option. +// +// defer errorsClient.Catch(ctx, errors.Repanic(false)) +// +// You can also change the default behavior for the client by changing the +// RepanicDefault field. +// +// errorsClient.RepanicDefault = false +// +// It is also possible to write an error report directly without recovering a +// panic, using Report or Reportf. +// +// if err != nil { +// errorsClient.Reportf(ctx, r, "unexpected error %v", err) +// } +// +// If you try to write an error report with a nil client, or if the logging +// client fails to write the report to the Stackdriver Logging server, the error +// report is logged using log.Println. 
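+//
+// Putting these pieces together, a request handler that reports panics along
+// with request information might look like this (the handler and its body are
+// placeholders):
+//
+//	func handle(w http.ResponseWriter, r *http.Request) {
+//		ctx := context.Background()
+//		defer errorsClient.Catch(ctx, errors.WithRequest(r), errors.Repanic(false))
+//		// ... work that may panic ...
+//	}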
+package errors // import "cloud.google.com/go/errors" + +import ( + "bytes" + "fmt" + "log" + "net/http" + "runtime" + "strings" + "time" + + "cloud.google.com/go/logging" + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +const ( + userAgent = `gcloud-golang-errorreporting/20160701` +) + +type Client struct { + loggingClient *logging.Client + projectID string + serviceContext map[string]string + + // RepanicDefault determines whether Catch will re-panic after recovering a + // panic. This behavior can be overridden for an individual call to Catch using + // the Repanic option. + RepanicDefault bool +} + +func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, opts ...option.ClientOption) (*Client, error) { + l, err := logging.NewClient(ctx, projectID, "errorreports", opts...) + if err != nil { + return nil, fmt.Errorf("creating Logging client: %v", err) + } + c := &Client{ + loggingClient: l, + projectID: projectID, + RepanicDefault: true, + serviceContext: map[string]string{ + "service": serviceName, + }, + } + if serviceVersion != "" { + c.serviceContext["version"] = serviceVersion + } + return c, nil +} + +// An Option is an optional argument to Catch. +type Option interface { + isOption() +} + +// PanicFlag returns an Option that can inform Catch that a panic has occurred. +// If *p is true when Catch is called, an error report is made even if recover +// returns nil. This allows Catch to report an error for panic(nil). +// If p is nil, the option is ignored. +// +// Here is an example of how to use PanicFlag: +// +// func foo(ctx context.Context, ...) { +// hasPanicked := true +// defer errorsClient.Catch(ctx, errors.PanicFlag(&hasPanicked)) +// ... +// ... +// // We have reached the end of the function, so we're not panicking. +// hasPanicked = false +// } +func PanicFlag(p *bool) Option { return panicFlag{p} } + +type panicFlag struct { + *bool +} + +func (h panicFlag) isOption() {} + +// Repanic returns an Option that determines whether Catch will re-panic after +// it reports an error. This overrides the default in the client. +func Repanic(r bool) Option { return repanic(r) } + +type repanic bool + +func (r repanic) isOption() {} + +// WithRequest returns an Option that informs Catch or Report of an http.Request +// that is being handled. Information from the Request is included in the error +// report, if one is made. +func WithRequest(r *http.Request) Option { return withRequest{r} } + +type withRequest struct { + *http.Request +} + +func (w withRequest) isOption() {} + +// WithMessage returns an Option that sets a message to be included in the error +// report, if one is made. v is converted to a string with fmt.Sprint. +func WithMessage(v ...interface{}) Option { return message(v) } + +type message []interface{} + +func (m message) isOption() {} + +// WithMessagef returns an Option that sets a message to be included in the error +// report, if one is made. format and v are converted to a string with fmt.Sprintf. +func WithMessagef(format string, v ...interface{}) Option { return messagef{format, v} } + +type messagef struct { + format string + v []interface{} +} + +func (m messagef) isOption() {} + +// Catch tries to recover a panic; if it succeeds, it writes an error report. +// It should be called by deferring it, like any other function for recovering +// panics. +// +// Catch can be called concurrently with other calls to Catch, Report or Reportf. 
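+//
+// For example, to report panics from a worker goroutine with extra context
+// while keeping the process alive (id is a placeholder):
+//
+//	defer errorsClient.Catch(ctx, errors.Repanic(false),
+//		errors.WithMessagef("worker %d", id))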
+func (c *Client) Catch(ctx context.Context, opt ...Option) { + panicked := false + for _, o := range opt { + switch o := o.(type) { + case panicFlag: + panicked = panicked || o.bool != nil && *o.bool + } + } + x := recover() + if x == nil && !panicked { + return + } + var ( + r *http.Request + shouldRepanic = true + messages = []string{fmt.Sprint(x)} + ) + if c != nil { + shouldRepanic = c.RepanicDefault + } + for _, o := range opt { + switch o := o.(type) { + case repanic: + shouldRepanic = bool(o) + case withRequest: + r = o.Request + case message: + messages = append(messages, fmt.Sprint(o...)) + case messagef: + messages = append(messages, fmt.Sprintf(o.format, o.v...)) + } + } + c.logInternal(ctx, r, true, strings.Join(messages, " ")) + if shouldRepanic { + panic(x) + } +} + +// Report writes an error report unconditionally, instead of only when a panic +// occurs. +// If r is non-nil, information from the Request is included in the error report. +// +// Report can be called concurrently with other calls to Catch, Report or Reportf. +func (c *Client) Report(ctx context.Context, r *http.Request, v ...interface{}) { + c.logInternal(ctx, r, false, fmt.Sprint(v...)) +} + +// Reportf writes an error report unconditionally, instead of only when a panic +// occurs. +// If r is non-nil, information from the Request is included in the error report. +// +// Reportf can be called concurrently with other calls to Catch, Report or Reportf. +func (c *Client) Reportf(ctx context.Context, r *http.Request, format string, v ...interface{}) { + c.logInternal(ctx, r, false, fmt.Sprintf(format, v...)) +} + +func (c *Client) logInternal(ctx context.Context, r *http.Request, isPanic bool, msg string) { + payload := map[string]interface{}{ + "eventTime": time.Now().In(time.UTC).Format(time.RFC3339Nano), + } + // limit the stack trace to 16k. + var buf [16384]byte + stack := buf[0:runtime.Stack(buf[:], false)] + payload["message"] = msg + "\n" + chopStack(stack, isPanic) + if r != nil { + payload["context"] = map[string]interface{}{ + "httpRequest": map[string]interface{}{ + "method": r.Method, + "url": r.Host + r.RequestURI, + "userAgent": r.UserAgent(), + "referrer": r.Referer(), + "remoteIp": r.RemoteAddr, + }, + } + } + if c == nil { + log.Println("Error report used nil client:", payload) + return + } + payload["serviceContext"] = c.serviceContext + e := logging.Entry{ + Level: logging.Error, + Payload: payload, + } + err := c.loggingClient.LogSync(e) + if err != nil { + log.Println("Error writing error report:", err, "report:", payload) + } +} + +// chopStack trims a stack trace so that the function which panics or calls +// Report is first. +func chopStack(s []byte, isPanic bool) string { + var f []byte + if isPanic { + f = []byte("panic(") + } else { + f = []byte("cloud.google.com/go/errors.(*Client).Report") + } + + lfFirst := bytes.IndexByte(s, '\n') + if lfFirst == -1 { + return string(s) + } + stack := s[lfFirst:] + panicLine := bytes.Index(stack, f) + if panicLine == -1 { + return string(s) + } + stack = stack[panicLine+1:] + for i := 0; i < 2; i++ { + nextLine := bytes.IndexByte(stack, '\n') + if nextLine == -1 { + return string(s) + } + stack = stack[nextLine+1:] + } + return string(s[:lfFirst+1]) + string(stack) +} diff --git a/vendor/cloud.google.com/go/errors/errors_test.go b/vendor/cloud.google.com/go/errors/errors_test.go new file mode 100644 index 000000000..f1fec7103 --- /dev/null +++ b/vendor/cloud.google.com/go/errors/errors_test.go @@ -0,0 +1,218 @@ +// Copyright 2016 Google Inc. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors_test + +import ( + "bytes" + "io/ioutil" + "log" + "net/http" + "strings" + "testing" + + "cloud.google.com/go/errors" + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +const testProjectID = "testproject" + +type fakeRoundTripper struct { + req *http.Request + fail bool + body string +} + +func newFakeRoundTripper() *fakeRoundTripper { + return &fakeRoundTripper{} +} + +func (rt *fakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + rt.req = r + body, err := ioutil.ReadAll(r.Body) + if err != nil { + panic(err) + } + rt.body = string(body) + if rt.fail { + return &http.Response{ + Status: "503 Service Unavailable", + StatusCode: 503, + Body: ioutil.NopCloser(strings.NewReader("{}")), + }, nil + } + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader("{}")), + }, nil +} + +func newTestClient(rt http.RoundTripper) *errors.Client { + t, err := errors.NewClient(context.Background(), testProjectID, "myservice", "v1.000", option.WithHTTPClient(&http.Client{Transport: rt})) + if err != nil { + panic(err) + } + t.RepanicDefault = false + return t +} + +var ctx context.Context + +func init() { + ctx = context.Background() +} + +func TestCatchNothing(t *testing.T) { + rt := &fakeRoundTripper{} + c := newTestClient(rt) + defer func() { + r := rt.req + if r != nil { + t.Errorf("got error report, expected none") + } + }() + defer c.Catch(ctx) +} + +func commonChecks(t *testing.T, body, panickingFunction string) { + if !strings.Contains(body, "myservice") { + t.Errorf("error report didn't contain service name") + } + if !strings.Contains(body, "v1.000") { + t.Errorf("error report didn't contain version name") + } + if !strings.Contains(body, "hello, error") { + t.Errorf("error report didn't contain message") + } + if !strings.Contains(body, panickingFunction) { + t.Errorf("error report didn't contain stack trace") + } +} + +func TestCatchPanic(t *testing.T) { + rt := &fakeRoundTripper{} + c := newTestClient(rt) + defer func() { + r := rt.req + if r == nil { + t.Fatalf("got no error report, expected one") + } + commonChecks(t, rt.body, "errors_test.TestCatchPanic") + if !strings.Contains(rt.body, "divide by zero") { + t.Errorf("error report didn't contain recovered value") + } + }() + defer c.Catch(ctx, errors.WithMessage("hello, error")) + var x int + x = x / x +} + +func TestCatchPanicNilClient(t *testing.T) { + buf := new(bytes.Buffer) + log.SetOutput(buf) + defer func() { + recover() + body := buf.Bytes() + if !strings.Contains(string(body), "divide by zero") { + t.Errorf("error report didn't contain recovered value") + } + if !strings.Contains(string(body), "hello, error") { + t.Errorf("error report didn't contain message") + } + if !strings.Contains(string(body), "errors_test.TestCatchPanicNilClient") { + t.Errorf("error report didn't contain recovered value") + } + }() + var c *errors.Client + defer c.Catch(ctx, 
errors.WithMessage("hello, error")) + var x int + x = x / x +} + +func TestLogFailedReports(t *testing.T) { + rt := &fakeRoundTripper{} + c := newTestClient(rt) + rt.fail = true + buf := new(bytes.Buffer) + log.SetOutput(buf) + defer func() { + recover() + body := buf.Bytes() + commonChecks(t, string(body), "errors_test.TestLogFailedReports") + if !strings.Contains(string(body), "divide by zero") { + t.Errorf("error report didn't contain recovered value") + } + }() + defer c.Catch(ctx, errors.WithMessage("hello, error")) + var x int + x = x / x +} + +func TestCatchNilPanic(t *testing.T) { + rt := &fakeRoundTripper{} + c := newTestClient(rt) + defer func() { + r := rt.req + if r == nil { + t.Fatalf("got no error report, expected one") + } + commonChecks(t, rt.body, "errors_test.TestCatchNilPanic") + if !strings.Contains(rt.body, "nil") { + t.Errorf("error report didn't contain recovered value") + } + }() + b := true + defer c.Catch(ctx, errors.WithMessage("hello, error"), errors.PanicFlag(&b)) + panic(nil) +} + +func TestNotCatchNilPanic(t *testing.T) { + rt := &fakeRoundTripper{} + c := newTestClient(rt) + defer func() { + r := rt.req + if r != nil { + t.Errorf("got error report, expected none") + } + }() + defer c.Catch(ctx, errors.WithMessage("hello, error")) + panic(nil) +} + +func TestReport(t *testing.T) { + rt := &fakeRoundTripper{} + c := newTestClient(rt) + c.Report(ctx, nil, "hello, ", "error") + r := rt.req + if r == nil { + t.Fatalf("got no error report, expected one") + } + commonChecks(t, rt.body, "errors_test.TestReport") +} + +func TestReportf(t *testing.T) { + rt := &fakeRoundTripper{} + c := newTestClient(rt) + c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2) + r := rt.req + if r == nil { + t.Fatalf("got no error report, expected one") + } + commonChecks(t, rt.body, "errors_test.TestReportf") + if !strings.Contains(rt.body, "2+2=4") { + t.Errorf("error report didn't contain formatted message") + } +} diff --git a/vendor/cloud.google.com/go/errors/stack_test.go b/vendor/cloud.google.com/go/errors/stack_test.go new file mode 100644 index 000000000..25cd4c60b --- /dev/null +++ b/vendor/cloud.google.com/go/errors/stack_test.go @@ -0,0 +1,118 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errors + +import "testing" + +func TestChopStack(t *testing.T) { + for _, test := range []struct { + name string + in []byte + expected string + isPanic bool + }{ + { + name: "Catch", + in: []byte(`goroutine 20 [running]: +runtime/debug.Stack() + /gopath/src/runtime/debug/stack.go:24 +0x79 +cloud.google.com/go/errors.(*Client).logInternal() + /gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b +cloud.google.com/go/errors.(*Client).Catch() + /gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed +panic() + /gopath/src/runtime/panic.go:458 +0x243 +cloud.google.com/go/errors_test.TestCatchPanic() + /gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171 +testing.tRunner() + /gopath/src/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/src/testing/testing.go:646 +0x2ec +`), + expected: `goroutine 20 [running]: +cloud.google.com/go/errors_test.TestCatchPanic() + /gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171 +testing.tRunner() + /gopath/src/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/src/testing/testing.go:646 +0x2ec +`, + isPanic: true, + }, + { + name: "function not found", + in: []byte(`goroutine 20 [running]: +runtime/debug.Stack() + /gopath/src/runtime/debug/stack.go:24 +0x79 +cloud.google.com/go/errors.(*Client).logInternal() + /gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b +cloud.google.com/go/errors.(*Client).Catch() + /gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed +cloud.google.com/go/errors_test.TestCatchPanic() + /gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171 +testing.tRunner() + /gopath/src/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/src/testing/testing.go:646 +0x2ec +`), + expected: `goroutine 20 [running]: +runtime/debug.Stack() + /gopath/src/runtime/debug/stack.go:24 +0x79 +cloud.google.com/go/errors.(*Client).logInternal() + /gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b +cloud.google.com/go/errors.(*Client).Catch() + /gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed +cloud.google.com/go/errors_test.TestCatchPanic() + /gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171 +testing.tRunner() + /gopath/src/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/src/testing/testing.go:646 +0x2ec +`, + isPanic: true, + }, + { + name: "Report", + in: []byte(` goroutine 39 [running]: +runtime/debug.Stack() + /gopath/runtime/debug/stack.go:24 +0x79 +cloud.google.com/go/errors.(*Client).logInternal() + /gopath/cloud.google.com/go/errors/errors.go:259 +0x18b +cloud.google.com/go/errors.(*Client).Report() + /gopath/cloud.google.com/go/errors/errors.go:248 +0x4ed +cloud.google.com/go/errors_test.TestReport() + /gopath/cloud.google.com/go/errors/errors_test.go:137 +0x2a1 +testing.tRunner() + /gopath/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/testing/testing.go:646 +0x2ec +`), + expected: ` goroutine 39 [running]: +cloud.google.com/go/errors_test.TestReport() + /gopath/cloud.google.com/go/errors/errors_test.go:137 +0x2a1 +testing.tRunner() + /gopath/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/testing/testing.go:646 +0x2ec +`, + isPanic: false, + }, + } { + out := chopStack(test.in, test.isPanic) + if out != test.expected { + t.Errorf("case %q: chopStack(%q, %t): got %q want %q", test.name, test.in, test.isPanic, out, test.expected) + } + } +} diff --git a/vendor/cloud.google.com/go/examples/bigquery/concat_table/main.go 
b/vendor/cloud.google.com/go/examples/bigquery/concat_table/main.go new file mode 100644 index 000000000..55e8ba3d9 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigquery/concat_table/main.go @@ -0,0 +1,92 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// concat_table is an example client of the bigquery client library. +// It concatenates two BigQuery tables and writes the result to another table. +package main + +import ( + "flag" + "fmt" + "log" + "os" + "time" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" +) + +var ( + project = flag.String("project", "", "The ID of a Google Cloud Platform project") + dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") + src1 = flag.String("src1", "", "The ID of the first BigQuery table to concatenate") + src2 = flag.String("src2", "", "The ID of the second BigQuery table to concatenate") + dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to") + pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") +) + +func main() { + flag.Parse() + + flagsOk := true + for _, f := range []string{"project", "dataset", "src1", "src2", "dest"} { + if flag.Lookup(f).Value.String() == "" { + fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) + flagsOk = false + } + } + if !flagsOk { + os.Exit(1) + } + if *src1 == *src2 || *src1 == *dest || *src2 == *dest { + log.Fatalf("Different values must be supplied for each of --src1, --src2 and --dest") + } + + ctx := context.Background() + client, err := bigquery.NewClient(ctx, *project) + if err != nil { + log.Fatalf("Creating bigquery client: %v", err) + } + + s1 := client.Dataset(*dataset).Table(*src1) + s2 := client.Dataset(*dataset).Table(*src2) + d := client.Dataset(*dataset).Table(*dest) + + // Concatenate data. + job, err := client.Copy(ctx, d, bigquery.Tables{s1, s2}, bigquery.WriteTruncate) + + if err != nil { + log.Fatalf("Concatenating: %v", err) + } + + fmt.Printf("Job for concatenation operation: %+v\n", job) + fmt.Printf("Waiting for job to complete.\n") + + for range time.Tick(*pollint) { + status, err := job.Status(ctx) + if err != nil { + fmt.Printf("Failure determining status: %v", err) + break + } + if !status.Done() { + continue + } + if err := status.Err(); err == nil { + fmt.Printf("Success\n") + } else { + fmt.Printf("Failure: %+v\n", err) + } + break + } +} diff --git a/vendor/cloud.google.com/go/examples/bigquery/load/main.go b/vendor/cloud.google.com/go/examples/bigquery/load/main.go new file mode 100644 index 000000000..2f7e7e123 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigquery/load/main.go @@ -0,0 +1,95 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// load is an example client of the bigquery client library. +// It loads a file from Google Cloud Storage into a BigQuery table. +package main + +import ( + "flag" + "fmt" + "log" + "os" + "time" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" +) + +var ( + project = flag.String("project", "", "The ID of a Google Cloud Platform project") + dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") + table = flag.String("table", "", "The ID of a BigQuery table to load data into") + bucket = flag.String("bucket", "", "The name of a Google Cloud Storage bucket to load data from") + object = flag.String("object", "", "The name of a Google Cloud Storage object to load data from. Must exist within the bucket specified by --bucket") + skiprows = flag.Int64("skiprows", 0, "The number of rows of the source data to skip when loading") + pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") +) + +func main() { + flag.Parse() + + flagsOk := true + for _, f := range []string{"project", "dataset", "table", "bucket", "object"} { + if flag.Lookup(f).Value.String() == "" { + fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) + flagsOk = false + } + } + if !flagsOk { + os.Exit(1) + } + + ctx := context.Background() + client, err := bigquery.NewClient(ctx, *project) + if err != nil { + log.Fatalf("Creating bigquery client: %v", err) + } + + table := client.Dataset(*dataset).Table(*table) + + gcs := client.NewGCSReference(fmt.Sprintf("gs://%s/%s", *bucket, *object)) + gcs.SkipLeadingRows = *skiprows + + // Load data from Google Cloud Storage into a BigQuery table. + job, err := client.Copy( + ctx, table, gcs, + bigquery.MaxBadRecords(1), + bigquery.AllowQuotedNewlines(), + bigquery.WriteTruncate) + + if err != nil { + log.Fatalf("Loading data: %v", err) + } + + fmt.Printf("Job for data load operation: %+v\n", job) + fmt.Printf("Waiting for job to complete.\n") + + for range time.Tick(*pollint) { + status, err := job.Status(ctx) + if err != nil { + fmt.Printf("Failure determining status: %v", err) + break + } + if !status.Done() { + continue + } + if err := status.Err(); err == nil { + fmt.Printf("Success\n") + } else { + fmt.Printf("Failure: %+v\n", err) + } + break + } +} diff --git a/vendor/cloud.google.com/go/examples/bigquery/query/main.go b/vendor/cloud.google.com/go/examples/bigquery/query/main.go new file mode 100644 index 000000000..ef496a8d2 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigquery/query/main.go @@ -0,0 +1,100 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// query is an example client of the bigquery client library. +// It submits a query and writes the result to a table. +package main + +import ( + "flag" + "fmt" + "log" + "os" + "time" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" +) + +var ( + project = flag.String("project", "", "The ID of a Google Cloud Platform project") + dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") + q = flag.String("q", "", "The query string") + dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to. If unset, an ephemeral table ID will be generated.") + pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") + wait = flag.Bool("wait", false, "Whether to wait for the query job to complete.") +) + +func main() { + flag.Parse() + + flagsOk := true + for _, f := range []string{"project", "dataset", "q"} { + if flag.Lookup(f).Value.String() == "" { + fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) + flagsOk = false + } + } + if !flagsOk { + os.Exit(1) + } + + ctx := context.Background() + client, err := bigquery.NewClient(ctx, *project) + if err != nil { + log.Fatalf("Creating bigquery client: %v", err) + } + + d := &bigquery.Table{} + if *dest != "" { + d = client.Dataset(*dataset).Table(*dest) + } + + query := &bigquery.Query{ + Q: *q, + DefaultProjectID: *project, + DefaultDatasetID: *dataset, + } + + // Query data. + job, err := client.Copy(ctx, d, query, bigquery.WriteTruncate) + + if err != nil { + log.Fatalf("Querying: %v", err) + } + + fmt.Printf("Submitted query. Job ID: %s\n", job.ID()) + if !*wait { + return + } + + fmt.Printf("Waiting for job to complete.\n") + + for range time.Tick(*pollint) { + status, err := job.Status(ctx) + if err != nil { + fmt.Printf("Failure determining status: %v", err) + break + } + if !status.Done() { + continue + } + if err := status.Err(); err == nil { + fmt.Printf("Success\n") + } else { + fmt.Printf("Failure: %+v\n", err) + } + break + } +} diff --git a/vendor/cloud.google.com/go/examples/bigquery/read/main.go b/vendor/cloud.google.com/go/examples/bigquery/read/main.go new file mode 100644 index 000000000..ab00e585c --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigquery/read/main.go @@ -0,0 +1,143 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// read is an example client of the bigquery client library. +// It reads from a table, returning the data via an Iterator. 
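+//
+// The central read loop (mirrored in printValues below) follows the usual
+// iterator pattern:
+//
+//	for it.Next(ctx) {
+//		var vals bigquery.ValueList
+//		if err := it.Get(&vals); err != nil {
+//			// handle err
+//		}
+//	}
+//	if err := it.Err(); err != nil {
+//		// handle err
+//	}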
+package main + +import ( + "flag" + "fmt" + "log" + "os" + "regexp" + "strings" + "text/tabwriter" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" +) + +var ( + project = flag.String("project", "", "The ID of a Google Cloud Platform project") + dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") + table = flag.String("table", ".*", "A regular expression to match the IDs of tables to read.") + jobID = flag.String("jobid", "", "The ID of a query job that has already been submitted."+ + " If set, --dataset, --table will be ignored, and results will be read from the specified job.") +) + +func printValues(ctx context.Context, it *bigquery.Iterator) { + // one-space padding. + tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + + for it.Next(ctx) { + var vals bigquery.ValueList + if err := it.Get(&vals); err != nil { + fmt.Printf("err calling get: %v\n", err) + } else { + sep := "" + for _, v := range vals { + fmt.Fprintf(tw, "%s%v", sep, v) + sep = "\t" + } + fmt.Fprintf(tw, "\n") + } + } + tw.Flush() + + fmt.Printf("\n") + if err := it.Err(); err != nil { + fmt.Printf("err reading: %v\n", err) + } +} + +func printTable(ctx context.Context, client *bigquery.Client, t *bigquery.Table) { + it, err := client.Read(ctx, t) + if err != nil { + log.Fatalf("Reading: %v", err) + } + + id := t.FullyQualifiedName() + fmt.Printf("%s\n%s\n", id, strings.Repeat("-", len(id))) + printValues(ctx, it) +} + +func printQueryResults(ctx context.Context, client *bigquery.Client, queryJobID string) { + job, err := client.JobFromID(ctx, queryJobID) + if err != nil { + log.Fatalf("Loading job: %v", err) + } + + it, err := client.Read(ctx, job) + if err != nil { + log.Fatalf("Reading: %v", err) + } + + // TODO: print schema. + printValues(ctx, it) +} + +func main() { + flag.Parse() + + flagsOk := true + if flag.Lookup("project").Value.String() == "" { + fmt.Fprintf(os.Stderr, "Flag --project is required\n") + flagsOk = false + } + + var sourceFlagCount int + if flag.Lookup("dataset").Value.String() != "" { + sourceFlagCount++ + } + if flag.Lookup("jobid").Value.String() != "" { + sourceFlagCount++ + } + if sourceFlagCount != 1 { + fmt.Fprintf(os.Stderr, "Exactly one of --dataset or --jobid must be set\n") + flagsOk = false + } + + if !flagsOk { + os.Exit(1) + } + + ctx := context.Background() + tableRE, err := regexp.Compile(*table) + if err != nil { + fmt.Fprintf(os.Stderr, "--table is not a valid regular expression: %q\n", *table) + os.Exit(1) + } + + client, err := bigquery.NewClient(ctx, *project) + if err != nil { + log.Fatalf("Creating bigquery client: %v", err) + } + + if *jobID != "" { + printQueryResults(ctx, client, *jobID) + return + } + ds := client.Dataset(*dataset) + var tables []*bigquery.Table + tables, err = ds.ListTables(context.Background()) + if err != nil { + log.Fatalf("Listing tables: %v", err) + } + for _, t := range tables { + if tableRE.MatchString(t.TableID) { + printTable(ctx, client, t) + } + } +} diff --git a/vendor/cloud.google.com/go/examples/bigtable/helloworld/README.md b/vendor/cloud.google.com/go/examples/bigtable/helloworld/README.md new file mode 100644 index 000000000..b96ba53cc --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigtable/helloworld/README.md @@ -0,0 +1,46 @@ +# Cloud Bigtable Hello World in Go + +This is a simple application that demonstrates using the [Google Cloud APIs Go +Client Library](https://github.com/GoogleCloudPlatform/google-cloud-go) to connect +to and interact with Cloud Bigtable. + +## Prerequisites + +1. 
Set up Cloud Console. + 1. Go to the [Cloud Console](https://cloud.google.com/console) and create or select your project. + You will need the project ID later. + 1. Go to **Settings > Project Billing Settings** and enable billing. + 1. Select **APIs & Auth > APIs**. + 1. Enable the **Cloud Bigtable API** and the **Cloud Bigtable Admin API**. + (You may need to search for the API). +1. Set up gcloud. + 1. `gcloud components update` + 1. `gcloud auth login` + 1. `gcloud config set project PROJECT_ID` +1. Provision a Cloud Bigtable instance + 1. Follow the instructions in the [user +documentation](https://cloud.google.com/bigtable/docs/creating-instance) to +create a Google Cloud Platform project and Cloud Bigtable instance if necessary. + 1. You'll need to reference your project id and instance id to run the application. + +## Running + +1. From the hello_world example folder, `go run main.go -project PROJECT_ID -instance INSTANCE_ID`, substituting your project id and instance id. + +## Cleaning up + +To avoid incurring extra charges to your Google Cloud Platform account, remove +the resources created for this sample. + +1. Go to the Clusters page in the [Cloud + Console](https://console.cloud.google.com). + + [Go to the Clusters page](https://console.cloud.google.com/project/_/bigtable/clusters) + +1. Click the cluster name. + +1. Click **Delete**. + + ![Delete](https://cloud.google.com/bigtable/img/delete-quickstart-cluster.png) + +1. Type the cluster ID, then click **Delete** to delete the cluster. diff --git a/vendor/cloud.google.com/go/examples/bigtable/helloworld/main.go b/vendor/cloud.google.com/go/examples/bigtable/helloworld/main.go new file mode 100644 index 000000000..fa6692c7a --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigtable/helloworld/main.go @@ -0,0 +1,157 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Hello world is a sample program demonstrating use of the Bigtable client +// library to perform basic CRUD operations +package main + +import ( + "flag" + "fmt" + "log" + + "cloud.google.com/go/bigtable" + "golang.org/x/net/context" +) + +// User-provided constants. +const ( + tableName = "Hello-Bigtable" + columnFamilyName = "cf1" + columnName = "greeting" +) + +var greetings = []string{"Hello World!", "Hello Cloud Bigtable!", "Hello golang!"} + +// sliceContains reports whether the provided string is present in the given slice of strings. +func sliceContains(list []string, target string) bool { + for _, s := range list { + if s == target { + return true + } + } + return false +} + +func main() { + project := flag.String("project", "", "The Google Cloud Platform project ID. Required.") + instance := flag.String("instance", "", "The Google Cloud Bigtable instance ID. 
Required.") + flag.Parse() + + for _, f := range []string{"project", "instance"} { + if flag.Lookup(f).Value.String() == "" { + log.Fatalf("The %s flag is required.", f) + } + } + + ctx := context.Background() + + // Set up admin client, tables, and column families. + // NewAdminClient uses Application Default Credentials to authenticate. + adminClient, err := bigtable.NewAdminClient(ctx, *project, *instance) + if err != nil { + log.Fatalf("Could not create admin client: %v", err) + } + + tables, err := adminClient.Tables(ctx) + if err != nil { + log.Fatalf("Could not fetch table list: %v", err) + } + + if !sliceContains(tables, tableName) { + log.Printf("Creating table %s", tableName) + if err := adminClient.CreateTable(ctx, tableName); err != nil { + log.Fatalf("Could not create table %s: %v", tableName, err) + } + } + + tblInfo, err := adminClient.TableInfo(ctx, tableName) + if err != nil { + log.Fatalf("Could not read info for table %s: %v", tableName, err) + } + + if !sliceContains(tblInfo.Families, columnFamilyName) { + if err := adminClient.CreateColumnFamily(ctx, tableName, columnFamilyName); err != nil { + log.Fatalf("Could not create column family %s: %v", columnFamilyName, err) + } + } + + // Set up Bigtable data operations client. + // NewClient uses Application Default Credentials to authenticate. + client, err := bigtable.NewClient(ctx, *project, *instance) + if err != nil { + log.Fatalf("Could not create data operations client: %v", err) + } + + tbl := client.Open(tableName) + muts := make([]*bigtable.Mutation, len(greetings)) + rowKeys := make([]string, len(greetings)) + + log.Printf("Writing greeting rows to table") + for i, greeting := range greetings { + muts[i] = bigtable.NewMutation() + muts[i].Set(columnFamilyName, columnName, bigtable.Now(), []byte(greeting)) + + // Each row has a unique row key. + // + // Note: This example uses sequential numeric IDs for simplicity, but + // this can result in poor performance in a production application. + // Since rows are stored in sorted order by key, sequential keys can + // result in poor distribution of operations across nodes. 
+ // + // For more information about how to design a Bigtable schema for the + // best performance, see the documentation: + // + // https://cloud.google.com/bigtable/docs/schema-design + rowKeys[i] = fmt.Sprintf("%s%d", columnName, i) + } + + rowErrs, err := tbl.ApplyBulk(ctx, rowKeys, muts) + if err != nil { + log.Fatalf("Could not apply bulk row mutation: %v", err) + } + if rowErrs != nil { + for _, rowErr := range rowErrs { + log.Printf("Error writing row: %v", rowErr) + } + log.Fatalf("Could not write some rows") + } + + log.Printf("Getting a single greeting by row key:") + row, err := tbl.ReadRow(ctx, rowKeys[0], bigtable.RowFilter(bigtable.ColumnFilter(columnName))) + if err != nil { + log.Fatalf("Could not read row with key %s: %v", rowKeys[0], err) + } + log.Printf("\t%s = %s\n", rowKeys[0], string(row[columnFamilyName][0].Value)) + + log.Printf("Reading all greeting rows:") + err = tbl.ReadRows(ctx, bigtable.PrefixRange(columnName), func(row bigtable.Row) bool { + item := row[columnFamilyName][0] + log.Printf("\t%s = %s\n", item.Row, string(item.Value)) + return true + }, bigtable.RowFilter(bigtable.ColumnFilter(columnName))) + + if err = client.Close(); err != nil { + log.Fatalf("Could not close data operations client: %v", err) + } + + log.Printf("Deleting the table") + if err = adminClient.DeleteTable(ctx, tableName); err != nil { + log.Fatalf("Could not delete table %s: %v", tableName, err) + } + + if err = adminClient.Close(); err != nil { + log.Fatalf("Could not close admin client: %v", err) + } +} diff --git a/vendor/cloud.google.com/go/examples/bigtable/search/search.go b/vendor/cloud.google.com/go/examples/bigtable/search/search.go new file mode 100644 index 000000000..2049e5fe1 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigtable/search/search.go @@ -0,0 +1,453 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Search is a sample web server that uses Cloud Bigtable as the storage layer +// for a simple document-storage and full-text-search service. +// It has four functions: +// - Initialize and clear the table. +// - Add a document. This adds the content of a user-supplied document to the +// Bigtable, and adds references to the document to an index in the Bigtable. +// The document is indexed under each unique word in the document. +// - Search the index. This returns documents containing each word in a user +// query, with snippets and links to view the whole document. +// - Copy table. This copies the documents and index from another table and +// adds them to the current one. +package main + +import ( + "bytes" + "flag" + "fmt" + "html/template" + "io" + "log" + "net/http" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "cloud.google.com/go/bigtable" + "golang.org/x/net/context" +) + +var ( + addTemplate = template.Must(template.New("").Parse(` +Added {{.Title}} +`)) + + contentTemplate = template.Must(template.New("").Parse(` +{{.Title}}

+{{.Content}}
+</body></html>`))
+
+	searchTemplate = template.Must(template.New("").Parse(`<html><body>
+Results for <b>{{.Query}}</b>:<br><br>
+{{range .Results}}
+<a href="/content?name={{.Title}}">{{.Title}}</a><br>
+{{.Snippet}}<br><br>
+{{end}}
+</body></html>`))
+)
+
+const (
+	indexColumnFamily   = "i"
+	contentColumnFamily = "c"
+	mainPage            = `
+	<html>
+		<head>
+			<title>Document Search</title>
+		</head>
+		<body>
+			Initialize and clear table:
+			<form action="/reset" method="post">
+				<input type="submit" value="Init">
+			</form>
+			Search for documents:
+			<form action="/search" method="get">
+				<input type="text" name="q" size="50">
+				<input type="submit" value="Search">
+			</form>
+			Add a document:
+			<form action="/add" method="post">
+				Document name:
+				<input type="text" name="name" size="25">
+				Document text:
+				<input type="text" name="content" size="50">
+				<input type="submit" value="Submit">
+			</form>
+			Copy data from another table:
+			<form action="/copy" method="post">
+				Source table name:
+				<input type="text" name="name" size="25">
+				<input type="submit" value="Copy">
+			</form>
+		</body>
+	</html>
+	`
+)
+
+func main() {
+	var (
+		project   = flag.String("project", "", "The name of the project.")
+		instance  = flag.String("instance", "", "The name of the Cloud Bigtable instance.")
+		tableName = flag.String("table", "docindex", "The name of the table containing the documents and index.")
+		port      = flag.Int("port", 8080, "TCP port for server.")
+	)
+	flag.Parse()
+
+	// Make an admin client.
+	adminClient, err := bigtable.NewAdminClient(context.Background(), *project, *instance)
+	if err != nil {
+		log.Fatal("Bigtable NewAdminClient:", err)
+	}
+
+	// Make a regular client.
+	client, err := bigtable.NewClient(context.Background(), *project, *instance)
+	if err != nil {
+		log.Fatal("Bigtable NewClient:", err)
+	}
+
+	// Open the table.
+	table := client.Open(*tableName)
+
+	// Set up HTML handlers, and start the web server.
+	http.HandleFunc("/search", func(w http.ResponseWriter, r *http.Request) { handleSearch(w, r, table) })
+	http.HandleFunc("/content", func(w http.ResponseWriter, r *http.Request) { handleContent(w, r, table) })
+	http.HandleFunc("/add", func(w http.ResponseWriter, r *http.Request) { handleAddDoc(w, r, table) })
+	http.HandleFunc("/reset", func(w http.ResponseWriter, r *http.Request) { handleReset(w, r, *tableName, adminClient) })
+	http.HandleFunc("/copy", func(w http.ResponseWriter, r *http.Request) { handleCopy(w, r, *tableName, client, adminClient) })
+	http.HandleFunc("/", handleMain)
+	log.Fatal(http.ListenAndServe(":"+strconv.Itoa(*port), nil))
+}
+
+// handleMain outputs the home page.
+func handleMain(w http.ResponseWriter, r *http.Request) {
+	io.WriteString(w, mainPage)
+}
+
+// tokenize splits a string into tokens.
+// This is very simple, it's not a good tokenization function.
+func tokenize(s string) []string {
+	wordMap := make(map[string]bool)
+	f := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) })
+	for _, word := range f {
+		word = strings.ToLower(word)
+		wordMap[word] = true
+	}
+	words := make([]string, 0, len(wordMap))
+	for word := range wordMap {
+		words = append(words, word)
+	}
+	return words
+}
+
+// handleContent fetches the content of a document from the Bigtable and returns it.
+func handleContent(w http.ResponseWriter, r *http.Request, table *bigtable.Table) {
+	ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
+	name := r.FormValue("name")
+	if len(name) == 0 {
+		http.Error(w, "No document name supplied.", http.StatusBadRequest)
+		return
+	}
+
+	row, err := table.ReadRow(ctx, name)
+	if err != nil {
+		http.Error(w, "Error reading content: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	content := row[contentColumnFamily]
+	if len(content) == 0 {
+		http.Error(w, "Document not found.", http.StatusNotFound)
+		return
+	}
+	var buf bytes.Buffer
+	if err := contentTemplate.ExecuteTemplate(&buf, "", struct{ Title, Content string }{name, string(content[0].Value)}); err != nil {
+		http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	io.Copy(w, &buf)
+}
+
+// handleSearch responds to search queries, returning links and snippets for matching documents.
+func handleSearch(w http.ResponseWriter, r *http.Request, table *bigtable.Table) {
+	ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
+	query := r.FormValue("q")
+	// Split the query into words.
+	words := tokenize(query)
+	if len(words) == 0 {
+		http.Error(w, "Empty query.", http.StatusBadRequest)
+		return
+	}
+
+	// readRows reads from many rows concurrently.
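+	// Each goroutine writes to its own slice element, so no further locking
+	// is needed; wg.Wait ensures all results and errors are in place before
+	// they are inspected.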
+ readRows := func(rows []string) ([]bigtable.Row, error) { + results := make([]bigtable.Row, len(rows)) + errors := make([]error, len(rows)) + var wg sync.WaitGroup + for i, row := range rows { + wg.Add(1) + go func(i int, row string) { + defer wg.Done() + results[i], errors[i] = table.ReadRow(ctx, row, bigtable.RowFilter(bigtable.LatestNFilter(1))) + }(i, row) + } + wg.Wait() + for _, err := range errors { + if err != nil { + return nil, err + } + } + return results, nil + } + + // For each query word, get the list of documents containing it. + results, err := readRows(words) + if err != nil { + http.Error(w, "Error reading index: "+err.Error(), http.StatusInternalServerError) + return + } + + // Count how many of the query words each result contained. + hits := make(map[string]int) + for _, r := range results { + for _, r := range r[indexColumnFamily] { + hits[r.Column]++ + } + } + + // Build a slice of all the documents that matched every query word. + var matches []string + for doc, count := range hits { + if count == len(words) { + matches = append(matches, doc[len(indexColumnFamily+":"):]) + } + } + + // Fetch the content of those documents from the Bigtable. + content, err := readRows(matches) + if err != nil { + http.Error(w, "Error reading results: "+err.Error(), http.StatusInternalServerError) + return + } + + type result struct{ Title, Snippet string } + data := struct { + Query string + Results []result + }{query, nil} + + // Output links and snippets. + for i, doc := range matches { + var text string + c := content[i][contentColumnFamily] + if len(c) > 0 { + text = string(c[0].Value) + } + if len(text) > 100 { + text = text[:100] + "..." + } + data.Results = append(data.Results, result{doc, text}) + } + var buf bytes.Buffer + if err := searchTemplate.ExecuteTemplate(&buf, "", data); err != nil { + http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) + return + } + io.Copy(w, &buf) +} + +// handleAddDoc adds a document to the index. +func handleAddDoc(w http.ResponseWriter, r *http.Request, table *bigtable.Table) { + if r.Method != "POST" { + http.Error(w, "POST requests only", http.StatusMethodNotAllowed) + return + } + + ctx, _ := context.WithTimeout(context.Background(), time.Minute) + + name := r.FormValue("name") + if len(name) == 0 { + http.Error(w, "Empty document name!", http.StatusBadRequest) + return + } + + content := r.FormValue("content") + if len(content) == 0 { + http.Error(w, "Empty document content!", http.StatusBadRequest) + return + } + + var ( + writeErr error // Set if any write fails. + mu sync.Mutex // Protects writeErr + wg sync.WaitGroup // Used to wait for all writes to finish. + ) + + // writeOneColumn writes one column in one row, updates err if there is an error, + // and signals wg that one operation has finished. + writeOneColumn := func(row, family, column, value string, ts bigtable.Timestamp) { + mut := bigtable.NewMutation() + mut.Set(family, column, ts, []byte(value)) + err := table.Apply(ctx, row, mut) + if err != nil { + mu.Lock() + writeErr = err + mu.Unlock() + } + } + + // Start a write to store the document content. + wg.Add(1) + go func() { + writeOneColumn(name, contentColumnFamily, "", content, bigtable.Now()) + wg.Done() + }() + + // Start writes to store the document name in the index for each word in the document. 
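+	// Each index row is keyed by a single word, and every document containing
+	// that word is recorded as a column (named after the document) in that
+	// row; handleSearch later intersects these per-word rows.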
+ words := tokenize(content) + for _, word := range words { + var ( + row = word + family = indexColumnFamily + column = name + value = "" + ts = bigtable.Now() + ) + wg.Add(1) + go func() { + // TODO: should use a semaphore to limit the number of concurrent writes. + writeOneColumn(row, family, column, value, ts) + wg.Done() + }() + } + wg.Wait() + if writeErr != nil { + http.Error(w, "Error writing to Bigtable: "+writeErr.Error(), http.StatusInternalServerError) + return + } + var buf bytes.Buffer + if err := addTemplate.ExecuteTemplate(&buf, "", struct{ Title string }{name}); err != nil { + http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) + return + } + io.Copy(w, &buf) +} + +// handleReset deletes the table if it exists, creates it again, and creates its column families. +func handleReset(w http.ResponseWriter, r *http.Request, table string, adminClient *bigtable.AdminClient) { + if r.Method != "POST" { + http.Error(w, "POST requests only", http.StatusMethodNotAllowed) + return + } + ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) + adminClient.DeleteTable(ctx, table) + if err := adminClient.CreateTable(ctx, table); err != nil { + http.Error(w, "Error creating Bigtable: "+err.Error(), http.StatusInternalServerError) + return + } + time.Sleep(20 * time.Second) + // Create two column families, and set the GC policy for each one to keep one version. + for _, family := range []string{indexColumnFamily, contentColumnFamily} { + if err := adminClient.CreateColumnFamily(ctx, table, family); err != nil { + http.Error(w, "Error creating column family: "+err.Error(), http.StatusInternalServerError) + return + } + if err := adminClient.SetGCPolicy(ctx, table, family, bigtable.MaxVersionsPolicy(1)); err != nil { + http.Error(w, "Error setting GC policy: "+err.Error(), http.StatusInternalServerError) + return + } + } + w.Write([]byte("Done.")) + return +} + +// copyTable copies data from one table to another. +func copyTable(src, dst string, client *bigtable.Client, adminClient *bigtable.AdminClient) error { + if src == "" || src == dst { + return nil + } + ctx, _ := context.WithTimeout(context.Background(), time.Minute) + + // Open the source and destination tables. + srcTable := client.Open(src) + dstTable := client.Open(dst) + + var ( + writeErr error // Set if any write fails. + mu sync.Mutex // Protects writeErr + wg sync.WaitGroup // Used to wait for all writes to finish. + ) + copyRowToTable := func(row bigtable.Row) bool { + mu.Lock() + failed := writeErr != nil + mu.Unlock() + if failed { + return false + } + mut := bigtable.NewMutation() + for family, items := range row { + for _, item := range items { + // Get the column name, excluding the column family name and ':' character. + columnWithoutFamily := item.Column[len(family)+1:] + mut.Set(family, columnWithoutFamily, bigtable.Now(), item.Value) + } + } + wg.Add(1) + go func() { + // TODO: should use a semaphore to limit the number of concurrent writes. + if err := dstTable.Apply(ctx, row.Key(), mut); err != nil { + mu.Lock() + writeErr = err + mu.Unlock() + } + wg.Done() + }() + return true + } + + // Create a filter that only accepts the column families we're interested in. + filter := bigtable.FamilyFilter(indexColumnFamily + "|" + contentColumnFamily) + // Read every row from srcTable, and call copyRowToTable to copy it to our table. 
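+	// InfiniteRange("") scans every row key; the per-row writes run in
+	// goroutines, so wg.Wait below must return before writeErr can be trusted.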
+ err := srcTable.ReadRows(ctx, bigtable.InfiniteRange(""), copyRowToTable, bigtable.RowFilter(filter)) + wg.Wait() + if err != nil { + return err + } + return writeErr +} + +// handleCopy copies data from one table to another. +func handleCopy(w http.ResponseWriter, r *http.Request, dst string, client *bigtable.Client, adminClient *bigtable.AdminClient) { + if r.Method != "POST" { + http.Error(w, "POST requests only", http.StatusMethodNotAllowed) + return + } + src := r.FormValue("name") + if src == "" { + http.Error(w, "No source table specified.", http.StatusBadRequest) + return + } + if err := copyTable(src, dst, client, adminClient); err != nil { + http.Error(w, "Failed to rebuild index: "+err.Error(), http.StatusInternalServerError) + return + } + fmt.Fprint(w, "Copied table.\n") +} diff --git a/vendor/cloud.google.com/go/examples/bigtable/usercounter/README.md b/vendor/cloud.google.com/go/examples/bigtable/usercounter/README.md new file mode 100644 index 000000000..57ba4be4e --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigtable/usercounter/README.md @@ -0,0 +1,29 @@ +# User Counter +# (Cloud Bigtable on Managed VMs using Go) + +This app counts how often each user visits. The app uses Cloud Bigtable to store the visit counts for each user. + +## Prerequisites + +1. Set up Cloud Console. + 1. Go to the [Cloud Console](https://cloud.google.com/console) and create or select your project. + You will need the project ID later. + 1. Go to **Settings > Project Billing Settings** and enable billing. + 1. Select **APIs & Auth > APIs**. + 1. Enable the **Cloud Bigtable API** and the **Cloud Bigtable Admin API**. + (You may need to search for the API). +1. Set up gcloud. + 1. `gcloud components update` + 1. `gcloud auth login` + 1. `gcloud config set project PROJECT_ID` +1. Download App Engine SDK for Go. + 1. `go get -u google.golang.org/appengine/...` +1. In main.go, change the `project` and `instance` constants. + +## Running locally + +1. From the sample project folder, `dev_appserver.py app.yaml`. + +## Deploying on Google App Engine flexible environment + +Follow the [deployment instructions](https://cloud.google.com/appengine/docs/flexible/go/testing-and-deploying-your-app). diff --git a/vendor/cloud.google.com/go/examples/bigtable/usercounter/app.yaml b/vendor/cloud.google.com/go/examples/bigtable/usercounter/app.yaml new file mode 100644 index 000000000..4f5fef0e5 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigtable/usercounter/app.yaml @@ -0,0 +1,11 @@ +runtime: go +api_version: go1 +vm: true + +manual_scaling: + instances: 1 + +handlers: +# Serve only the web root. +- url: / + script: _go_app diff --git a/vendor/cloud.google.com/go/examples/bigtable/usercounter/main.go b/vendor/cloud.google.com/go/examples/bigtable/usercounter/main.go new file mode 100644 index 000000000..a898369b1 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigtable/usercounter/main.go @@ -0,0 +1,180 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +/* +User counter is a program that tracks how often a user has visited the index page. + +This program demonstrates usage of the Cloud Bigtable API for App Engine flexible environment and Go. +Instructions for running this program are in the README.md. +*/ +package main + +import ( + "bytes" + "encoding/binary" + "html/template" + "log" + "net/http" + + "cloud.google.com/go/bigtable" + "golang.org/x/net/context" + "google.golang.org/appengine" + aelog "google.golang.org/appengine/log" + "google.golang.org/appengine/user" +) + +// User-provided constants. +const ( + project = "PROJECT_ID" + instance = "INSTANCE" +) + +var ( + tableName = "user-visit-counter" + familyName = "emails" + + // Client is initialized by main. + client *bigtable.Client +) + +func main() { + ctx := context.Background() + + // Set up admin client, tables, and column families. + // NewAdminClient uses Application Default Credentials to authenticate. + adminClient, err := bigtable.NewAdminClient(ctx, project, instance) + if err != nil { + log.Fatalf("Unable to create a table admin client. %v", err) + } + tables, err := adminClient.Tables(ctx) + if err != nil { + log.Fatalf("Unable to fetch table list. %v", err) + } + if !sliceContains(tables, tableName) { + if err := adminClient.CreateTable(ctx, tableName); err != nil { + log.Fatalf("Unable to create table: %v. %v", tableName, err) + } + } + tblInfo, err := adminClient.TableInfo(ctx, tableName) + if err != nil { + log.Fatalf("Unable to read info for table: %v. %v", tableName, err) + } + if !sliceContains(tblInfo.Families, familyName) { + if err := adminClient.CreateColumnFamily(ctx, tableName, familyName); err != nil { + log.Fatalf("Unable to create column family: %v. %v", familyName, err) + } + } + adminClient.Close() + + // Set up Bigtable data operations client. + // NewClient uses Application Default Credentials to authenticate. + client, err = bigtable.NewClient(ctx, project, instance) + if err != nil { + log.Fatalf("Unable to create data operations client. %v", err) + } + + http.Handle("/", appHandler(mainHandler)) + appengine.Main() // Never returns. +} + +// mainHandler tracks how many times each user has visited this page. +func mainHandler(w http.ResponseWriter, r *http.Request) *appError { + if r.URL.Path != "/" { + http.NotFound(w, r) + return nil + } + + ctx := appengine.NewContext(r) + u := user.Current(ctx) + if u == nil { + login, err := user.LoginURL(ctx, r.URL.String()) + if err != nil { + return &appError{err, "Error finding login URL", http.StatusInternalServerError} + } + http.Redirect(w, r, login, http.StatusFound) + return nil + } + logoutURL, err := user.LogoutURL(ctx, "/") + if err != nil { + return &appError{err, "Error finding logout URL", http.StatusInternalServerError} + } + + // Increment visit count for user. + tbl := client.Open(tableName) + rmw := bigtable.NewReadModifyWrite() + rmw.Increment(familyName, u.Email, 1) + row, err := tbl.ApplyReadModifyWrite(ctx, u.Email, rmw) + if err != nil { + return &appError{err, "Error applying ReadModifyWrite to row: " + u.Email, http.StatusInternalServerError} + } + data := struct { + Username, Logout string + Visits uint64 + }{ + Username: u.Email, + // Retrieve the most recently edited column. + Visits: binary.BigEndian.Uint64(row[familyName][0].Value), + Logout: logoutURL, + } + + // Display hello page. 
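Before the page is rendered, one note on the counter read above: ApplyReadModifyWrite runs atomically on the server, so concurrent requests for the same user cannot lose increments, and Increment stores the count as an 8-byte big-endian integer, which is why the handler decodes it with binary.BigEndian.Uint64. For illustration, a read-only lookup of the same counter might be sketched as follows (visitCount is a hypothetical helper; tableName and familyName are the constants above):

// visitCount returns the stored visit count for email without modifying it.
func visitCount(ctx context.Context, client *bigtable.Client, email string) (uint64, error) {
	row, err := client.Open(tableName).ReadRow(ctx, email,
		bigtable.RowFilter(bigtable.LatestNFilter(1)))
	if err != nil {
		return 0, err
	}
	cells := row[familyName]
	if len(cells) == 0 {
		return 0, nil // no visits recorded yet
	}
	return binary.BigEndian.Uint64(cells[0].Value), nil
}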
+ var buf bytes.Buffer
+ if err := tmpl.Execute(&buf, data); err != nil {
+ return &appError{err, "Error writing template", http.StatusInternalServerError}
+ }
+ buf.WriteTo(w)
+ return nil
+}
+
+var tmpl = template.Must(template.New("").Parse(`
+<html><body>
+
+{{with .Username}}<p>Hello {{.}}</p>{{end}}
+{{with .Logout}}<p><a href="{{.}}">Sign out</a></p>{{end}}
+
+<p>
+You have visited {{.Visits}}
+</p>
+
+</body></html>
+ +`)) + +// sliceContains reports whether the provided string is present in the given slice of strings. +func sliceContains(list []string, target string) bool { + for _, s := range list { + if s == target { + return true + } + } + return false +} + +// More info about this method of error handling can be found at: http://blog.golang.org/error-handling-and-go +type appHandler func(http.ResponseWriter, *http.Request) *appError + +type appError struct { + Error error + Message string + Code int +} + +func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if e := fn(w, r); e != nil { + ctx := appengine.NewContext(r) + aelog.Errorf(ctx, "%v", e.Error) + http.Error(w, e.Message, e.Code) + } +} diff --git a/vendor/cloud.google.com/go/examples/storage/appengine/app.go b/vendor/cloud.google.com/go/examples/storage/appengine/app.go new file mode 100644 index 000000000..e8cf57e61 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/storage/appengine/app.go @@ -0,0 +1,434 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//[START sample] +// Package gcsdemo is an example App Engine app using the Google Cloud Storage API. +// +// NOTE: the cloud.google.com/go/storage package is not compatible with +// dev_appserver.py, so this example will not work in a local development +// environment. +package gcsdemo + +//[START imports] +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "cloud.google.com/go/storage" + "golang.org/x/net/context" + "google.golang.org/appengine" + "google.golang.org/appengine/file" + "google.golang.org/appengine/log" +) + +//[END imports] + +// bucket is a local cache of the app's default bucket name. +var bucket string // or: var bucket = ".appspot.com" + +func init() { + http.HandleFunc("/", handler) +} + +// demo struct holds information needed to run the various demo functions. +type demo struct { + bucket *storage.BucketHandle + client *storage.Client + + w io.Writer + ctx context.Context + // cleanUp is a list of filenames that need cleaning up at the end of the demo. + cleanUp []string + // failed indicates that one or more of the demo steps failed. + failed bool +} + +func (d *demo) errorf(format string, args ...interface{}) { + d.failed = true + fmt.Fprintln(d.w, fmt.Sprintf(format, args...)) + log.Errorf(d.ctx, format, args...) +} + +// handler is the main demo entry point that calls the GCS operations. 
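The handler below writes every demo step into a bytes.Buffer rather than the ResponseWriter directly. The reason is ordering: an HTTP status line can be sent only once, before any body bytes, and whether this run is a 200 or a 500 is known only after all steps have executed. Reduced to a sketch (runSteps is hypothetical):

buf := &bytes.Buffer{}
failed := runSteps(buf) // hypothetical: runs the demo, writing progress to buf
if failed {
	w.WriteHeader(http.StatusInternalServerError)
} else {
	w.WriteHeader(http.StatusOK)
}
buf.WriteTo(w) // the body is flushed only after the status is chosen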
+func handler(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + if appengine.IsDevAppServer() { + http.Error(w, "This example does not work with dev_appserver.py", http.StatusNotImplemented) + } + + //[START get_default_bucket] + ctx := appengine.NewContext(r) + if bucket == "" { + var err error + if bucket, err = file.DefaultBucketName(ctx); err != nil { + log.Errorf(ctx, "failed to get default GCS bucket name: %v", err) + return + } + } + //[END get_default_bucket] + + client, err := storage.NewClient(ctx) + if err != nil { + log.Errorf(ctx, "failed to create client: %v", err) + return + } + defer client.Close() + + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprintf(w, "Demo GCS Application running from Version: %v\n", appengine.VersionID(ctx)) + fmt.Fprintf(w, "Using bucket name: %v\n\n", bucket) + + buf := &bytes.Buffer{} + d := &demo{ + w: buf, + ctx: ctx, + client: client, + bucket: client.Bucket(bucket), + } + + n := "demo-testfile-go" + d.createFile(n) + d.readFile(n) + d.copyFile(n) + d.statFile(n) + d.createListFiles() + d.listBucket() + d.listBucketDirMode() + d.defaultACL() + d.putDefaultACLRule() + d.deleteDefaultACLRule() + d.bucketACL() + d.putBucketACLRule() + d.deleteBucketACLRule() + d.acl(n) + d.putACLRule(n) + d.deleteACLRule(n) + d.deleteFiles() + + if d.failed { + w.WriteHeader(http.StatusInternalServerError) + buf.WriteTo(w) + fmt.Fprintf(w, "\nDemo failed.\n") + } else { + w.WriteHeader(http.StatusOK) + buf.WriteTo(w) + fmt.Fprintf(w, "\nDemo succeeded.\n") + } +} + +//[START write] +// createFile creates a file in Google Cloud Storage. +func (d *demo) createFile(fileName string) { + fmt.Fprintf(d.w, "Creating file /%v/%v\n", bucket, fileName) + + wc := d.bucket.Object(fileName).NewWriter(d.ctx) + wc.ContentType = "text/plain" + wc.Metadata = map[string]string{ + "x-goog-meta-foo": "foo", + "x-goog-meta-bar": "bar", + } + d.cleanUp = append(d.cleanUp, fileName) + + if _, err := wc.Write([]byte("abcde\n")); err != nil { + d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) + return + } + if _, err := wc.Write([]byte(strings.Repeat("f", 1024*4) + "\n")); err != nil { + d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) + return + } + if err := wc.Close(); err != nil { + d.errorf("createFile: unable to close bucket %q, file %q: %v", bucket, fileName, err) + return + } +} + +//[END write] + +//[START read] +// readFile reads the named file in Google Cloud Storage. +func (d *demo) readFile(fileName string) { + io.WriteString(d.w, "\nAbbreviated file content (first line and last 1K):\n") + + rc, err := d.bucket.Object(fileName).NewReader(d.ctx) + if err != nil { + d.errorf("readFile: unable to open file from bucket %q, file %q: %v", bucket, fileName, err) + return + } + defer rc.Close() + slurp, err := ioutil.ReadAll(rc) + if err != nil { + d.errorf("readFile: unable to read data from bucket %q, file %q: %v", bucket, fileName, err) + return + } + + fmt.Fprintf(d.w, "%s\n", bytes.SplitN(slurp, []byte("\n"), 2)[0]) + if len(slurp) > 1024 { + fmt.Fprintf(d.w, "...%s\n", slurp[len(slurp)-1024:]) + } else { + fmt.Fprintf(d.w, "%s\n", slurp) + } +} + +//[END read] + +//[START copy] +// copyFile copies a file in Google Cloud Storage. 
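A version note on the copy below: ObjectHandle.CopyTo is the API of the revision vendored here; later releases of cloud.google.com/go/storage replaced it with a Copier. Roughly, the newer equivalent would be:

// Newer storage API (not the revision vendored in this diff):
src := d.bucket.Object(fileName)
dst := d.bucket.Object(fileName + "-copy")
attrs, err := dst.CopierFrom(src).Run(d.ctx) // attrs corresponds to CopyTo's returned *ObjectAttrs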
+func (d *demo) copyFile(fileName string) { + copyName := fileName + "-copy" + fmt.Fprintf(d.w, "Copying file /%v/%v to /%v/%v:\n", bucket, fileName, bucket, copyName) + + obj, err := d.bucket.Object(fileName).CopyTo(d.ctx, d.bucket.Object(copyName), nil) + if err != nil { + d.errorf("copyFile: unable to copy /%v/%v to bucket %q, file %q: %v", bucket, fileName, bucket, copyName, err) + return + } + d.cleanUp = append(d.cleanUp, copyName) + + d.dumpStats(obj) +} + +//[END copy] + +func (d *demo) dumpStats(obj *storage.ObjectAttrs) { + fmt.Fprintf(d.w, "(filename: /%v/%v, ", obj.Bucket, obj.Name) + fmt.Fprintf(d.w, "ContentType: %q, ", obj.ContentType) + fmt.Fprintf(d.w, "ACL: %#v, ", obj.ACL) + fmt.Fprintf(d.w, "Owner: %v, ", obj.Owner) + fmt.Fprintf(d.w, "ContentEncoding: %q, ", obj.ContentEncoding) + fmt.Fprintf(d.w, "Size: %v, ", obj.Size) + fmt.Fprintf(d.w, "MD5: %q, ", obj.MD5) + fmt.Fprintf(d.w, "CRC32C: %q, ", obj.CRC32C) + fmt.Fprintf(d.w, "Metadata: %#v, ", obj.Metadata) + fmt.Fprintf(d.w, "MediaLink: %q, ", obj.MediaLink) + fmt.Fprintf(d.w, "StorageClass: %q, ", obj.StorageClass) + if !obj.Deleted.IsZero() { + fmt.Fprintf(d.w, "Deleted: %v, ", obj.Deleted) + } + fmt.Fprintf(d.w, "Updated: %v)\n", obj.Updated) +} + +//[START file_metadata] +// statFile reads the stats of the named file in Google Cloud Storage. +func (d *demo) statFile(fileName string) { + io.WriteString(d.w, "\nFile stat:\n") + + obj, err := d.bucket.Object(fileName).Attrs(d.ctx) + if err != nil { + d.errorf("statFile: unable to stat file from bucket %q, file %q: %v", bucket, fileName, err) + return + } + + d.dumpStats(obj) +} + +//[END file_metadata] + +// createListFiles creates files that will be used by listBucket. +func (d *demo) createListFiles() { + io.WriteString(d.w, "\nCreating more files for listbucket...\n") + for _, n := range []string{"foo1", "foo2", "bar", "bar/1", "bar/2", "boo/"} { + d.createFile(n) + } +} + +//[START list_bucket] +// listBucket lists the contents of a bucket in Google Cloud Storage. +func (d *demo) listBucket() { + io.WriteString(d.w, "\nListbucket result:\n") + + query := &storage.Query{Prefix: "foo"} + for query != nil { + objs, err := d.bucket.List(d.ctx, query) + if err != nil { + d.errorf("listBucket: unable to list bucket %q: %v", bucket, err) + return + } + query = objs.Next + + for _, obj := range objs.Results { + d.dumpStats(obj) + } + } +} + +//[END list_bucket] + +func (d *demo) listDir(name, indent string) { + query := &storage.Query{Prefix: name, Delimiter: "/"} + for query != nil { + objs, err := d.bucket.List(d.ctx, query) + if err != nil { + d.errorf("listBucketDirMode: unable to list bucket %q: %v", bucket, err) + return + } + query = objs.Next + + for _, obj := range objs.Results { + fmt.Fprint(d.w, indent) + d.dumpStats(obj) + } + for _, dir := range objs.Prefixes { + fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, bucket, dir) + d.listDir(dir, indent+" ") + } + } +} + +// listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage. +func (d *demo) listBucketDirMode() { + io.WriteString(d.w, "\nListbucket directory mode result:\n") + d.listDir("b", "") +} + +// dumpDefaultACL prints out the default object ACL for this bucket. 
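One thing to watch in the function below: despite its name and error prefix, it lists d.bucket.ACL(), the bucket ACL handle, so dumpDefaultACL and dumpBucketACL print the same listing. If the default object ACL (what newly created objects inherit) is what is intended, the call would presumably be the one used by putDefaultACLRule further down:

// Default object ACL listing, as opposed to the bucket ACL that
// dumpDefaultACL below actually requests:
acl, err := d.bucket.DefaultObjectACL().List(d.ctx)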
+func (d *demo) dumpDefaultACL() { + acl, err := d.bucket.ACL().List(d.ctx) + if err != nil { + d.errorf("defaultACL: unable to list default object ACL for bucket %q: %v", bucket, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role) + } +} + +// defaultACL displays the default object ACL for this bucket. +func (d *demo) defaultACL() { + io.WriteString(d.w, "\nDefault object ACL:\n") + d.dumpDefaultACL() +} + +// putDefaultACLRule adds the "allUsers" default object ACL rule for this bucket. +func (d *demo) putDefaultACLRule() { + io.WriteString(d.w, "\nPut Default object ACL Rule:\n") + err := d.bucket.DefaultObjectACL().Set(d.ctx, storage.AllUsers, storage.RoleReader) + if err != nil { + d.errorf("putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpDefaultACL() +} + +// deleteDefaultACLRule deleted the "allUsers" default object ACL rule for this bucket. +func (d *demo) deleteDefaultACLRule() { + io.WriteString(d.w, "\nDelete Default object ACL Rule:\n") + err := d.bucket.DefaultObjectACL().Delete(d.ctx, storage.AllUsers) + if err != nil { + d.errorf("deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpDefaultACL() +} + +// dumpBucketACL prints out the bucket ACL. +func (d *demo) dumpBucketACL() { + acl, err := d.bucket.ACL().List(d.ctx) + if err != nil { + d.errorf("dumpBucketACL: unable to list bucket ACL for bucket %q: %v", bucket, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role) + } +} + +// bucketACL displays the bucket ACL for this bucket. +func (d *demo) bucketACL() { + io.WriteString(d.w, "\nBucket ACL:\n") + d.dumpBucketACL() +} + +// putBucketACLRule adds the "allUsers" bucket ACL rule for this bucket. +func (d *demo) putBucketACLRule() { + io.WriteString(d.w, "\nPut Bucket ACL Rule:\n") + err := d.bucket.ACL().Set(d.ctx, storage.AllUsers, storage.RoleReader) + if err != nil { + d.errorf("putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpBucketACL() +} + +// deleteBucketACLRule deleted the "allUsers" bucket ACL rule for this bucket. +func (d *demo) deleteBucketACLRule() { + io.WriteString(d.w, "\nDelete Bucket ACL Rule:\n") + err := d.bucket.ACL().Delete(d.ctx, storage.AllUsers) + if err != nil { + d.errorf("deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpBucketACL() +} + +// dumpACL prints out the ACL of the named file. +func (d *demo) dumpACL(fileName string) { + acl, err := d.bucket.Object(fileName).ACL().List(d.ctx) + if err != nil { + d.errorf("dumpACL: unable to list file ACL for bucket %q, file %q: %v", bucket, fileName, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role) + } +} + +// acl displays the ACL for the named file. +func (d *demo) acl(fileName string) { + fmt.Fprintf(d.w, "\nACL for file %v:\n", fileName) + d.dumpACL(fileName) +} + +// putACLRule adds the "allUsers" ACL rule for the named file. 
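In the put...Rule helpers above and the putACLRule that follows, storage.AllUsers grants world-readable access. The same Set call scopes access to a single account by swapping the entity; a sketch with a placeholder address (the email is illustrative, not part of the demo):

// Grant read access to one account rather than the public.
entity := storage.ACLEntity("user-jane@example.com") // placeholder address
err := d.bucket.Object(fileName).ACL().Set(d.ctx, entity, storage.RoleReader)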
+func (d *demo) putACLRule(fileName string) {
+ fmt.Fprintf(d.w, "\nPut ACL rule for file %v:\n", fileName)
+ err := d.bucket.Object(fileName).ACL().Set(d.ctx, storage.AllUsers, storage.RoleReader)
+ if err != nil {
+ d.errorf("putACLRule: unable to save ACL rule for bucket %q, file %q: %v", bucket, fileName, err)
+ return
+ }
+ d.dumpACL(fileName)
+}
+
+// deleteACLRule deletes the "allUsers" ACL rule for the named file.
+func (d *demo) deleteACLRule(fileName string) {
+ fmt.Fprintf(d.w, "\nDelete ACL rule for file %v:\n", fileName)
+ err := d.bucket.Object(fileName).ACL().Delete(d.ctx, storage.AllUsers)
+ if err != nil {
+ d.errorf("deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v", bucket, fileName, err)
+ return
+ }
+ d.dumpACL(fileName)
+}
+
+// deleteFiles deletes all the temporary files from a bucket created by this demo.
+func (d *demo) deleteFiles() {
+ io.WriteString(d.w, "\nDeleting files...\n")
+ for _, v := range d.cleanUp {
+ fmt.Fprintf(d.w, "Deleting file %v\n", v)
+ if err := d.bucket.Object(v).Delete(d.ctx); err != nil {
+ d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", bucket, v, err)
+ return
+ }
+ }
+}
+
+//[END sample] diff --git a/vendor/cloud.google.com/go/examples/storage/appenginevm/app.yaml b/vendor/cloud.google.com/go/examples/storage/appengine/app.yaml new file mode 100644 index 000000000..e98d03087 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/storage/appengine/app.yaml @@ -0,0 +1,8 @@ +application: +version: v1 +runtime: go +api_version: go1 + +handlers: +- url: /.* + script: _go_app diff --git a/vendor/cloud.google.com/go/examples/storage/appenginevm/app.go b/vendor/cloud.google.com/go/examples/storage/appenginevm/app.go new file mode 100644 index 000000000..fbda6eb40 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/storage/appenginevm/app.go @@ -0,0 +1,431 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +
+// Package main is an example Managed VM app using the Google Cloud Storage API. +package main +
+//[START imports] +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" +
+ "cloud.google.com/go/storage" + "golang.org/x/net/context" + "google.golang.org/appengine" + "google.golang.org/appengine/file" + "google.golang.org/appengine/log" +) +
+//[END imports] +
+// bucket is a local cache of the app's default bucket name. +var bucket string // or: var bucket = ".appspot.com" +
+func main() { + http.HandleFunc("/", handler) + appengine.Main() +} +
+//[START bucket_struct] +// demo struct holds information needed to run the various demo functions. +type demo struct { + bucket *storage.BucketHandle + client *storage.Client +
+ w io.Writer + ctx context.Context + // cleanUp is a list of filenames that need cleaning up at the end of the demo. + cleanUp []string + // failed indicates that one or more of the demo steps failed.
+ failed bool +} + +//[END bucket_struct] + +func (d *demo) errorf(format string, args ...interface{}) { + d.failed = true + fmt.Fprintln(d.w, fmt.Sprintf(format, args...)) + log.Errorf(d.ctx, format, args...) +} + +// handler is the main demo entry point that calls the GCS operations. +func handler(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + + //[START get_default_bucket] + ctx := appengine.NewContext(r) + if bucket == "" { + var err error + if bucket, err = file.DefaultBucketName(ctx); err != nil { + log.Errorf(ctx, "failed to get default GCS bucket name: %v", err) + return + } + } + //[END get_default_bucket] + + client, err := storage.NewClient(ctx) + if err != nil { + log.Errorf(ctx, "failed to create client: %v", err) + return + } + defer client.Close() + + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprintf(w, "Demo GCS Application running from Version: %v\n", appengine.VersionID(ctx)) + fmt.Fprintf(w, "Using bucket name: %v\n\n", bucket) + + buf := &bytes.Buffer{} + d := &demo{ + w: buf, + ctx: ctx, + client: client, + bucket: client.Bucket(bucket), + } + + n := "demo-testfile-go" + d.createFile(n) + d.readFile(n) + d.copyFile(n) + d.statFile(n) + d.createListFiles() + d.listBucket() + d.listBucketDirMode() + d.defaultACL() + d.putDefaultACLRule() + d.deleteDefaultACLRule() + d.bucketACL() + d.putBucketACLRule() + d.deleteBucketACLRule() + d.acl(n) + d.putACLRule(n) + d.deleteACLRule(n) + d.deleteFiles() + + if d.failed { + w.WriteHeader(http.StatusInternalServerError) + buf.WriteTo(w) + fmt.Fprintf(w, "\nDemo failed.\n") + } else { + w.WriteHeader(http.StatusOK) + buf.WriteTo(w) + fmt.Fprintf(w, "\nDemo succeeded.\n") + } +} + +//[START write] +// createFile creates a file in Google Cloud Storage. +func (d *demo) createFile(fileName string) { + fmt.Fprintf(d.w, "Creating file /%v/%v\n", bucket, fileName) + + wc := d.bucket.Object(fileName).NewWriter(d.ctx) + wc.ContentType = "text/plain" + wc.Metadata = map[string]string{ + "x-goog-meta-foo": "foo", + "x-goog-meta-bar": "bar", + } + d.cleanUp = append(d.cleanUp, fileName) + + if _, err := wc.Write([]byte("abcde\n")); err != nil { + d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) + return + } + if _, err := wc.Write([]byte(strings.Repeat("f", 1024*4) + "\n")); err != nil { + d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) + return + } + if err := wc.Close(); err != nil { + d.errorf("createFile: unable to close bucket %q, file %q: %v", bucket, fileName, err) + return + } +} + +//[END write] + +//[START read] +// readFile reads the named file in Google Cloud Storage. +func (d *demo) readFile(fileName string) { + io.WriteString(d.w, "\nAbbreviated file content (first line and last 1K):\n") + + rc, err := d.bucket.Object(fileName).NewReader(d.ctx) + if err != nil { + d.errorf("readFile: unable to open file from bucket %q, file %q: %v", bucket, fileName, err) + return + } + defer rc.Close() + slurp, err := ioutil.ReadAll(rc) + if err != nil { + d.errorf("readFile: unable to read data from bucket %q, file %q: %v", bucket, fileName, err) + return + } + + fmt.Fprintf(d.w, "%s\n", bytes.SplitN(slurp, []byte("\n"), 2)[0]) + if len(slurp) > 1024 { + fmt.Fprintf(d.w, "...%s\n", slurp[len(slurp)-1024:]) + } else { + fmt.Fprintf(d.w, "%s\n", slurp) + } +} + +//[END read] + +//[START copy] +// copyFile copies a file in Google Cloud Storage. 
+func (d *demo) copyFile(fileName string) { + copyName := fileName + "-copy" + fmt.Fprintf(d.w, "Copying file /%v/%v to /%v/%v:\n", bucket, fileName, bucket, copyName) + + obj, err := d.bucket.Object(fileName).CopyTo(d.ctx, d.bucket.Object(copyName), nil) + if err != nil { + d.errorf("copyFile: unable to copy /%v/%v to bucket %q, file %q: %v", bucket, fileName, bucket, copyName, err) + return + } + d.cleanUp = append(d.cleanUp, copyName) + + d.dumpStats(obj) +} + +//[END copy] + +func (d *demo) dumpStats(obj *storage.ObjectAttrs) { + fmt.Fprintf(d.w, "(filename: /%v/%v, ", obj.Bucket, obj.Name) + fmt.Fprintf(d.w, "ContentType: %q, ", obj.ContentType) + fmt.Fprintf(d.w, "ACL: %#v, ", obj.ACL) + fmt.Fprintf(d.w, "Owner: %v, ", obj.Owner) + fmt.Fprintf(d.w, "ContentEncoding: %q, ", obj.ContentEncoding) + fmt.Fprintf(d.w, "Size: %v, ", obj.Size) + fmt.Fprintf(d.w, "MD5: %q, ", obj.MD5) + fmt.Fprintf(d.w, "CRC32C: %q, ", obj.CRC32C) + fmt.Fprintf(d.w, "Metadata: %#v, ", obj.Metadata) + fmt.Fprintf(d.w, "MediaLink: %q, ", obj.MediaLink) + fmt.Fprintf(d.w, "StorageClass: %q, ", obj.StorageClass) + if !obj.Deleted.IsZero() { + fmt.Fprintf(d.w, "Deleted: %v, ", obj.Deleted) + } + fmt.Fprintf(d.w, "Updated: %v)\n", obj.Updated) +} + +//[START file_metadata] +// statFile reads the stats of the named file in Google Cloud Storage. +func (d *demo) statFile(fileName string) { + io.WriteString(d.w, "\nFile stat:\n") + + obj, err := d.bucket.Object(fileName).Attrs(d.ctx) + if err != nil { + d.errorf("statFile: unable to stat file from bucket %q, file %q: %v", bucket, fileName, err) + return + } + + d.dumpStats(obj) +} + +//[END file_metadata] + +// createListFiles creates files that will be used by listBucket. +func (d *demo) createListFiles() { + io.WriteString(d.w, "\nCreating more files for listbucket...\n") + for _, n := range []string{"foo1", "foo2", "bar", "bar/1", "bar/2", "boo/"} { + d.createFile(n) + } +} + +//[START list_bucket] +// listBucket lists the contents of a bucket in Google Cloud Storage. +func (d *demo) listBucket() { + io.WriteString(d.w, "\nListbucket result:\n") + + query := &storage.Query{Prefix: "foo"} + for query != nil { + objs, err := d.bucket.List(d.ctx, query) + if err != nil { + d.errorf("listBucket: unable to list bucket %q: %v", bucket, err) + return + } + query = objs.Next + + for _, obj := range objs.Results { + d.dumpStats(obj) + } + } +} + +//[END list_bucket] + +func (d *demo) listDir(name, indent string) { + query := &storage.Query{Prefix: name, Delimiter: "/"} + for query != nil { + objs, err := d.bucket.List(d.ctx, query) + if err != nil { + d.errorf("listBucketDirMode: unable to list bucket %q: %v", bucket, err) + return + } + query = objs.Next + + for _, obj := range objs.Results { + fmt.Fprint(d.w, indent) + d.dumpStats(obj) + } + for _, dir := range objs.Prefixes { + fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, bucket, dir) + d.listDir(dir, indent+" ") + } + } +} + +// listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage. +func (d *demo) listBucketDirMode() { + io.WriteString(d.w, "\nListbucket directory mode result:\n") + d.listDir("b", "") +} + +// dumpDefaultACL prints out the default object ACL for this bucket. 
+func (d *demo) dumpDefaultACL() { + acl, err := d.bucket.ACL().List(d.ctx) + if err != nil { + d.errorf("defaultACL: unable to list default object ACL for bucket %q: %v", bucket, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role) + } +} + +// defaultACL displays the default object ACL for this bucket. +func (d *demo) defaultACL() { + io.WriteString(d.w, "\nDefault object ACL:\n") + d.dumpDefaultACL() +} + +// putDefaultACLRule adds the "allUsers" default object ACL rule for this bucket. +func (d *demo) putDefaultACLRule() { + io.WriteString(d.w, "\nPut Default object ACL Rule:\n") + err := d.bucket.DefaultObjectACL().Set(d.ctx, storage.AllUsers, storage.RoleReader) + if err != nil { + d.errorf("putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpDefaultACL() +} + +// deleteDefaultACLRule deleted the "allUsers" default object ACL rule for this bucket. +func (d *demo) deleteDefaultACLRule() { + io.WriteString(d.w, "\nDelete Default object ACL Rule:\n") + err := d.bucket.DefaultObjectACL().Delete(d.ctx, storage.AllUsers) + if err != nil { + d.errorf("deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpDefaultACL() +} + +// dumpBucketACL prints out the bucket ACL. +func (d *demo) dumpBucketACL() { + acl, err := d.bucket.ACL().List(d.ctx) + if err != nil { + d.errorf("dumpBucketACL: unable to list bucket ACL for bucket %q: %v", bucket, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role) + } +} + +// bucketACL displays the bucket ACL for this bucket. +func (d *demo) bucketACL() { + io.WriteString(d.w, "\nBucket ACL:\n") + d.dumpBucketACL() +} + +// putBucketACLRule adds the "allUsers" bucket ACL rule for this bucket. +func (d *demo) putBucketACLRule() { + io.WriteString(d.w, "\nPut Bucket ACL Rule:\n") + err := d.bucket.ACL().Set(d.ctx, storage.AllUsers, storage.RoleReader) + if err != nil { + d.errorf("putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpBucketACL() +} + +// deleteBucketACLRule deleted the "allUsers" bucket ACL rule for this bucket. +func (d *demo) deleteBucketACLRule() { + io.WriteString(d.w, "\nDelete Bucket ACL Rule:\n") + err := d.bucket.ACL().Delete(d.ctx, storage.AllUsers) + if err != nil { + d.errorf("deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v", bucket, err) + return + } + d.dumpBucketACL() +} + +// dumpACL prints out the ACL of the named file. +func (d *demo) dumpACL(fileName string) { + acl, err := d.bucket.Object(fileName).ACL().List(d.ctx) + if err != nil { + d.errorf("dumpACL: unable to list file ACL for bucket %q, file %q: %v", bucket, fileName, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role) + } +} + +// acl displays the ACL for the named file. +func (d *demo) acl(fileName string) { + fmt.Fprintf(d.w, "\nACL for file %v:\n", fileName) + d.dumpACL(fileName) +} + +// putACLRule adds the "allUsers" ACL rule for the named file. 
+func (d *demo) putACLRule(fileName string) { + fmt.Fprintf(d.w, "\nPut ACL rule for file %v:\n", fileName) + err := d.bucket.Object(fileName).ACL().Set(d.ctx, storage.AllUsers, storage.RoleReader) + if err != nil { + d.errorf("putACLRule: unable to save ACL rule for bucket %q, file %q: %v", bucket, fileName, err) + return + } + d.dumpACL(fileName) +} + +// deleteACLRule deleted the "allUsers" ACL rule for the named file. +func (d *demo) deleteACLRule(fileName string) { + fmt.Fprintf(d.w, "\nDelete ACL rule for file %v:\n", fileName) + err := d.bucket.Object(fileName).ACL().Delete(d.ctx, storage.AllUsers) + if err != nil { + d.errorf("deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v", bucket, fileName, err) + return + } + d.dumpACL(fileName) +} + +//[START delete] +// deleteFiles deletes all the temporary files from a bucket created by this demo. +func (d *demo) deleteFiles() { + io.WriteString(d.w, "\nDeleting files...\n") + for _, v := range d.cleanUp { + fmt.Fprintf(d.w, "Deleting file %v\n", v) + if err := d.bucket.Object(v).Delete(d.ctx); err != nil { + d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", bucket, v, err) + return + } + } +} + +//[END delete] diff --git a/vendor/cloud.google.com/go/examples/storage/appenginevm/app.yaml b/vendor/cloud.google.com/go/examples/storage/appenginevm/app.yaml new file mode 100644 index 000000000..6847fc871 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/storage/appenginevm/app.yaml @@ -0,0 +1,10 @@ +runtime: go +api_version: go1 +vm: true + +manual_scaling: + instances: 1 + +handlers: +- url: /.* + script: _go_app diff --git a/vendor/cloud.google.com/go/internal/bundler/bundler.go b/vendor/cloud.google.com/go/internal/bundler/bundler.go new file mode 100644 index 000000000..7f070ca2e --- /dev/null +++ b/vendor/cloud.google.com/go/internal/bundler/bundler.go @@ -0,0 +1,257 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package bundler supports bundling (batching) of items. Bundling amortizes an +// action with fixed costs over multiple items. For example, if an API provides +// an RPC that accepts a list of items as input, but clients would prefer +// adding items one at a time, then a Bundler can accept individual items from +// the client and bundle many of them into a single RPC. +package bundler + +import ( + "errors" + "reflect" + "sync" + "time" +) + +const ( + DefaultDelayThreshold = time.Second + DefaultBundleCountThreshold = 10 + DefaultBundleByteThreshold = 1e6 // 1M + DefaultBufferedByteLimit = 1e9 // 1G +) + +var ( + // ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit. + ErrOverflow = errors.New("bundler reached buffered byte limit") + + // ErrOversizedItem indicates that an item's size exceeds the maximum bundle size. 
+ ErrOversizedItem = errors.New("item size exceeds bundle byte limit")
+)
+
+// A Bundler collects items added to it into a bundle until the bundle
+// exceeds a given size, then calls a user-provided function to handle the bundle.
+type Bundler struct {
+ // Starting from the time that the first message is added to a bundle, once
+ // this delay has passed, handle the bundle. The default is DefaultDelayThreshold.
+ DelayThreshold time.Duration
+
+ // Once a bundle has this many items, handle the bundle. Since only one
+ // item at a time is added to a bundle, no bundle will exceed this
+ // threshold, so it also serves as a limit. The default is
+ // DefaultBundleCountThreshold.
+ BundleCountThreshold int
+
+ // Once the number of bytes in current bundle reaches this threshold, handle
+ // the bundle. The default is DefaultBundleByteThreshold. This triggers handling,
+ // but does not cap the total size of a bundle.
+ BundleByteThreshold int
+
+ // The maximum size of a bundle, in bytes. Zero means unlimited.
+ BundleByteLimit int
+
+ // The maximum number of bytes that the Bundler will keep in memory before
+ // returning ErrOverflow. The default is DefaultBufferedByteLimit.
+ BufferedByteLimit int
+
+ handler func(interface{}) // called to handle a bundle
+ itemSliceZero reflect.Value // nil (zero value) for slice of items
+ donec chan struct{} // closed when the Bundler is closed
+ handlec chan int // sent to when a bundle is ready for handling
+ timer *time.Timer // implements DelayThreshold
+
+ mu sync.Mutex
+ bufferedSize int // total bytes buffered
+ closedBundles []bundle // bundles waiting to be handled
+ curBundle bundle // incoming items added to this bundle
+ calledc chan struct{} // closed and re-created after handler is called
+}
+
+type bundle struct {
+ items reflect.Value // slice of item type
+ size int // size in bytes of all items
+}
+
+// NewBundler creates a new Bundler. When you are finished with a Bundler, call
+// its Close method.
+//
+// itemExample is a value of the type that will be bundled. For example, if you
+// want to create bundles of *Entry, you could pass &Entry{} for itemExample.
+//
+// handler is a function that will be called on each bundle. If itemExample is
+// of type T, the argument to handler is of type []T.
+func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler {
+ b := &Bundler{
+ DelayThreshold: DefaultDelayThreshold,
+ BundleCountThreshold: DefaultBundleCountThreshold,
+ BundleByteThreshold: DefaultBundleByteThreshold,
+ BufferedByteLimit: DefaultBufferedByteLimit,
+
+ handler: handler,
+ itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))),
+ donec: make(chan struct{}),
+ handlec: make(chan int, 1),
+ calledc: make(chan struct{}),
+ timer: time.NewTimer(1000 * time.Hour), // harmless initial timeout
+ }
+ b.curBundle.items = b.itemSliceZero
+ go b.background()
+ return b
+}
+
+// Add adds item to the current bundle. It marks the bundle for handling and
+// starts a new one if any of the thresholds or limits are exceeded.
+//
+// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
+// the item can never be handled. Add returns ErrOversizedItem in this case.
+//
+// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit),
+// Add returns ErrOverflow.
+//
+// Add never blocks.
+func (b *Bundler) Add(item interface{}, size int) error {
+ // If this item exceeds the maximum size of a bundle,
+ // we can never send it.
+ if b.BundleByteLimit > 0 && size > b.BundleByteLimit { + return ErrOversizedItem + } + b.mu.Lock() + defer b.mu.Unlock() + // If adding this item would exceed our allotted memory + // footprint, we can't accept it. + if b.bufferedSize+size > b.BufferedByteLimit { + return ErrOverflow + } + // If adding this item to the current bundle would cause it to exceed the + // maximum bundle size, close the current bundle and start a new one. + if b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit { + b.closeAndHandleBundle() + } + // Add the item. + b.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item)) + b.curBundle.size += size + b.bufferedSize += size + // If this is the first item in the bundle, restart the timer. + if b.curBundle.items.Len() == 1 { + b.timer.Reset(b.DelayThreshold) + } + // If the current bundle equals the count threshold, close it. + if b.curBundle.items.Len() == b.BundleCountThreshold { + b.closeAndHandleBundle() + } + // If the current bundle equals or exceeds the byte threshold, close it. + if b.curBundle.size >= b.BundleByteThreshold { + b.closeAndHandleBundle() + } + return nil +} + +// Flush waits until all items in the Bundler have been handled. +func (b *Bundler) Flush() { + b.mu.Lock() + b.closeBundle() + // Unconditionally trigger the handling goroutine, to ensure calledc is closed + // even if there are no outstanding bundles. + select { + case b.handlec <- 1: + default: + } + calledc := b.calledc // remember locally, because it may change + b.mu.Unlock() + <-calledc +} + +// Close calls Flush, then shuts down the Bundler. Close should always be +// called on a Bundler when it is no longer needed. You must wait for all calls +// to Add to complete before calling Close. Calling Add concurrently with Close +// may result in the added items being ignored. +func (b *Bundler) Close() { + b.Flush() + b.mu.Lock() + b.timer.Stop() + b.mu.Unlock() + close(b.donec) +} + +func (b *Bundler) closeAndHandleBundle() { + if b.closeBundle() { + // We have created a closed bundle. + // Send to handlec without blocking. + select { + case b.handlec <- 1: + default: + } + } +} + +// closeBundle finishes the current bundle, adds it to the list of closed +// bundles and informs the background goroutine that there are bundles ready +// for processing. +// +// This should always be called with b.mu held. +func (b *Bundler) closeBundle() bool { + if b.curBundle.items.Len() == 0 { + return false + } + b.closedBundles = append(b.closedBundles, b.curBundle) + b.curBundle.items = b.itemSliceZero + b.curBundle.size = 0 + return true +} + +// background runs in a separate goroutine, waiting for events and handling +// bundles. +func (b *Bundler) background() { + done := false + for { + timedOut := false + // Wait for something to happen. + select { + case <-b.handlec: + case <-b.donec: + done = true + case <-b.timer.C: + timedOut = true + } + // Handle closed bundles. + b.mu.Lock() + if timedOut { + b.closeBundle() + } + buns := b.closedBundles + b.closedBundles = nil + // Closing calledc means we've sent all bundles. We need + // a new channel for the next set of bundles, which may start + // accumulating as soon as we release the lock. + calledc := b.calledc + b.calledc = make(chan struct{}) + b.mu.Unlock() + for i, bun := range buns { + b.handler(bun.items.Interface()) + // Drop the bundle's items, reducing our memory footprint. 
+ buns[i].items = reflect.Value{} // buns[i] because bun is a copy + // Note immediately that we have more space, so Adds that occur + // during this loop will have a chance of succeeding. + b.mu.Lock() + b.bufferedSize -= bun.size + b.mu.Unlock() + } + // Signal that we've sent all outstanding bundles. + close(calledc) + if done { + break + } + } +} diff --git a/vendor/cloud.google.com/go/internal/bundler/bundler_test.go b/vendor/cloud.google.com/go/internal/bundler/bundler_test.go new file mode 100644 index 000000000..12c30ee8d --- /dev/null +++ b/vendor/cloud.google.com/go/internal/bundler/bundler_test.go @@ -0,0 +1,224 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bundler + +import ( + "reflect" + "sync" + "testing" + "time" +) + +func TestBundlerCount1(t *testing.T) { + // Unbundled case: one item per bundle. + handler := &testHandler{} + b := NewBundler(int(0), handler.handle) + b.BundleCountThreshold = 1 + b.DelayThreshold = time.Second + + for i := 0; i < 3; i++ { + if err := b.Add(i, 1); err != nil { + t.Fatal(err) + } + } + b.Close() + got := handler.bundles() + want := [][]int{{0}, {1}, {2}} + if !reflect.DeepEqual(got, want) { + t.Errorf("bundles: got %v, want %v", got, want) + } + // All bundles should have been handled "immediately": much less + // than the delay threshold of 1s. + tgot := quantizeTimes(handler.times(), 100*time.Millisecond) + twant := []int{0, 0, 0} + if !reflect.DeepEqual(tgot, twant) { + t.Errorf("times: got %v, want %v", tgot, twant) + } +} + +func TestBundlerCount3(t *testing.T) { + handler := &testHandler{} + b := NewBundler(int(0), handler.handle) + b.BundleCountThreshold = 3 + b.DelayThreshold = 100 * time.Millisecond + // Add 8 items. + // The first two bundles of 3 should both be handled quickly. + // The third bundle of 2 should not be handled for about DelayThreshold ms. + for i := 0; i < 8; i++ { + if err := b.Add(i, 1); err != nil { + t.Fatal(err) + } + } + time.Sleep(5 * b.DelayThreshold) + // We should not need to close the bundler. 
+ + bgot := handler.bundles() + bwant := [][]int{{0, 1, 2}, {3, 4, 5}, {6, 7}} + if !reflect.DeepEqual(bgot, bwant) { + t.Errorf("bundles: got %v, want %v", bgot, bwant) + } + + tgot := quantizeTimes(handler.times(), b.DelayThreshold) + if len(tgot) != 3 || tgot[0] != 0 || tgot[1] != 0 || tgot[2] == 0 { + t.Errorf("times: got %v, want [0, 0, non-zero]", tgot) + } +} + +func TestBundlerByteThreshold(t *testing.T) { + handler := &testHandler{} + b := NewBundler(int(0), handler.handle) + b.BundleCountThreshold = 10 + b.BundleByteThreshold = 3 + add := func(i interface{}, s int) { + if err := b.Add(i, s); err != nil { + t.Fatal(err) + } + } + + add(1, 1) + add(2, 2) + // Hit byte threshold: bundle = 1, 2 + add(3, 1) + add(4, 1) + add(5, 2) + // Passed byte threshold, but not limit: bundle = 3, 4, 5 + add(6, 1) + b.Close() + bgot := handler.bundles() + bwant := [][]int{{1, 2}, {3, 4, 5}, {6}} + if !reflect.DeepEqual(bgot, bwant) { + t.Errorf("bundles: got %v, want %v", bgot, bwant) + } + tgot := quantizeTimes(handler.times(), b.DelayThreshold) + twant := []int{0, 0, 0} + if !reflect.DeepEqual(tgot, twant) { + t.Errorf("times: got %v, want %v", tgot, twant) + } +} + +func TestBundlerLimit(t *testing.T) { + handler := &testHandler{} + b := NewBundler(int(0), handler.handle) + b.BundleCountThreshold = 10 + b.BundleByteLimit = 3 + add := func(i interface{}, s int) { + if err := b.Add(i, s); err != nil { + t.Fatal(err) + } + } + + add(1, 1) + add(2, 2) + // Hit byte limit: bundle = 1, 2 + add(3, 1) + add(4, 1) + add(5, 2) + // Exceeded byte limit: bundle = 3, 4 + add(6, 2) + // Exceeded byte limit: bundle = 5 + b.Close() + bgot := handler.bundles() + bwant := [][]int{{1, 2}, {3, 4}, {5}, {6}} + if !reflect.DeepEqual(bgot, bwant) { + t.Errorf("bundles: got %v, want %v", bgot, bwant) + } + tgot := quantizeTimes(handler.times(), b.DelayThreshold) + twant := []int{0, 0, 0, 0} + if !reflect.DeepEqual(tgot, twant) { + t.Errorf("times: got %v, want %v", tgot, twant) + } +} + +func TestBundlerErrors(t *testing.T) { + // Use a handler that blocks forever, to force the bundler to run out of + // memory. + b := NewBundler(int(0), func(interface{}) { select {} }) + b.BundleByteLimit = 3 + b.BufferedByteLimit = 10 + + if got, want := b.Add(1, 4), ErrOversizedItem; got != want { + t.Fatalf("got %v, want %v", got, want) + } + + for i := 0; i < 5; i++ { + if err := b.Add(i, 2); err != nil { + t.Fatal(err) + } + } + if got, want := b.Add(5, 1), ErrOverflow; got != want { + t.Fatalf("got %v, want %v", got, want) + } +} + +type testHandler struct { + mu sync.Mutex + b [][]int + t []time.Time +} + +func (t *testHandler) bundles() [][]int { + t.mu.Lock() + defer t.mu.Unlock() + return t.b +} + +func (t *testHandler) times() []time.Time { + t.mu.Lock() + defer t.mu.Unlock() + return t.t +} + +func (t *testHandler) handle(b interface{}) { + t.mu.Lock() + defer t.mu.Unlock() + t.b = append(t.b, b.([]int)) + t.t = append(t.t, time.Now()) +} + +// Round times to the nearest q and express them as the number of q +// since the first time. +// E.g. if q is 100ms, then a time within 50ms of the first time +// will be represented as 0, a time 150 to 250ms of the first time +// we be represented as 1, etc. 
+func quantizeTimes(times []time.Time, q time.Duration) []int { + var rs []int + for _, t := range times { + d := t.Sub(times[0]) + r := int((d + q/2) / q) + rs = append(rs, r) + } + return rs +} + +func TestQuantizeTimes(t *testing.T) { + quantum := 100 * time.Millisecond + for _, test := range []struct { + millis []int // times in milliseconds + want []int + }{ + {[]int{10, 20, 30}, []int{0, 0, 0}}, + {[]int{0, 49, 50, 90}, []int{0, 0, 1, 1}}, + {[]int{0, 95, 170, 315}, []int{0, 1, 2, 3}}, + } { + var times []time.Time + for _, ms := range test.millis { + times = append(times, time.Unix(0, int64(ms*1e6))) + } + got := quantizeTimes(times, quantum) + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%v: got %v, want %v", test.millis, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go new file mode 100644 index 000000000..8e0c8f8e5 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/cloud.go @@ -0,0 +1,64 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. +package internal + +import ( + "fmt" + "net/http" +) + +const userAgent = "gcloud-golang/0.1" + +// Transport is an http.RoundTripper that appends Google Cloud client's +// user-agent to the original request's user-agent header. +type Transport struct { + // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. + // Do User-Agent some other way. + + // Base is the actual http.RoundTripper + // requests will use. It must not be nil. + Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} diff --git a/vendor/cloud.google.com/go/internal/testutil/context.go b/vendor/cloud.google.com/go/internal/testutil/context.go new file mode 100644 index 000000000..34e605898 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/context.go @@ -0,0 +1,60 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package testutil contains helper functions for writing tests. +package testutil + +import ( + "io/ioutil" + "log" + "os" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" +) + +const ( + envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" + envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" +) + +// ProjID returns the project ID to use in integration tests, or the empty +// string if none is configured. +func ProjID() string { + projID := os.Getenv(envProjID) + if projID == "" { + return "" + } + return projID +} + +// TokenSource returns the OAuth2 token source to use in integration tests, +// or nil if none is configured. TokenSource will log.Fatal if the token +// source is specified but missing or invalid. +func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource { + key := os.Getenv(envPrivateKey) + if key == "" { + return nil + } + jsonKey, err := ioutil.ReadFile(key) + if err != nil { + log.Fatalf("Cannot read the JSON key file, err: %v", err) + } + conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) + if err != nil { + log.Fatalf("google.JWTConfigFromJSON: %v", err) + } + return conf.TokenSource(ctx) +} diff --git a/vendor/cloud.google.com/go/internal/testutil/iterators.go b/vendor/cloud.google.com/go/internal/testutil/iterators.go new file mode 100644 index 000000000..e3a8ebae8 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/iterators.go @@ -0,0 +1,186 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "fmt" + "reflect" +) + +// TestIteratorNext tests the Next method of a standard client iterator (see +// https://github.com/GoogleCloudPlatform/google-cloud-go/wiki/Iterator-Guidelines). +// +// This function assumes that an iterator has already been created, and that +// the underlying sequence to be iterated over already exists. want should be a +// slice that contains the elements of this sequence. It must contain at least +// one element. +// +// done is the special error value that signals that the iterator has provided all the items. +// +// next is a function that returns the result of calling Next on the iterator. It can usually be +// defined as +// func() (interface{}, error) { return iter.Next() } +// +// TestIteratorNext checks that the iterator returns all the elements of want +// in order, followed by (zero, done). It also confirms that subsequent calls +// to next also return (zero, done). +// +// On success, TestIteratorNext returns ("", true). On failure, it returns a +// suitable error message and false. 
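To make the contract concrete, here is a sketch of how a client package's test might drive this helper, assuming a hypothetical iterator it over strings whose end-of-iteration sentinel is iterator.Done from google.golang.org/api/iterator:

msg, ok := testutil.TestIteratorNext(
	[]string{"a", "b", "c"}, // the sequence the iterator was built over
	iterator.Done,           // the sentinel the iterator returns when exhausted
	func() (interface{}, error) { return it.Next() },
)
if !ok {
	t.Error(msg)
}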
+func TestIteratorNext(want interface{}, done error, next func() (interface{}, error)) (string, bool) { + wVal := reflect.ValueOf(want) + if wVal.Kind() != reflect.Slice { + return "'want' must be a slice", false + } + for i := 0; i < wVal.Len(); i++ { + got, err := next() + if err != nil { + return fmt.Sprintf("#%d: got %v, expected an item", i, err), false + } + w := wVal.Index(i).Interface() + if !reflect.DeepEqual(got, w) { + return fmt.Sprintf("#%d: got %+v, want %+v", i, got, w), false + } + } + // We now should see (, done), no matter how many + // additional calls we make. + zero := reflect.Zero(wVal.Type().Elem()).Interface() + for i := 0; i < 3; i++ { + got, err := next() + if err != done { + return fmt.Sprintf("at end: got error %v, want done", err), false + } + // Since err == done, got should be zero. + if got != zero { + return fmt.Sprintf("got %+v with done, want zero %T", got, zero), false + } + } + return "", true +} + +// PagingIterator describes the standard client iterator pattern with paging as best as possible in Go. +// See https://github.com/GoogleCloudPlatform/google-cloud-go/wiki/Iterator-Guidelines. +type PagingIterator interface { + SetPageSize(int) + SetPageToken(string) + NextPageToken() string + // NextPage() ([]T, error) +} + +// TestIteratorNextPageExact tests the NextPage method of a standard client +// iterator with paging (see PagingIterator). +// +// This function assumes that the underlying sequence to be iterated over +// already exists. want should be a slice that contains the elements of this +// sequence. It must contain at least three elements, in order to test +// non-trivial paging behavior. +// +// done is the special error value that signals that the iterator has provided all the items. +// +// defaultPageSize is the page size to use when the user has not called SetPageSize, or calls +// it with a value <= 0. +// +// newIter should return a new iterator each time it is called. +// +// nextPage should return the result of calling NextPage on the iterator. It can usually be +// defined as +// func(i testutil.PagingIterator) (interface{}, error) { return i.(*).NextPage() } +// +// TestIteratorNextPageExact checks that the iterator returns all the elements +// of want in order, divided into pages of the exactly the right size. It +// confirms that if the last page is partial, done is returned along with it, +// and in any case, done is returned subsequently along with a zero-length +// slice. +// +// On success, TestIteratorNextPageExact returns ("", true). On failure, it returns a +// suitable error message and false. +func TestIteratorNextPageExact(want interface{}, done error, defaultPageSize int, newIter func() PagingIterator, nextPage func(PagingIterator) (interface{}, error)) (string, bool) { + wVal := reflect.ValueOf(want) + if wVal.Kind() != reflect.Slice { + return "'want' must be a slice", false + } + if wVal.Len() < 3 { + return "need at least 3 values for 'want' to effectively test paging", false + } + const doNotSetPageSize = -999 + for _, pageSize := range []int{doNotSetPageSize, -7, 0, 1, 2, wVal.Len(), wVal.Len() + 10} { + adjustedPageSize := int(pageSize) + if pageSize <= 0 { + adjustedPageSize = int(defaultPageSize) + } + // Create the pages we expect to see. 
+ var wantPages []interface{}
+ for i, j := 0, adjustedPageSize; i < wVal.Len(); i, j = j, j+adjustedPageSize {
+ if j > wVal.Len() {
+ j = wVal.Len()
+ }
+ wantPages = append(wantPages, wVal.Slice(i, j).Interface())
+ }
+ for _, usePageToken := range []bool{false, true} {
+ it := newIter()
+ if pageSize != doNotSetPageSize {
+ it.SetPageSize(pageSize)
+ }
+ for i, wantPage := range wantPages {
+ gotPage, err := nextPage(it)
+ if err != nil && err != done {
+ return fmt.Sprintf("usePageToken %v, pageSize %d, #%d: got %v, expected a page",
+ usePageToken, pageSize, i, err), false
+ }
+ if !reflect.DeepEqual(gotPage, wantPage) {
+ return fmt.Sprintf("usePageToken %v, pageSize %d, #%d:\ngot %v\nwant %+v",
+ usePageToken, pageSize, i, gotPage, wantPage), false
+ }
+ // If the last page is partial, NextPage must return done.
+ if reflect.ValueOf(gotPage).Len() < adjustedPageSize && err != done {
+ return fmt.Sprintf("usePageToken %v, pageSize %d, #%d: expected done on partial page, got %v",
+ usePageToken, pageSize, i, err), false
+ }
+ if usePageToken {
+ // Pretend that we are displaying a paginated listing on the web, and the next
+ // page may be served by a different process.
+ // Empty page token implies done, and vice versa.
+ if (it.NextPageToken() == "") != (err == done) {
+ return fmt.Sprintf("pageSize %d: next page token = %q and err = %v; expected empty page token iff done",
+ pageSize, it.NextPageToken(), err), false
+ }
+ if err == nil {
+ token := it.NextPageToken()
+ it = newIter()
+ it.SetPageSize(pageSize)
+ it.SetPageToken(token)
+ }
+ }
+ }
+ // We now should see (<zero-length slice>, done), no matter how many
+ // additional calls we make.
+ for i := 0; i < 3; i++ {
+ gotPage, err := nextPage(it)
+ if err != done {
+ return fmt.Sprintf("usePageToken %v, pageSize %d, at end: got error %v, want done",
+ usePageToken, pageSize, err), false
+ }
+ pVal := reflect.ValueOf(gotPage)
+ if pVal.Kind() != reflect.Slice || pVal.Len() != 0 {
+ return fmt.Sprintf("usePageToken %v, pageSize %d, at end: got %+v with done, want zero-length slice",
+ usePageToken, pageSize, gotPage), false
+ }
+ }
+ }
+ }
+ return "", true
+}
diff --git a/vendor/cloud.google.com/go/internal/testutil/server.go b/vendor/cloud.google.com/go/internal/testutil/server.go
new file mode 100644
index 000000000..287599f36
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/testutil/server.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2016 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testutil
+
+import (
+ "net"
+
+ grpc "google.golang.org/grpc"
+)
+
+// A Server is an in-process gRPC server, listening on a system-chosen port on
+// the local loopback interface. Servers are for testing only and are not
+// intended to be used in production code.
+//
+// To create a server, make a new Server, register your handlers, then call
+// Start:
+//
+// srv, err := NewServer()
+// ...
+// mypb.RegisterMyServiceServer(srv.Gsrv, &myHandler)
+// ....
+// srv.Start() +// +// Clients should connect to the server with no security: +// +// conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) +// ... +type Server struct { + Addr string + l net.Listener + Gsrv *grpc.Server +} + +// NewServer creates a new Server. The Server will be listening for gRPC connections +// at the address named by the Addr field, without TLS. +func NewServer() (*Server, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + s := &Server{ + Addr: l.Addr().String(), + l: l, + Gsrv: grpc.NewServer(), + } + return s, nil +} + +// Start causes the server to start accepting incoming connections. +// Call Start after registering handlers. +func (s *Server) Start() { + go s.Gsrv.Serve(s.l) +} + +// Close shuts down the server. +func (s *Server) Close() { + s.Gsrv.Stop() + s.l.Close() +} diff --git a/vendor/cloud.google.com/go/internal/testutil/server_test.go b/vendor/cloud.google.com/go/internal/testutil/server_test.go new file mode 100644 index 000000000..817ce4ee7 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/server_test.go @@ -0,0 +1,35 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "testing" + + grpc "google.golang.org/grpc" +) + +func TestNewServer(t *testing.T) { + srv, err := NewServer() + if err != nil { + t.Fatal(err) + } + srv.Start() + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatal(err) + } + conn.Close() + srv.Close() +} diff --git a/vendor/cloud.google.com/go/key.json.enc b/vendor/cloud.google.com/go/key.json.enc new file mode 100644 index 000000000..2f673a84b Binary files /dev/null and b/vendor/cloud.google.com/go/key.json.enc differ diff --git a/vendor/cloud.google.com/go/language/apiv1beta1/doc.go b/vendor/cloud.google.com/go/language/apiv1beta1/doc.go new file mode 100644 index 000000000..ac88eab7a --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta1/doc.go @@ -0,0 +1,23 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package language is an experimental, auto-generated package for the +// language API. +// +// Google Cloud Natural Language API provides natural language understanding +// technologies to developers. Examples include sentiment analysis, entity +// recognition, and text annotations. 
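+//
+// A minimal usage sketch (error handling elided; NewClient assumes Google
+// application credentials are available in the environment):
+//
+// ctx := context.Background()
+// c, err := language.NewClient(ctx)
+// ...
+// defer c.Close()
+// resp, err := c.AnalyzeSentiment(ctx, req) // req is a *languagepb.AnalyzeSentimentRequest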
+package language // import "cloud.google.com/go/language/apiv1beta1" diff --git a/vendor/cloud.google.com/go/language/apiv1beta1/language.go b/vendor/cloud.google.com/go/language/apiv1beta1/language.go new file mode 100644 index 000000000..eacd6162c --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta1/language.go @@ -0,0 +1,21 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language + +const ( + gapicNameVersion = "gapic/0.1.0" +) diff --git a/vendor/cloud.google.com/go/language/apiv1beta1/language_client.go b/vendor/cloud.google.com/go/language/apiv1beta1/language_client.go new file mode 100644 index 000000000..f00d70e08 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta1/language_client.go @@ -0,0 +1,173 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language + +import ( + "fmt" + "runtime" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of this client. +type CallOptions struct { + AnalyzeSentiment []gax.CallOption + AnalyzeEntities []gax.CallOption + AnnotateText []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("language.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + ), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + + return &CallOptions{ + AnalyzeSentiment: retry[[2]string{"default", "idempotent"}], + AnalyzeEntities: retry[[2]string{"default", "idempotent"}], + AnnotateText: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with LanguageService. +type Client struct { + // The connection to the service. 
+ conn *grpc.ClientConn + + // The gRPC API client. + client languagepb.LanguageServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The metadata to be sent with each request. + metadata map[string][]string +} + +// NewClient creates a new language service client. +// +// Provides text analysis operations such as sentiment analysis and entity +// recognition. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + client: languagepb.NewLanguageServiceClient(conn), + CallOptions: defaultCallOptions(), + } + c.SetGoogleClientInfo("gax", gax.Version) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(name, version string) { + c.metadata = map[string][]string{ + "x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())}, + } +} + +// AnalyzeSentiment analyzes the sentiment of the provided text. +func (c *Client) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest) (*languagepb.AnalyzeSentimentResponse, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *languagepb.AnalyzeSentimentResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.AnalyzeSentiment(ctx, req) + return err + }, c.CallOptions.AnalyzeSentiment...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeEntities finds named entities (currently finds proper names) in the text, +// entity types, salience, mentions for each entity, and other properties. +func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest) (*languagepb.AnalyzeEntitiesResponse, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *languagepb.AnalyzeEntitiesResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.AnalyzeEntities(ctx, req) + return err + }, c.CallOptions.AnalyzeEntities...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnnotateText advanced API that analyzes the document and provides a full set of text +// annotations, including semantic, syntactic, and sentiment information. This +// API is intended for users who are familiar with machine learning and need +// in-depth text features to build upon. +func (c *Client) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest) (*languagepb.AnnotateTextResponse, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *languagepb.AnnotateTextResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.AnnotateText(ctx, req) + return err + }, c.CallOptions.AnnotateText...) 
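+ // By this point gax.Invoke has applied the retry policy in
+ // c.CallOptions.AnnotateText; a remaining error is final.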
+ if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/language/apiv1beta1/language_client_example_test.go b/vendor/cloud.google.com/go/language/apiv1beta1/language_client_example_test.go new file mode 100644 index 000000000..3da85a244 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta1/language_client_example_test.go @@ -0,0 +1,87 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language_test + +import ( + "cloud.google.com/go/language/apiv1beta1" + "golang.org/x/net/context" + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_AnalyzeSentiment() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeSentimentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeSentiment(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeEntities() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeEntitiesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeEntities(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnnotateText() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnnotateTextRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnnotateText(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/license_test.go b/vendor/cloud.google.com/go/license_test.go new file mode 100644 index 000000000..cff256a47 --- /dev/null +++ b/vendor/cloud.google.com/go/license_test.go @@ -0,0 +1,70 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cloud + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +var sentinels = []string{ + "Copyright", + "Google Inc", + `Licensed under the Apache License, Version 2.0 (the "License");`, +} + +func TestLicense(t *testing.T) { + err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if ext := filepath.Ext(path); ext != ".go" && ext != ".proto" { + return nil + } + if strings.HasSuffix(path, ".pb.go") { + // .pb.go files are generated from the proto files. + // .proto files must have license headers. + return nil + } + if path == "bigtable/cmd/cbt/cbtdoc.go" { + // Automatically generated. + return nil + } + + src, err := ioutil.ReadFile(path) + if err != nil { + return nil + } + src = src[:120] // Ensure all of the sentinel values are at the top of the file. + + // Find license + for _, sentinel := range sentinels { + if !bytes.Contains(src, []byte(sentinel)) { + t.Errorf("%v: license header not present. want %q", path, sentinel) + return nil + } + } + + return nil + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/README.md b/vendor/cloud.google.com/go/logging/apiv2/README.md new file mode 100644 index 000000000..d2d9a176e --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/README.md @@ -0,0 +1,11 @@ +Auto-generated logging v2 clients +================================= + +This package includes auto-generated clients for the logging v2 API. + +Use the handwritten logging client (in the parent directory, +cloud.google.com/go/logging) in preference to this. + +This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. + + diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client.go b/vendor/cloud.google.com/go/logging/apiv2/config_client.go new file mode 100644 index 000000000..29438340d --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client.go @@ -0,0 +1,327 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "fmt" + "math" + "runtime" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +var ( + configParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + configSinkPathTemplate = gax.MustCompilePathTemplate("projects/{project}/sinks/{sink}") +) + +// ConfigCallOptions contains the retry settings for each method of this client. 
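+// These defaults can be overridden per client after construction; for
+// example (a hypothetical adjustment, not part of this file):
+//
+// c, err := logging.NewConfigClient(ctx)
+// ...
+// c.CallOptions.ListSinks = nil // opt out of retrying ListSinks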
+type ConfigCallOptions struct { + ListSinks []gax.CallOption + GetSink []gax.CallOption + CreateSink []gax.CallOption + UpdateSink []gax.CallOption + DeleteSink []gax.CallOption +} + +func defaultConfigClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + } +} + +func defaultConfigCallOptions() *ConfigCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + + return &ConfigCallOptions{ + ListSinks: retry[[2]string{"default", "idempotent"}], + GetSink: retry[[2]string{"default", "idempotent"}], + CreateSink: retry[[2]string{"default", "non_idempotent"}], + UpdateSink: retry[[2]string{"default", "non_idempotent"}], + DeleteSink: retry[[2]string{"default", "idempotent"}], + } +} + +// ConfigClient is a client for interacting with ConfigServiceV2. +type ConfigClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client loggingpb.ConfigServiceV2Client + + // The call options for this service. + CallOptions *ConfigCallOptions + + // The metadata to be sent with each request. + metadata map[string][]string +} + +// NewConfigClient creates a new config service client. +// +// Service for configuring sinks used to export log entries outside Stackdriver +// Logging. +func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ConfigClient{ + conn: conn, + client: loggingpb.NewConfigServiceV2Client(conn), + CallOptions: defaultConfigCallOptions(), + } + c.SetGoogleClientInfo("gax", gax.Version) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ConfigClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ConfigClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ConfigClient) SetGoogleClientInfo(name, version string) { + c.metadata = map[string][]string{ + "x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())}, + } +} + +// ParentPath returns the path for the parent resource. +func ConfigParentPath(project string) string { + path, err := configParentPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// SinkPath returns the path for the sink resource. 
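+// For example (hypothetical names), ConfigSinkPath("my-project", "my-sink")
+// renders "projects/my-project/sinks/my-sink".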
+func ConfigSinkPath(project string, sink string) string { + path, err := configSinkPathTemplate.Render(map[string]string{ + "project": project, + "sink": sink, + }) + if err != nil { + panic(err) + } + return path +} + +// ListSinks lists sinks. +func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest) *LogSinkIterator { + ctx = metadata.NewContext(ctx, c.metadata) + it := &LogSinkIterator{} + it.apiCall = func() error { + var resp *loggingpb.ListSinksResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + req.PageToken = it.nextPageToken + req.PageSize = it.pageSize + resp, err = c.client.ListSinks(ctx, req) + return err + }, c.CallOptions.ListSinks...) + if err != nil { + return err + } + if resp.NextPageToken == "" { + it.atLastPage = true + } + it.nextPageToken = resp.NextPageToken + it.items = resp.Sinks + return nil + } + return it +} + +// GetSink gets a sink. +func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest) (*loggingpb.LogSink, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.GetSink(ctx, req) + return err + }, c.CallOptions.GetSink...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateSink creates a sink. +func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest) (*loggingpb.LogSink, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.CreateSink(ctx, req) + return err + }, c.CallOptions.CreateSink...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateSink creates or updates a sink. +func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest) (*loggingpb.LogSink, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateSink(ctx, req) + return err + }, c.CallOptions.UpdateSink...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteSink deletes a sink. +func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest) error { + ctx = metadata.NewContext(ctx, c.metadata) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.client.DeleteSink(ctx, req) + return err + }, c.CallOptions.DeleteSink...) + return err +} + +// LogSinkIterator manages a stream of *loggingpb.LogSink. +type LogSinkIterator struct { + // The current page data. + items []*loggingpb.LogSink + atLastPage bool + currentIndex int + pageSize int32 + nextPageToken string + apiCall func() error +} + +// NextPage returns the next page of results. +// It will return at most the number of results specified by the last call to SetPageSize. +// If SetPageSize was never called or was called with a value less than 1, +// the page size is determined by the underlying service. +// +// NextPage may return a second return value of Done along with the last page of results. After +// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). +// +// Next and NextPage should not be used with the same iterator. 
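+//
+// A typical paging loop, sketched for an iterator obtained from
+// ConfigClient.ListSinks (handlePage is a hypothetical placeholder):
+//
+// for {
+// page, err := it.NextPage()
+// if err != nil && err != Done {
+// break // TODO: Handle error.
+// }
+// handlePage(page)
+// if err == Done {
+// break
+// }
+// }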
+func (it *LogSinkIterator) NextPage() ([]*loggingpb.LogSink, error) { + if it.atLastPage { + // We already returned Done with the last page of items. Continue to + // return Done, but with no items. + return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + if it.atLastPage { + return it.items, Done + } + return it.items, nil +} + +// Next returns the next result. Its second return value is Done if there are no more results. +// Once next returns Done, all subsequent calls will return Done. +// +// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to +// affect how many results are retrieved in a single RPC. +// +// SetPageToken should not be called when using Next. +// +// Next and NextPage should not be used with the same iterator. +func (it *LogSinkIterator) Next() (*loggingpb.LogSink, error) { + for it.currentIndex >= len(it.items) { + if it.atLastPage { + return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + it.currentIndex = 0 + } + result := it.items[it.currentIndex] + it.currentIndex++ + return result, nil +} + +// PageSize returns the page size for all subsequent calls to NextPage. +func (it *LogSinkIterator) PageSize() int { + return int(it.pageSize) +} + +// SetPageSize sets the page size for all subsequent calls to NextPage. +func (it *LogSinkIterator) SetPageSize(pageSize int) { + if pageSize > math.MaxInt32 { + pageSize = math.MaxInt32 + } + it.pageSize = int32(pageSize) +} + +// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from +// a previous point. +func (it *LogSinkIterator) SetPageToken(token string) { + it.nextPageToken = token +} + +// NextPageToken returns a page token that can be used with SetPageToken to resume +// iteration from the next page. It returns the empty string if there are no more pages. +func (it *LogSinkIterator) NextPageToken() string { + return it.nextPageToken +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go new file mode 100644 index 000000000..c9d4b9310 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go @@ -0,0 +1,125 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging_test + +import ( + "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +func ExampleNewConfigClient() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleConfigClient_ListSinks() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListSinksRequest{ + // TODO: Fill request struct fields. 
+ } + it := c.ListSinks(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleConfigClient_GetSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.GetSinkRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetSink(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_CreateSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.CreateSinkRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSink(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_UpdateSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.UpdateSinkRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateSink(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_DeleteSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteSinkRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteSink(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/doc.go b/vendor/cloud.google.com/go/logging/apiv2/doc.go new file mode 100644 index 000000000..30381824d --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/doc.go @@ -0,0 +1,22 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package logging is an experimental, auto-generated package for the +// logging API. +// +// The Google Cloud Logging API lets you write log entries and manage your +// logs, log sinks and logs-based metrics. +package logging // import "cloud.google.com/go/logging/apiv2" diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging.go b/vendor/cloud.google.com/go/logging/apiv2/logging.go new file mode 100644 index 000000000..fb9794766 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/logging.go @@ -0,0 +1,26 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import "errors" + +const ( + gapicNameVersion = "gapic/0.1.0" +) + +// Done is returned by iterators on successful completion. +var Done = errors.New("iterator done") diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go new file mode 100644 index 000000000..daa838d0c --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go @@ -0,0 +1,421 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "fmt" + "math" + "runtime" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +var ( + loggingParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + loggingLogPathTemplate = gax.MustCompilePathTemplate("projects/{project}/logs/{log}") +) + +// CallOptions contains the retry settings for each method of this client. 
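+// Note that only ListLogEntries is wired to the separate "list" retry table
+// in defaultCallOptions below; its backoff settings currently match the
+// "default" table.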
+type CallOptions struct { + DeleteLog []gax.CallOption + WriteLogEntries []gax.CallOption + ListLogEntries []gax.CallOption + ListMonitoredResourceDescriptors []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + {"list", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + + return &CallOptions{ + DeleteLog: retry[[2]string{"default", "idempotent"}], + WriteLogEntries: retry[[2]string{"default", "non_idempotent"}], + ListLogEntries: retry[[2]string{"list", "idempotent"}], + ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with LoggingServiceV2. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client loggingpb.LoggingServiceV2Client + + // The call options for this service. + CallOptions *CallOptions + + // The metadata to be sent with each request. + metadata map[string][]string +} + +// NewClient creates a new logging service client. +// +// Service for ingesting and querying logs. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + client: loggingpb.NewLoggingServiceV2Client(conn), + CallOptions: defaultCallOptions(), + } + c.SetGoogleClientInfo("gax", gax.Version) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(name, version string) { + c.metadata = map[string][]string{ + "x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())}, + } +} + +// ParentPath returns the path for the parent resource. +func LoggingParentPath(project string) string { + path, err := loggingParentPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// LogPath returns the path for the log resource. 
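+// For example (hypothetical names), LoggingLogPath("my-project", "my-log")
+// renders "projects/my-project/logs/my-log".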
+func LoggingLogPath(project string, log string) string { + path, err := loggingLogPathTemplate.Render(map[string]string{ + "project": project, + "log": log, + }) + if err != nil { + panic(err) + } + return path +} + +// DeleteLog deletes a log and all its log entries. +// The log will reappear if it receives new entries. +func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest) error { + ctx = metadata.NewContext(ctx, c.metadata) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.client.DeleteLog(ctx, req) + return err + }, c.CallOptions.DeleteLog...) + return err +} + +// WriteLogEntries writes log entries to Stackdriver Logging. All log entries are +// written by this method. +func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest) (*loggingpb.WriteLogEntriesResponse, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *loggingpb.WriteLogEntriesResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.WriteLogEntries(ctx, req) + return err + }, c.CallOptions.WriteLogEntries...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListLogEntries lists log entries. Use this method to retrieve log entries from Cloud +// Logging. For ways to export log entries, see +// [Exporting Logs](/logging/docs/export). +func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest) *LogEntryIterator { + ctx = metadata.NewContext(ctx, c.metadata) + it := &LogEntryIterator{} + it.apiCall = func() error { + var resp *loggingpb.ListLogEntriesResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + req.PageToken = it.nextPageToken + req.PageSize = it.pageSize + resp, err = c.client.ListLogEntries(ctx, req) + return err + }, c.CallOptions.ListLogEntries...) + if err != nil { + return err + } + if resp.NextPageToken == "" { + it.atLastPage = true + } + it.nextPageToken = resp.NextPageToken + it.items = resp.Entries + return nil + } + return it +} + +// ListMonitoredResourceDescriptors lists the monitored resource descriptors used by Stackdriver Logging. +func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) *MonitoredResourceDescriptorIterator { + ctx = metadata.NewContext(ctx, c.metadata) + it := &MonitoredResourceDescriptorIterator{} + it.apiCall = func() error { + var resp *loggingpb.ListMonitoredResourceDescriptorsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + req.PageToken = it.nextPageToken + req.PageSize = it.pageSize + resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req) + return err + }, c.CallOptions.ListMonitoredResourceDescriptors...) + if err != nil { + return err + } + if resp.NextPageToken == "" { + it.atLastPage = true + } + it.nextPageToken = resp.NextPageToken + it.items = resp.ResourceDescriptors + return nil + } + return it +} + +// LogEntryIterator manages a stream of *loggingpb.LogEntry. +type LogEntryIterator struct { + // The current page data. + items []*loggingpb.LogEntry + atLastPage bool + currentIndex int + pageSize int32 + nextPageToken string + apiCall func() error +} + +// NextPage returns the next page of results. +// It will return at most the number of results specified by the last call to SetPageSize. 
+// If SetPageSize was never called or was called with a value less than 1, +// the page size is determined by the underlying service. +// +// NextPage may return a second return value of Done along with the last page of results. After +// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). +// +// Next and NextPage should not be used with the same iterator. +func (it *LogEntryIterator) NextPage() ([]*loggingpb.LogEntry, error) { + if it.atLastPage { + // We already returned Done with the last page of items. Continue to + // return Done, but with no items. + return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + if it.atLastPage { + return it.items, Done + } + return it.items, nil +} + +// Next returns the next result. Its second return value is Done if there are no more results. +// Once next returns Done, all subsequent calls will return Done. +// +// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to +// affect how many results are retrieved in a single RPC. +// +// SetPageToken should not be called when using Next. +// +// Next and NextPage should not be used with the same iterator. +func (it *LogEntryIterator) Next() (*loggingpb.LogEntry, error) { + for it.currentIndex >= len(it.items) { + if it.atLastPage { + return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + it.currentIndex = 0 + } + result := it.items[it.currentIndex] + it.currentIndex++ + return result, nil +} + +// PageSize returns the page size for all subsequent calls to NextPage. +func (it *LogEntryIterator) PageSize() int { + return int(it.pageSize) +} + +// SetPageSize sets the page size for all subsequent calls to NextPage. +func (it *LogEntryIterator) SetPageSize(pageSize int) { + if pageSize > math.MaxInt32 { + pageSize = math.MaxInt32 + } + it.pageSize = int32(pageSize) +} + +// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from +// a previous point. +func (it *LogEntryIterator) SetPageToken(token string) { + it.nextPageToken = token +} + +// NextPageToken returns a page token that can be used with SetPageToken to resume +// iteration from the next page. It returns the empty string if there are no more pages. +func (it *LogEntryIterator) NextPageToken() string { + return it.nextPageToken +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + // The current page data. + items []*monitoredrespb.MonitoredResourceDescriptor + atLastPage bool + currentIndex int + pageSize int32 + nextPageToken string + apiCall func() error +} + +// NextPage returns the next page of results. +// It will return at most the number of results specified by the last call to SetPageSize. +// If SetPageSize was never called or was called with a value less than 1, +// the page size is determined by the underlying service. +// +// NextPage may return a second return value of Done along with the last page of results. After +// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). +// +// Next and NextPage should not be used with the same iterator. +func (it *MonitoredResourceDescriptorIterator) NextPage() ([]*monitoredrespb.MonitoredResourceDescriptor, error) { + if it.atLastPage { + // We already returned Done with the last page of items. Continue to + // return Done, but with no items. 
+ return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + if it.atLastPage { + return it.items, Done + } + return it.items, nil +} + +// Next returns the next result. Its second return value is Done if there are no more results. +// Once next returns Done, all subsequent calls will return Done. +// +// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to +// affect how many results are retrieved in a single RPC. +// +// SetPageToken should not be called when using Next. +// +// Next and NextPage should not be used with the same iterator. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + for it.currentIndex >= len(it.items) { + if it.atLastPage { + return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + it.currentIndex = 0 + } + result := it.items[it.currentIndex] + it.currentIndex++ + return result, nil +} + +// PageSize returns the page size for all subsequent calls to NextPage. +func (it *MonitoredResourceDescriptorIterator) PageSize() int { + return int(it.pageSize) +} + +// SetPageSize sets the page size for all subsequent calls to NextPage. +func (it *MonitoredResourceDescriptorIterator) SetPageSize(pageSize int) { + if pageSize > math.MaxInt32 { + pageSize = math.MaxInt32 + } + it.pageSize = int32(pageSize) +} + +// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from +// a previous point. +func (it *MonitoredResourceDescriptorIterator) SetPageToken(token string) { + it.nextPageToken = token +} + +// NextPageToken returns a page token that can be used with SetPageToken to resume +// iteration from the next page. It returns the empty string if there are no more pages. +func (it *MonitoredResourceDescriptorIterator) NextPageToken() string { + return it.nextPageToken +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go new file mode 100644 index 000000000..aea1a75c7 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go @@ -0,0 +1,111 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging_test + +import ( + "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_DeleteLog() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteLogRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteLog(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } +} + +func ExampleClient_WriteLogEntries() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.WriteLogEntriesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.WriteLogEntries(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListLogEntries() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListLogEntriesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListLogEntries(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_ListMonitoredResourceDescriptors() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListMonitoredResourceDescriptorsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListMonitoredResourceDescriptors(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go new file mode 100644 index 000000000..9c65913c3 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go @@ -0,0 +1,326 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "fmt" + "math" + "runtime" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +var ( + metricsParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + metricsMetricPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metrics/{metric}") +) + +// MetricsCallOptions contains the retry settings for each method of this client. 
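+// As with the other clients in this package, only methods mapped to the
+// "idempotent" entry below are retried by default; CreateLogMetric and
+// UpdateLogMetric are mapped to "non_idempotent", for which no retryer is
+// defined, so they are attempted once.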
+type MetricsCallOptions struct { + ListLogMetrics []gax.CallOption + GetLogMetric []gax.CallOption + CreateLogMetric []gax.CallOption + UpdateLogMetric []gax.CallOption + DeleteLogMetric []gax.CallOption +} + +func defaultMetricsClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + } +} + +func defaultMetricsCallOptions() *MetricsCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + + return &MetricsCallOptions{ + ListLogMetrics: retry[[2]string{"default", "idempotent"}], + GetLogMetric: retry[[2]string{"default", "idempotent"}], + CreateLogMetric: retry[[2]string{"default", "non_idempotent"}], + UpdateLogMetric: retry[[2]string{"default", "non_idempotent"}], + DeleteLogMetric: retry[[2]string{"default", "idempotent"}], + } +} + +// MetricsClient is a client for interacting with MetricsServiceV2. +type MetricsClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client loggingpb.MetricsServiceV2Client + + // The call options for this service. + CallOptions *MetricsCallOptions + + // The metadata to be sent with each request. + metadata map[string][]string +} + +// NewMetricsClient creates a new metrics service client. +// +// Service for configuring logs-based metrics. +func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*MetricsClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultMetricsClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &MetricsClient{ + conn: conn, + client: loggingpb.NewMetricsServiceV2Client(conn), + CallOptions: defaultMetricsCallOptions(), + } + c.SetGoogleClientInfo("gax", gax.Version) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *MetricsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricsClient) SetGoogleClientInfo(name, version string) { + c.metadata = map[string][]string{ + "x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())}, + } +} + +// ParentPath returns the path for the parent resource. +func MetricsParentPath(project string) string { + path, err := metricsParentPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// MetricPath returns the path for the metric resource. 
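+// For example (hypothetical names), MetricsMetricPath("my-project", "my-metric")
+// renders "projects/my-project/metrics/my-metric".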
+func MetricsMetricPath(project string, metric string) string { + path, err := metricsMetricPathTemplate.Render(map[string]string{ + "project": project, + "metric": metric, + }) + if err != nil { + panic(err) + } + return path +} + +// ListLogMetrics lists logs-based metrics. +func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest) *LogMetricIterator { + ctx = metadata.NewContext(ctx, c.metadata) + it := &LogMetricIterator{} + it.apiCall = func() error { + var resp *loggingpb.ListLogMetricsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + req.PageToken = it.nextPageToken + req.PageSize = it.pageSize + resp, err = c.client.ListLogMetrics(ctx, req) + return err + }, c.CallOptions.ListLogMetrics...) + if err != nil { + return err + } + if resp.NextPageToken == "" { + it.atLastPage = true + } + it.nextPageToken = resp.NextPageToken + it.items = resp.Metrics + return nil + } + return it +} + +// GetLogMetric gets a logs-based metric. +func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.GetLogMetric(ctx, req) + return err + }, c.CallOptions.GetLogMetric...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateLogMetric creates a logs-based metric. +func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.CreateLogMetric(ctx, req) + return err + }, c.CallOptions.CreateLogMetric...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateLogMetric creates or updates a logs-based metric. +func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateLogMetric(ctx, req) + return err + }, c.CallOptions.UpdateLogMetric...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteLogMetric deletes a logs-based metric. +func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest) error { + ctx = metadata.NewContext(ctx, c.metadata) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.client.DeleteLogMetric(ctx, req) + return err + }, c.CallOptions.DeleteLogMetric...) + return err +} + +// LogMetricIterator manages a stream of *loggingpb.LogMetric. +type LogMetricIterator struct { + // The current page data. + items []*loggingpb.LogMetric + atLastPage bool + currentIndex int + pageSize int32 + nextPageToken string + apiCall func() error +} + +// NextPage returns the next page of results. +// It will return at most the number of results specified by the last call to SetPageSize. +// If SetPageSize was never called or was called with a value less than 1, +// the page size is determined by the underlying service. +// +// NextPage may return a second return value of Done along with the last page of results. 
After +// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). +// +// Next and NextPage should not be used with the same iterator. +func (it *LogMetricIterator) NextPage() ([]*loggingpb.LogMetric, error) { + if it.atLastPage { + // We already returned Done with the last page of items. Continue to + // return Done, but with no items. + return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + if it.atLastPage { + return it.items, Done + } + return it.items, nil +} + +// Next returns the next result. Its second return value is Done if there are no more results. +// Once next returns Done, all subsequent calls will return Done. +// +// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to +// affect how many results are retrieved in a single RPC. +// +// SetPageToken should not be called when using Next. +// +// Next and NextPage should not be used with the same iterator. +func (it *LogMetricIterator) Next() (*loggingpb.LogMetric, error) { + for it.currentIndex >= len(it.items) { + if it.atLastPage { + return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + it.currentIndex = 0 + } + result := it.items[it.currentIndex] + it.currentIndex++ + return result, nil +} + +// PageSize returns the page size for all subsequent calls to NextPage. +func (it *LogMetricIterator) PageSize() int { + return int(it.pageSize) +} + +// SetPageSize sets the page size for all subsequent calls to NextPage. +func (it *LogMetricIterator) SetPageSize(pageSize int) { + if pageSize > math.MaxInt32 { + pageSize = math.MaxInt32 + } + it.pageSize = int32(pageSize) +} + +// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from +// a previous point. +func (it *LogMetricIterator) SetPageToken(token string) { + it.nextPageToken = token +} + +// NextPageToken returns a page token that can be used with SetPageToken to resume +// iteration from the next page. It returns the empty string if there are no more pages. +func (it *LogMetricIterator) NextPageToken() string { + return it.nextPageToken +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go new file mode 100644 index 000000000..671634cc2 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go @@ -0,0 +1,125 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging_test + +import ( + "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +func ExampleNewMetricsClient() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. 
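+	// (Not shown in the generated example: the client owns a gRPC
+	// connection, so call c.Close when finished to release it.)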
+ _ = c +} + +func ExampleMetricsClient_ListLogMetrics() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListLogMetricsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListLogMetrics(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleMetricsClient_GetLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.GetLogMetricRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricsClient_CreateLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.CreateLogMetricRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricsClient_UpdateLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.UpdateLogMetricRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricsClient_DeleteLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteLogMetricRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/logging/logging.go b/vendor/cloud.google.com/go/logging/logging.go new file mode 100644 index 000000000..8727d36ab --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logging.go @@ -0,0 +1,478 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package logging contains a Google Cloud Logging client. +// +// This package is experimental and subject to API changes. +package logging // import "cloud.google.com/go/logging" + +import ( + "errors" + "io" + "log" + "sync" + "time" + + "golang.org/x/net/context" + api "google.golang.org/api/logging/v1beta3" + "google.golang.org/api/option" + "google.golang.org/api/transport" +) + +// Scope is the OAuth2 scope necessary to use Google Cloud Logging. +const Scope = api.LoggingWriteScope + +// Level is the log level. +type Level int + +const ( + // Default means no assigned severity level. 
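+	// The remaining constants mirror the Cloud Logging severity names;
+	// levelName below holds the strings sent to the API.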
+ Default Level = iota + Debug + Info + Warning + Error + Critical + Alert + Emergency + nLevel +) + +var levelName = [nLevel]string{ + Default: "", + Debug: "DEBUG", + Info: "INFO", + Warning: "WARNING", + Error: "ERROR", + Critical: "CRITICAL", + Alert: "ALERT", + Emergency: "EMERGENCY", +} + +func (v Level) String() string { + return levelName[v] +} + +// Client is a Google Cloud Logging client. +// It must be constructed via NewClient. +type Client struct { + svc *api.Service + logs *api.ProjectsLogsEntriesService + projID string + logName string + writer [nLevel]io.Writer + logger [nLevel]*log.Logger + + mu sync.Mutex + queued []*api.LogEntry + curFlush *flushCall // currently in-flight flush + flushTimer *time.Timer // nil before first use + timerActive bool // whether flushTimer is armed + inFlight int // number of log entries sent to API service but not yet ACKed + + // For testing: + timeNow func() time.Time // optional + + // ServiceName may be "appengine.googleapis.com", + // "compute.googleapis.com" or "custom.googleapis.com". + // + // The default is "custom.googleapis.com". + // + // The service name is only used by the API server to + // determine which of the labels are used to index the logs. + ServiceName string + + // CommonLabels are metadata labels that apply to all log + // entries in this request, so that you don't have to repeat + // them in each log entry's metadata.labels field. If any of + // the log entries contains a (key, value) with the same key + // that is in CommonLabels, then the entry's (key, value) + // overrides the one in CommonLabels. + CommonLabels map[string]string + + // BufferLimit is the maximum number of items to keep in memory + // before flushing. Zero means automatic. A value of 1 means to + // flush after each log entry. + // The default is currently 10,000. + BufferLimit int + + // FlushAfter optionally specifies a threshold count at which buffered + // log entries are flushed, even if the BufferInterval has not yet + // been reached. + // The default is currently 10. + FlushAfter int + + // BufferInterval is the maximum amount of time that an item + // should remain buffered in memory before being flushed to + // the logging service. + // The default is currently 1 second. + BufferInterval time.Duration + + // Overflow is a function which runs when the Log function + // overflows its configured buffer limit. If nil, the log + // entry is dropped. The return value from Overflow is + // returned by Log. + Overflow func(*Client, Entry) error +} + +func (c *Client) flushAfter() int { + if v := c.FlushAfter; v > 0 { + return v + } + return 10 +} + +func (c *Client) bufferInterval() time.Duration { + if v := c.BufferInterval; v > 0 { + return v + } + return time.Second +} + +func (c *Client) bufferLimit() int { + if v := c.BufferLimit; v > 0 { + return v + } + return 10000 +} + +func (c *Client) serviceName() string { + if v := c.ServiceName; v != "" { + return v + } + return "custom.googleapis.com" +} + +func (c *Client) now() time.Time { + if now := c.timeNow; now != nil { + return now() + } + return time.Now() +} + +// Writer returns an io.Writer for the provided log level. +// +// Each Write call on the returned Writer generates a log entry. +// +// This Writer accessor does not allocate, so callers do not need to +// cache. +func (c *Client) Writer(v Level) io.Writer { return c.writer[v] } + +// Logger returns a *log.Logger for the provided log level. 
+// +// A Logger for each Level is pre-allocated by NewClient with an empty +// prefix and no flags. This Logger accessor does not allocate. +// Callers wishing to use alternate flags (such as log.Lshortfile) may +// mutate the returned Logger with SetFlags. Such mutations affect all +// callers in the program. +func (c *Client) Logger(v Level) *log.Logger { return c.logger[v] } + +type levelWriter struct { + level Level + c *Client +} + +func (w levelWriter) Write(p []byte) (n int, err error) { + return len(p), w.c.Log(Entry{ + Level: w.level, + Payload: string(p), + }) +} + +// Entry is a log entry. +type Entry struct { + // Time is the time of the entry. If the zero value, the current time is used. + Time time.Time + + // Level is log entry's severity level. + // The zero value means no assigned severity level. + Level Level + + // Payload must be either a string, []byte, or something that + // marshals via the encoding/json package to a JSON object + // (and not any other type of JSON value). + Payload interface{} + + // Labels optionally specifies key/value labels for the log entry. + // Depending on the Client's ServiceName, these are indexed differently + // by the Cloud Logging Service. + // See https://cloud.google.com/logging/docs/logs_index + // The Client.Log method takes ownership of this map. + Labels map[string]string + + // TODO: de-duping id +} + +func (c *Client) apiEntry(e Entry) (*api.LogEntry, error) { + t := e.Time + if t.IsZero() { + t = c.now() + } + + ent := &api.LogEntry{ + Metadata: &api.LogEntryMetadata{ + Timestamp: t.UTC().Format(time.RFC3339Nano), + ServiceName: c.serviceName(), + Severity: e.Level.String(), + Labels: e.Labels, + }, + } + switch p := e.Payload.(type) { + case string: + ent.TextPayload = p + case []byte: + ent.TextPayload = string(p) + default: + ent.StructPayload = api.LogEntryStructPayload(p) + } + return ent, nil +} + +// LogSync logs e synchronously without any buffering. +// This is mostly intended for debugging or critical errors. +func (c *Client) LogSync(e Entry) error { + ent, err := c.apiEntry(e) + if err != nil { + return err + } + _, err = c.logs.Write(c.projID, c.logName, &api.WriteLogEntriesRequest{ + CommonLabels: c.CommonLabels, + Entries: []*api.LogEntry{ent}, + }).Do() + return err +} + +var ErrOverflow = errors.New("logging: log entry overflowed buffer limits") + +// Log queues an entry to be sent to the logging service, subject to the +// Client's parameters. By default, the log will be flushed within +// one second. +// Log only returns an error if the entry is invalid or the queue is at +// capacity. If the queue is at capacity and the entry can't be added, +// Log returns either ErrOverflow when c.Overflow is nil, or the +// value returned by c.Overflow. +func (c *Client) Log(e Entry) error { + ent, err := c.apiEntry(e) + if err != nil { + return err + } + + c.mu.Lock() + buffered := len(c.queued) + c.inFlight + + if buffered >= c.bufferLimit() { + c.mu.Unlock() + if fn := c.Overflow; fn != nil { + return fn(c, e) + } + return ErrOverflow + } + defer c.mu.Unlock() + + c.queued = append(c.queued, ent) + if len(c.queued) >= c.flushAfter() { + c.scheduleFlushLocked(0) + return nil + } + c.scheduleFlushLocked(c.bufferInterval()) + return nil +} + +// c.mu must be held. +// +// d will be one of two values: either c.BufferInterval (or its +// default value) or 0. 
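+//
+// A worked example with the defaults (flushAfter()=10, bufferInterval()=1s),
+// assuming no flush is already in flight:
+//
+//	c.Log(e1)  // len(queued)=1  -> scheduleFlushLocked(1 * time.Second)
+//	...
+//	c.Log(e10) // len(queued)=10 -> scheduleFlushLocked(0), immediate flush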
+func (c *Client) scheduleFlushLocked(d time.Duration) {
+	if c.inFlight > 0 {
+		// For now to keep things simple, only allow one HTTP
+		// request in flight at a time.
+		return
+	}
+	switch {
+	case c.flushTimer == nil:
+		// First flush.
+		c.timerActive = true
+		c.flushTimer = time.AfterFunc(d, c.timeoutFlush)
+	case c.timerActive && d == 0:
+		// Make it happen sooner. For example, this is the
+		// case of transitioning from a 1 second flush after
+		// the 1st item to an immediate flush after the 10th
+		// item.
+		c.flushTimer.Reset(0)
+	case !c.timerActive:
+		c.timerActive = true
+		c.flushTimer.Reset(d)
+	default:
+		// else timer was already active, also at d > 0,
+		// so we don't touch it and let it fire as previously
+		// scheduled.
+	}
+}
+
+// timeoutFlush runs in its own goroutine (from time.AfterFunc) and
+// flushes c.queued.
+func (c *Client) timeoutFlush() {
+	c.mu.Lock()
+	c.timerActive = false
+	c.mu.Unlock()
+	if err := c.Flush(); err != nil {
+		// schedule another try
+		// TODO: smarter back-off?
+		c.mu.Lock()
+		c.scheduleFlushLocked(5 * time.Second)
+		c.mu.Unlock()
+	}
+}
+
+// Ping reports whether the client's connection to Google Cloud Logging and the
+// authentication configuration are valid. To accomplish this, Ping writes a
+// log entry "ping" to a log named "ping".
+func (c *Client) Ping() error {
+	ent := &api.LogEntry{
+		Metadata: &api.LogEntryMetadata{
+			// Identical timestamps required for deduping in addition to identical insert IDs.
+			Timestamp:   time.Unix(0, 0).UTC().Format(time.RFC3339Nano),
+			ServiceName: c.serviceName(),
+		},
+		InsertId:    "ping", // dedup, so there is only ever one entry
+		TextPayload: "ping",
+	}
+	_, err := c.logs.Write(c.projID, "ping", &api.WriteLogEntriesRequest{
+		Entries: []*api.LogEntry{ent},
+	}).Do()
+	return err
+}
+
+// Flush flushes any buffered log entries.
+func (c *Client) Flush() error {
+	var numFlush int
+	c.mu.Lock()
+	for {
+		// We're already flushing (or we just started flushing
+		// ourselves), so wait for it to finish.
+		if f := c.curFlush; f != nil {
+			wasEmpty := len(c.queued) == 0
+			c.mu.Unlock()
+			<-f.donec // wait for it
+			numFlush++
+			// Terminate whenever there's an error, we've
+			// already flushed twice (one that was already
+			// in-flight when flush was called, and then
+			// one we instigated), or the queue was empty
+			// when we released the lock (meaning this
+			// in-flight flush removes everything present
+			// when Flush was called, and we don't need to
+			// kick off a new flush for things arriving
+			// afterward).
+			if f.err != nil || numFlush == 2 || wasEmpty {
+				return f.err
+			}
+			// Otherwise, re-obtain the lock and loop,
+			// starting over with seeing if a flush is in
+			// progress, which might've been started by a
+			// different goroutine before acquiring this
+			// lock again.
+			c.mu.Lock()
+			continue
+		}
+
+		// Terminal case:
+		if len(c.queued) == 0 {
+			c.mu.Unlock()
+			return nil
+		}
+
+		c.startFlushLocked()
+	}
+}
+
+// requires c.mu be held.
+func (c *Client) startFlushLocked() {
+	if c.curFlush != nil {
+		panic("internal error: flush already in flight")
+	}
+	if len(c.queued) == 0 {
+		panic("internal error: no items queued")
+	}
+	logEntries := c.queued
+	c.inFlight = len(logEntries)
+	c.queued = nil
+
+	flush := &flushCall{
+		donec: make(chan struct{}),
+	}
+	c.curFlush = flush
+	go func() {
+		defer close(flush.donec)
+		_, err := c.logs.Write(c.projID, c.logName, &api.WriteLogEntriesRequest{
+			CommonLabels: c.CommonLabels,
+			Entries:      logEntries,
+		}).Do()
+		flush.err = err
+		c.mu.Lock()
+		defer c.mu.Unlock()
+		c.inFlight = 0
+		c.curFlush = nil
+		if err != nil {
+			c.queued = append(c.queued, logEntries...)
+		} else if len(c.queued) > 0 {
+			c.scheduleFlushLocked(c.bufferInterval())
+		}
+	}()
+}
+
+const prodAddr = "https://logging.googleapis.com/"
+
+const userAgent = "gcloud-golang-logging/20150922"
+
+// NewClient returns a new log client, logging to the named log in the
+// provided project.
+//
+// The exported fields on the returned client may be modified before
+// the client is used for logging. Once log entries are in flight,
+// the fields must not be modified.
+func NewClient(ctx context.Context, projectID, logName string, opts ...option.ClientOption) (*Client, error) {
+	httpClient, endpoint, err := transport.NewHTTPClient(ctx, append([]option.ClientOption{
+		option.WithEndpoint(prodAddr),
+		option.WithScopes(Scope),
+		option.WithUserAgent(userAgent),
+	}, opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	svc, err := api.New(httpClient)
+	if err != nil {
+		return nil, err
+	}
+	svc.BasePath = endpoint
+	c := &Client{
+		svc:     svc,
+		logs:    api.NewProjectsLogsEntriesService(svc),
+		logName: logName,
+		projID:  projectID,
+	}
+	for i := range c.writer {
+		level := Level(i)
+		c.writer[level] = levelWriter{level, c}
+		c.logger[level] = log.New(c.writer[level], "", 0)
+	}
+	return c, nil
+}
+
+// flushCall is an in-flight or completed flush.
+type flushCall struct {
+	donec chan struct{} // closed when response is in
+	err   error         // valid only after donec is closed
+}
diff --git a/vendor/cloud.google.com/go/logging/logging_test.go b/vendor/cloud.google.com/go/logging/logging_test.go
new file mode 100644
index 000000000..fbe571542
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/logging_test.go
@@ -0,0 +1,394 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
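+
+// The tests below drive the buffering client against a local httptest.Server;
+// newLogTest overrides the client's unexported timeNow hook so the expected
+// request timestamps are deterministic.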
+ +package logging + +import ( + "errors" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + + "cloud.google.com/go/internal/testutil" + "google.golang.org/api/option" +) + +func TestLogPayload(t *testing.T) { + lt := newLogTest(t) + defer lt.ts.Close() + + tests := []struct { + name string + entry Entry + want string + }{ + { + name: "string", + entry: Entry{ + Time: time.Unix(0, 0), + Payload: "some log string", + }, + want: `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","timestamp":"1970-01-01T00:00:00Z"},"textPayload":"some log string"}]}`, + }, + { + name: "[]byte", + entry: Entry{ + Time: time.Unix(0, 0), + Payload: []byte("some log bytes"), + }, + want: `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","timestamp":"1970-01-01T00:00:00Z"},"textPayload":"some log bytes"}]}`, + }, + { + name: "struct", + entry: Entry{ + Time: time.Unix(0, 0), + Payload: struct { + Foo string `json:"foo"` + Bar int `json:"bar,omitempty"` + }{ + Foo: "foovalue", + }, + }, + want: `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","timestamp":"1970-01-01T00:00:00Z"},"structPayload":{"foo":"foovalue"}}]}`, + }, + { + name: "map[string]interface{}", + entry: Entry{ + Time: time.Unix(0, 0), + Payload: map[string]interface{}{ + "string": "foo", + "int": 42, + }, + }, + want: `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","timestamp":"1970-01-01T00:00:00Z"},"structPayload":{"int":42,"string":"foo"}}]}`, + }, + { + name: "map[string]interface{}", + entry: Entry{ + Time: time.Unix(0, 0), + Payload: customJSONObject{}, + }, + want: `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","timestamp":"1970-01-01T00:00:00Z"},"structPayload":{"custom":"json"}}]}`, + }, + } + for _, tt := range tests { + lt.startGetRequest() + if err := lt.c.LogSync(tt.entry); err != nil { + t.Errorf("%s: LogSync = %v", tt.name, err) + continue + } + got := lt.getRequest() + if got != tt.want { + t.Errorf("%s: mismatch\n got: %s\nwant: %s\n", tt.name, got, tt.want) + } + } +} + +func TestBufferInterval(t *testing.T) { + lt := newLogTest(t) + defer lt.ts.Close() + + lt.c.CommonLabels = map[string]string{ + "common1": "one", + "common2": "two", + } + lt.c.BufferInterval = 1 * time.Millisecond // immediately, basically. 
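+	// FlushAfter is set high below so that only the 1ms interval, not the
+	// entry count, can trigger the flush under test.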
+ lt.c.FlushAfter = 100 // but we'll only send 1 + + lt.startGetRequest() + lt.c.Logger(Debug).Printf("log line 1") + got := lt.getRequest() + want := `{"commonLabels":{"common1":"one","common2":"two"},"entries":[{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:01Z"},"textPayload":"log line 1\n"}]}` + if got != want { + t.Errorf(" got: %s\nwant: %s\n", got, want) + } +} + +func TestFlushAfter(t *testing.T) { + lt := newLogTest(t) + defer lt.ts.Close() + + lt.c.CommonLabels = map[string]string{ + "common1": "one", + "common2": "two", + } + lt.c.BufferInterval = getRequestTimeout * 2 + lt.c.FlushAfter = 2 + + lt.c.Logger(Debug).Printf("log line 1") + lt.startGetRequest() + lt.c.Logger(Debug).Printf("log line 2") + got := lt.getRequest() + want := `{"commonLabels":{"common1":"one","common2":"two"},"entries":[{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:01Z"},"textPayload":"log line 1\n"},{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:02Z"},"textPayload":"log line 2\n"}]}` + if got != want { + t.Errorf(" got: %s\nwant: %s\n", got, want) + } +} + +func TestFlush(t *testing.T) { + lt := newLogTest(t) + defer lt.ts.Close() + lt.c.BufferInterval = getRequestTimeout * 2 + lt.c.FlushAfter = 100 // but we'll only send 1, requiring a Flush + + lt.c.Logger(Debug).Printf("log line 1") + lt.startGetRequest() + if err := lt.c.Flush(); err != nil { + t.Fatal(err) + } + got := lt.getRequest() + want := `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:01Z"},"textPayload":"log line 1\n"}]}` + if got != want { + t.Errorf(" got: %s\nwant: %s\n", got, want) + } +} + +func TestOverflow(t *testing.T) { + lt := newLogTest(t) + defer lt.ts.Close() + + lt.c.FlushAfter = 1 + lt.c.BufferLimit = 5 + lt.c.BufferInterval = 1 * time.Millisecond // immediately, basically. 
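+	// With FlushAfter=1 the first entry is flushed immediately and then
+	// held in flight by the blocked handler below; the next four entries
+	// queue up (1 in flight + 4 queued = BufferLimit of 5), so a sixth
+	// Log call must take the Overflow path.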
+ + someErr := errors.New("some specific error value") + lt.c.Overflow = func(c *Client, e Entry) error { + return someErr + } + + unblock := make(chan bool, 1) + inHandler := make(chan bool, 1) + lt.handlerc <- http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + <-unblock + ioutil.ReadAll(r.Body) + io.WriteString(w, "{}") // WriteLogEntriesResponse + }) + + lt.c.Logger(Debug).Printf("log line 1") + <-inHandler + lt.c.Logger(Debug).Printf("log line 2") + lt.c.Logger(Debug).Printf("log line 3") + lt.c.Logger(Debug).Printf("log line 4") + lt.c.Logger(Debug).Printf("log line 5") + + queued, inFlight := lt.c.stats() + if want := 4; queued != want { + t.Errorf("queued = %d; want %d", queued, want) + } + if want := 1; inFlight != want { + t.Errorf("inFlight = %d; want %d", inFlight, want) + } + + if err := lt.c.Log(Entry{Payload: "to overflow"}); err != someErr { + t.Errorf("Log(overflow Log entry) = %v; want someErr", err) + } + lt.startGetRequest() + unblock <- true + got := lt.getRequest() + want := `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:02Z"},"textPayload":"log line 2\n"},{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:03Z"},"textPayload":"log line 3\n"},{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:04Z"},"textPayload":"log line 4\n"},{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:05Z"},"textPayload":"log line 5\n"}]}` + if got != want { + t.Errorf(" got: %s\nwant: %s\n", got, want) + } + if err := lt.c.Flush(); err != nil { + t.Fatal(err) + } + queued, inFlight = lt.c.stats() + if want := 0; queued != want { + t.Errorf("queued = %d; want %d", queued, want) + } + if want := 0; inFlight != want { + t.Errorf("inFlight = %d; want %d", inFlight, want) + } +} + +func TestIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + ts := testutil.TokenSource(ctx, Scope) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projID := testutil.ProjID() + + c, err := NewClient(ctx, projID, "logging-integration-test", option.WithTokenSource(ts)) + if err != nil { + t.Fatalf("error creating client: %v", err) + } + + if err := c.Ping(); err != nil { + t.Fatalf("error pinging logging api: %v", err) + } + // Ping twice, to verify that deduping doesn't change the result. + if err := c.Ping(); err != nil { + t.Fatalf("error pinging logging api: %v", err) + } + + if err := c.LogSync(Entry{Payload: customJSONObject{}}); err != nil { + t.Fatalf("error writing log: %v", err) + } + + if err := c.Log(Entry{Payload: customJSONObject{}}); err != nil { + t.Fatalf("error writing log: %v", err) + } + + if _, err := c.Writer(Default).Write([]byte("test log with io.Writer")); err != nil { + t.Fatalf("error writing log using io.Writer: %v", err) + } + + c.Logger(Default).Println("test log with log.Logger") + + if err := c.Flush(); err != nil { + t.Fatalf("error flushing logs: %v", err) + } +} + +func TestIntegrationPingBadProject(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + ts := testutil.TokenSource(ctx, Scope) + if ts == nil { + t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") + } + + for _, projID := range []string{ + testutil.ProjID() + "-BAD", // nonexistent project + "amazing-height-519", // exists, but wrong creds + } { + c, err := NewClient(ctx, projID, "logging-integration-test", option.WithTokenSource(ts)) + if err != nil { + t.Fatalf("project %s: error creating client: %v", projID, err) + } + if err := c.Ping(); err == nil { + t.Errorf("project %s: want error pinging logging api, got nil", projID) + } + // Ping twice, just to make sure the deduping doesn't mess with the result. + if err := c.Ping(); err == nil { + t.Errorf("project %s: want error pinging logging api, got nil", projID) + } + } +} + +func (c *Client) stats() (queued, inFlight int) { + c.mu.Lock() + defer c.mu.Unlock() + return len(c.queued), c.inFlight +} + +type customJSONObject struct{} + +func (customJSONObject) MarshalJSON() ([]byte, error) { + return []byte(`{"custom":"json"}`), nil +} + +type logTest struct { + t *testing.T + ts *httptest.Server + c *Client + handlerc chan<- http.Handler + + bodyc chan string +} + +func newLogTest(t *testing.T) *logTest { + handlerc := make(chan http.Handler, 1) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + select { + case h := <-handlerc: + h.ServeHTTP(w, r) + default: + slurp, _ := ioutil.ReadAll(r.Body) + t.Errorf("Unexpected HTTP request received: %s", slurp) + w.WriteHeader(500) + io.WriteString(w, "unexpected HTTP request") + } + })) + c, err := NewClient(context.Background(), "PROJ-ID", "LOG-NAME", + option.WithEndpoint(ts.URL), + option.WithTokenSource(dummyTokenSource{}), // prevent DefaultTokenSource + ) + if err != nil { + t.Fatal(err) + } + var clock struct { + sync.Mutex + now time.Time + } + c.timeNow = func() time.Time { + clock.Lock() + defer clock.Unlock() + if clock.now.IsZero() { + clock.now = time.Unix(0, 0) + } + clock.now = clock.now.Add(1 * time.Second) + return clock.now + } + return &logTest{ + t: t, + ts: ts, + c: c, + handlerc: handlerc, + } +} + +func (lt *logTest) startGetRequest() { + bodyc := make(chan string, 1) + lt.bodyc = bodyc + lt.handlerc <- http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + slurp, err := ioutil.ReadAll(r.Body) + if err != nil { + bodyc <- "ERROR: " + err.Error() + } else { + bodyc <- string(slurp) + } + io.WriteString(w, "{}") // a complete WriteLogEntriesResponse JSON struct + }) +} + +const getRequestTimeout = 5 * time.Second + +func (lt *logTest) getRequest() string { + if lt.bodyc == nil { + lt.t.Fatalf("getRequest called without previous startGetRequest") + } + select { + case v := <-lt.bodyc: + return strings.TrimSpace(v) + case <-time.After(getRequestTimeout): + lt.t.Fatalf("timeout waiting for request") + panic("unreachable") + } +} + +// dummyTokenSource returns fake oauth2 tokens for local testing. +type dummyTokenSource struct{} + +func (dummyTokenSource) Token() (*oauth2.Token, error) { + return new(oauth2.Token), nil +} diff --git a/vendor/cloud.google.com/go/preview/logging/example_entry_iterator_test.go b/vendor/cloud.google.com/go/preview/logging/example_entry_iterator_test.go new file mode 100644 index 000000000..1e43e5293 --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/example_entry_iterator_test.go @@ -0,0 +1,66 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging_test + +import ( + "fmt" + "time" + + "cloud.google.com/go/preview/logging" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Entries() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Entries(ctx, logging.Filter(`logName = "projects/my-project/logs/my-log"`)) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleFilter_timestamp() { + // This example demonstrates how to list the last 24 hours of log entries. + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + oneDayAgo := time.Now().Add(-24 * time.Hour) + t := oneDayAgo.Format(time.RFC3339) // Logging API wants timestamps in RFC 3339 format. + it := client.Entries(ctx, logging.Filter(fmt.Sprintf(`timestamp > "%s"`, t))) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleEntryIterator_Next() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Entries(ctx) + for { + entry, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(entry) + } +} diff --git a/vendor/cloud.google.com/go/preview/logging/example_metric_iterator_test.go b/vendor/cloud.google.com/go/preview/logging/example_metric_iterator_test.go new file mode 100644 index 000000000..fae8a44b8 --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/example_metric_iterator_test.go @@ -0,0 +1,52 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging_test + +import ( + "fmt" + + "cloud.google.com/go/preview/logging" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Metrics() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Metrics(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleMetricIterator_Next() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Metrics(ctx) + for { + metric, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. 
+ } + fmt.Println(metric) + } +} diff --git a/vendor/cloud.google.com/go/preview/logging/example_paging_test.go b/vendor/cloud.google.com/go/preview/logging/example_paging_test.go new file mode 100644 index 000000000..cc6159338 --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/example_paging_test.go @@ -0,0 +1,91 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging_test + +import ( + "bytes" + "flag" + "fmt" + "html/template" + "log" + "net/http" + + "cloud.google.com/go/preview/logging" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +var ( + client *logging.Client + projectID = flag.String("project-id", "", "ID of the project to use") +) + +func ExampleClient_Entries_pagination() { + // This example demonstrates how to iterate through items a page at a time + // even if each successive page is fetched by a different process. It is a + // complete web server that displays pages of log entries. To run it as a + // standalone program, rename both the package and this function to "main". + ctx := context.Background() + flag.Parse() + if *projectID == "" { + log.Fatal("-project-id missing") + } + var err error + client, err = logging.NewClient(ctx, *projectID) + if err != nil { + log.Fatalf("creating logging client: %v", err) + } + + http.HandleFunc("/entries", handleEntries) + log.Print("listening on 8080") + log.Fatal(http.ListenAndServe(":8080", nil)) +} + +var pageTemplate = template.Must(template.New("").Parse(` +
+<table>
+  {{range .Entries}}
+    <tr><td>{{.}}</td></tr>
+  {{end}}
+</table>
+{{if .Next}}
+  <a href="/entries?pageToken={{.Next}}">Next Page</a>
+{{end}}
+`))
+
+func handleEntries(w http.ResponseWriter, r *http.Request) {
+	ctx := context.Background()
+	filter := fmt.Sprintf(`logName = "projects/%s/logs/testlog"`, *projectID)
+	it := client.Entries(ctx, logging.Filter(filter))
+	var entries []*logging.Entry
+	nextTok, err := iterator.NewPager(it, 5, r.URL.Query().Get("pageToken")).NextPage(&entries)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("problem getting the next page: %v", err), http.StatusInternalServerError)
+		return
+	}
+	data := struct {
+		Entries []*logging.Entry
+		Next    string
+	}{
+		entries,
+		nextTok,
+	}
+	var buf bytes.Buffer
+	if err := pageTemplate.Execute(&buf, data); err != nil {
+		http.Error(w, fmt.Sprintf("problem executing page template: %v", err), http.StatusInternalServerError)
+	}
+	if _, err := buf.WriteTo(w); err != nil {
+		log.Printf("writing response: %v", err)
+	}
+}
diff --git a/vendor/cloud.google.com/go/preview/logging/example_resource_iterator_test.go b/vendor/cloud.google.com/go/preview/logging/example_resource_iterator_test.go
new file mode 100644
index 000000000..da8e75f1c
--- /dev/null
+++ b/vendor/cloud.google.com/go/preview/logging/example_resource_iterator_test.go
@@ -0,0 +1,52 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging_test
+
+import (
+	"fmt"
+
+	"cloud.google.com/go/preview/logging"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+)
+
+func ExampleClient_ResourceDescriptors() {
+	ctx := context.Background()
+	client, err := logging.NewClient(ctx, "my-project")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	it := client.ResourceDescriptors(ctx)
+	_ = it // TODO: iterate using Next or iterator.Pager.
+}
+
+func ExampleResourceDescriptorIterator_Next() {
+	ctx := context.Background()
+	client, err := logging.NewClient(ctx, "my-project")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	it := client.ResourceDescriptors(ctx)
+	for {
+		rdesc, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			// TODO: Handle error.
+		}
+		fmt.Println(rdesc)
+	}
+}
diff --git a/vendor/cloud.google.com/go/preview/logging/example_sink_iterator_test.go b/vendor/cloud.google.com/go/preview/logging/example_sink_iterator_test.go
new file mode 100644
index 000000000..dac0039a3
--- /dev/null
+++ b/vendor/cloud.google.com/go/preview/logging/example_sink_iterator_test.go
@@ -0,0 +1,52 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package logging_test + +import ( + "fmt" + + "cloud.google.com/go/preview/logging" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Sinks() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Sinks(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleSinkIterator_Next() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Sinks(ctx) + for { + sink, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(sink) + } +} diff --git a/vendor/cloud.google.com/go/preview/logging/examples_test.go b/vendor/cloud.google.com/go/preview/logging/examples_test.go new file mode 100644 index 000000000..4f1659628 --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/examples_test.go @@ -0,0 +1,251 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging_test + +import ( + "fmt" + "os" + + "cloud.google.com/go/preview/logging" + "golang.org/x/net/context" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: handle error. + } + // Use client to manage logs, metrics and sinks. + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: handle error. + } +} + +func ExampleClient_Ping() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: handle error. + } + if err := client.Ping(ctx); err != nil { + // TODO: handle error. + } +} + +func ExampleNewClient_errorFunc() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + // Print all errors to stdout. + client.OnError = func(e error) { + fmt.Fprintf(os.Stdout, "logging: %v", e) + } + // Use client to manage logs, metrics and sinks. + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_DeleteLog() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + err = client.DeleteLog(ctx, "my-log") + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Logger() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + _ = lg // TODO: use the Logger. +} + +func ExampleLogger_LogSync() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. 
+ } + lg := client.Logger("my-log") + err = lg.LogSync(ctx, logging.Entry{Payload: "red alert"}) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleLogger_Log() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + lg.Log(logging.Entry{Payload: "something happened"}) +} + +func ExampleLogger_Flush() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + lg.Log(logging.Entry{Payload: "something happened"}) + lg.Flush() +} + +func ExampleLogger_StandardLogger() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + slg := lg.StandardLogger(logging.Info) + slg.Println("an informative message") +} + +func ExampleClient_CreateMetric() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + err = client.CreateMetric(ctx, &logging.Metric{ + ID: "severe-errors", + Description: "entries at ERROR or higher severities", + Filter: "severity >= ERROR", + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_DeleteMetric() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + if err := client.DeleteMetric(ctx, "severe-errors"); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Metric() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + m, err := client.Metric(ctx, "severe-errors") + if err != nil { + // TODO: Handle error. + } + fmt.Println(m) +} + +func ExampleClient_UpdateMetric() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + err = client.UpdateMetric(ctx, &logging.Metric{ + ID: "severe-errors", + Description: "entries at high severities", + Filter: "severity > ERROR", + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateSink() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + sink, err := client.CreateSink(ctx, &logging.Sink{ + ID: "severe-errors-to-gcs", + Destination: "storage.googleapis.com/my-bucket", + Filter: "severity >= ERROR", + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(sink) +} + +func ExampleClient_DeleteSink() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + if err := client.DeleteSink(ctx, "severe-errors-to-gcs"); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Sink() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + s, err := client.Sink(ctx, "severe-errors-to-gcs") + if err != nil { + // TODO: Handle error. + } + fmt.Println(s) +} + +func ExampleClient_UpdateSink() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. 
+ } + sink, err := client.UpdateSink(ctx, &logging.Sink{ + ID: "severe-errors-to-gcs", + Destination: "storage.googleapis.com/my-other-bucket", + Filter: "severity >= ERROR", + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(sink) +} + +func ExampleParseSeverity() { + sev := logging.ParseSeverity("ALERT") + fmt.Println(sev) + // Output: Alert +} diff --git a/vendor/cloud.google.com/go/preview/logging/internal/testing/fake.go b/vendor/cloud.google.com/go/preview/logging/internal/testing/fake.go new file mode 100644 index 000000000..f7b702e70 --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/internal/testing/fake.go @@ -0,0 +1,408 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package testing provides support for testing the logging client. +package testing + +import ( + "errors" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" + + emptypb "github.com/golang/protobuf/ptypes/empty" + tspb "github.com/golang/protobuf/ptypes/timestamp" + + "cloud.google.com/go/internal/testutil" + context "golang.org/x/net/context" + lpb "google.golang.org/genproto/googleapis/api/label" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +type loggingHandler struct { + logpb.LoggingServiceV2Server + + mu sync.Mutex + logs map[string][]*logpb.LogEntry // indexed by log name +} + +type configHandler struct { + logpb.ConfigServiceV2Server + + mu sync.Mutex + sinks map[string]*logpb.LogSink // indexed by (full) sink name +} + +type metricHandler struct { + logpb.MetricsServiceV2Server + + mu sync.Mutex + metrics map[string]*logpb.LogMetric // indexed by (full) metric name +} + +// NewServer creates a new in-memory fake server implementing the logging service. +// It returns the address of the server. +func NewServer() (string, error) { + srv, err := testutil.NewServer() + if err != nil { + return "", err + } + logpb.RegisterLoggingServiceV2Server(srv.Gsrv, &loggingHandler{ + logs: make(map[string][]*logpb.LogEntry), + }) + logpb.RegisterConfigServiceV2Server(srv.Gsrv, &configHandler{ + sinks: make(map[string]*logpb.LogSink), + }) + logpb.RegisterMetricsServiceV2Server(srv.Gsrv, &metricHandler{ + metrics: make(map[string]*logpb.LogMetric), + }) + srv.Start() + return srv.Addr, nil +} + +// DeleteLog deletes a log and all its log entries. The log will reappear if it +// receives new entries. +func (h *loggingHandler) DeleteLog(_ context.Context, req *logpb.DeleteLogRequest) (*emptypb.Empty, error) { + // TODO(jba): return NotFound if log isn't there? + h.mu.Lock() + defer h.mu.Unlock() + delete(h.logs, req.LogName) + return &emptypb.Empty{}, nil +} + +// The only project ID that WriteLogEntries will accept. +// Important for testing Ping. +const validProjectID = "PROJECT_ID" + +// WriteLogEntries writes log entries to Stackdriver Logging. All log entries in +// Stackdriver Logging are written by this method. 
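+//
+// (Fake-specific notes: the request's LogName must be under validProjectID
+// above, and accepted entries are appended to the in-memory logs map keyed
+// by log name.)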
+func (h *loggingHandler) WriteLogEntries(_ context.Context, req *logpb.WriteLogEntriesRequest) (*logpb.WriteLogEntriesResponse, error) { + if !strings.HasPrefix(req.LogName, "projects/"+validProjectID+"/") { + return nil, fmt.Errorf("bad project ID: %q", req.LogName) + } + // TODO(jba): support insertId? + h.mu.Lock() + defer h.mu.Unlock() + for _, e := range req.Entries { + // Assign timestamp if missing. + if e.Timestamp == nil { + e.Timestamp = &tspb.Timestamp{Seconds: time.Now().Unix(), Nanos: 0} + } + // Fill from common fields in request. + if e.LogName == "" { + e.LogName = req.LogName + } + if e.Resource == nil { + // TODO(jba): use a global one if nil? + e.Resource = req.Resource + } + for k, v := range req.Labels { + if _, ok := e.Labels[k]; !ok { + e.Labels[k] = v + } + } + + // Store by log name. + h.logs[e.LogName] = append(h.logs[e.LogName], e) + } + return &logpb.WriteLogEntriesResponse{}, nil +} + +// ListLogEntries lists log entries. Use this method to retrieve log entries +// from Stackdriver Logging. +// +// This fake implementation ignores project IDs. It does not support full filtering, only +// expressions of the form "logName = NAME". +func (h *loggingHandler) ListLogEntries(_ context.Context, req *logpb.ListLogEntriesRequest) (*logpb.ListLogEntriesResponse, error) { + h.mu.Lock() + defer h.mu.Unlock() + entries, err := h.filterEntries(req.Filter) + if err != nil { + return nil, err + } + if err = sortEntries(entries, req.OrderBy); err != nil { + return nil, err + } + + from, to, nextPageToken, err := getPage(int(req.PageSize), req.PageToken, len(entries)) + if err != nil { + return nil, err + } + return &logpb.ListLogEntriesResponse{ + Entries: entries[from:to], + NextPageToken: nextPageToken, + }, nil +} + +// getPage converts an incoming page size and token from an RPC request into +// slice bounds and the outgoing next-page token. +// +// getPage assumes that the complete, unpaginated list of items exists as a +// single slice. In addition to the page size and token, getPage needs the +// length of that slice. +// +// getPage's first two return values should be used to construct a sub-slice of +// the complete, unpaginated slice. E.g. if the complete slice is s, then +// s[from:to] is the desired page. Its third return value should be set as the +// NextPageToken field of the RPC response. +func getPage(pageSize int, pageToken string, length int) (from, to int, nextPageToken string, err error) { + from, to = 0, length + if pageToken != "" { + from, err = strconv.Atoi(pageToken) + if err != nil { + return 0, 0, "", invalidArgument("bad page token") + } + if from >= length { + return length, length, "", nil + } + } + if pageSize > 0 && from+pageSize < length { + to = from + pageSize + nextPageToken = strconv.Itoa(to) + } + return from, to, nextPageToken, nil +} + +func (h *loggingHandler) filterEntries(filter string) ([]*logpb.LogEntry, error) { + logName, err := parseFilter(filter) + if err != nil { + return nil, err + } + if logName != "" { + return h.logs[logName], nil + } + var entries []*logpb.LogEntry + for _, es := range h.logs { + entries = append(entries, es...) 
+ } + return entries, nil +} + +var filterRegexp = regexp.MustCompile(`^logName\s*=\s*"?([-_/.%\w]+)"?$`) + +// returns the log name, or "" for the empty filter +func parseFilter(filter string) (string, error) { + if filter == "" { + return "", nil + } + subs := filterRegexp.FindStringSubmatch(filter) + if subs == nil { + return "", invalidArgument("bad filter") + } + return subs[1], nil // cannot panic by construction of regexp +} + +func sortEntries(entries []*logpb.LogEntry, orderBy string) error { + switch orderBy { + case "", "timestamp asc": + sort.Sort(byTimestamp(entries)) + return nil + + case "timestamp desc": + sort.Sort(sort.Reverse(byTimestamp(entries))) + return nil + + default: + return invalidArgument("bad order_by") + } +} + +type byTimestamp []*logpb.LogEntry + +func (s byTimestamp) Len() int { return len(s) } +func (s byTimestamp) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTimestamp) Less(i, j int) bool { + c := compareTimestamps(s[i].Timestamp, s[j].Timestamp) + switch { + case c < 0: + return true + case c > 0: + return false + default: + return s[i].InsertId < s[j].InsertId + } +} + +func compareTimestamps(ts1, ts2 *tspb.Timestamp) int64 { + if ts1.Seconds != ts2.Seconds { + return ts1.Seconds - ts2.Seconds + } + return int64(ts1.Nanos - ts2.Nanos) +} + +// Lists monitored resource descriptors that are used by Stackdriver Logging. +func (h *loggingHandler) ListMonitoredResourceDescriptors(context.Context, *logpb.ListMonitoredResourceDescriptorsRequest) (*logpb.ListMonitoredResourceDescriptorsResponse, error) { + return &logpb.ListMonitoredResourceDescriptorsResponse{ + ResourceDescriptors: []*mrpb.MonitoredResourceDescriptor{ + { + Type: "global", + DisplayName: "Global", + Description: "... a log is not associated with any specific resource.", + Labels: []*lpb.LabelDescriptor{ + {Key: "project_id", Description: "The identifier of the GCP project..."}, + }, + }, + }, + }, nil +} + +// Gets a sink. +func (h *configHandler) GetSink(_ context.Context, req *logpb.GetSinkRequest) (*logpb.LogSink, error) { + h.mu.Lock() + defer h.mu.Unlock() + if s, ok := h.sinks[req.SinkName]; ok { + return s, nil + } + // TODO(jba): use error codes + return nil, fmt.Errorf("sink %q not found", req.SinkName) +} + +// Creates a sink. +func (h *configHandler) CreateSink(_ context.Context, req *logpb.CreateSinkRequest) (*logpb.LogSink, error) { + h.mu.Lock() + defer h.mu.Unlock() + fullName := fmt.Sprintf("%s/sinks/%s", req.Parent, req.Sink.Name) + if _, ok := h.sinks[fullName]; ok { + return nil, fmt.Errorf("sink with name %q already exists", fullName) + } + h.sinks[fullName] = req.Sink + return req.Sink, nil +} + +// Creates or updates a sink. +func (h *configHandler) UpdateSink(_ context.Context, req *logpb.UpdateSinkRequest) (*logpb.LogSink, error) { + h.mu.Lock() + defer h.mu.Unlock() + // Update of a non-existent sink will create it. + h.sinks[req.SinkName] = req.Sink + return req.Sink, nil +} + +// Deletes a sink. +func (h *configHandler) DeleteSink(_ context.Context, req *logpb.DeleteSinkRequest) (*emptypb.Empty, error) { + h.mu.Lock() + defer h.mu.Unlock() + delete(h.sinks, req.SinkName) + return &emptypb.Empty{}, nil +} + +// Lists sinks. This fake implementation ignores the Parent field of +// ListSinksRequest. All sinks are listed, regardless of their project. 
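+//
+// Paging follows getPage above. For example (illustrative numbers): with 7
+// sinks and PageSize=5, an empty page token yields sinks[0:5] and next-page
+// token "5"; passing "5" back yields sinks[5:7] and an empty token.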
+func (h *configHandler) ListSinks(_ context.Context, req *logpb.ListSinksRequest) (*logpb.ListSinksResponse, error) {
+	h.mu.Lock()
+	var sinks []*logpb.LogSink
+	for _, s := range h.sinks {
+		sinks = append(sinks, s)
+	}
+	h.mu.Unlock() // safe because no *logpb.LogSink is ever modified
+	// Since map iteration varies, sort the sinks.
+	sort.Sort(sinksByName(sinks))
+	from, to, nextPageToken, err := getPage(int(req.PageSize), req.PageToken, len(sinks))
+	if err != nil {
+		return nil, err
+	}
+	return &logpb.ListSinksResponse{
+		Sinks:         sinks[from:to],
+		NextPageToken: nextPageToken,
+	}, nil
+}
+
+type sinksByName []*logpb.LogSink
+
+func (s sinksByName) Len() int           { return len(s) }
+func (s sinksByName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s sinksByName) Less(i, j int) bool { return s[i].Name < s[j].Name }
+
+// Gets a metric.
+func (h *metricHandler) GetLogMetric(_ context.Context, req *logpb.GetLogMetricRequest) (*logpb.LogMetric, error) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	if s, ok := h.metrics[req.MetricName]; ok {
+		return s, nil
+	}
+	// TODO(jba): use error codes
+	return nil, fmt.Errorf("metric %q not found", req.MetricName)
+}
+
+// Creates a metric.
+func (h *metricHandler) CreateLogMetric(_ context.Context, req *logpb.CreateLogMetricRequest) (*logpb.LogMetric, error) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	fullName := fmt.Sprintf("%s/metrics/%s", req.Parent, req.Metric.Name)
+	if _, ok := h.metrics[fullName]; ok {
+		return nil, fmt.Errorf("metric with name %q already exists", fullName)
+	}
+	h.metrics[fullName] = req.Metric
+	return req.Metric, nil
+}
+
+// Creates or updates a metric.
+func (h *metricHandler) UpdateLogMetric(_ context.Context, req *logpb.UpdateLogMetricRequest) (*logpb.LogMetric, error) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	// Update of a non-existent metric will create it.
+	h.metrics[req.MetricName] = req.Metric
+	return req.Metric, nil
+}
+
+// Deletes a metric.
+func (h *metricHandler) DeleteLogMetric(_ context.Context, req *logpb.DeleteLogMetricRequest) (*emptypb.Empty, error) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	delete(h.metrics, req.MetricName)
+	return &emptypb.Empty{}, nil
+}
+
+// Lists metrics. This fake implementation ignores the Parent field of
+// ListLogMetricsRequest. All metrics are listed, regardless of their project.
+func (h *metricHandler) ListLogMetrics(_ context.Context, req *logpb.ListLogMetricsRequest) (*logpb.ListLogMetricsResponse, error) {
+	h.mu.Lock()
+	var metrics []*logpb.LogMetric
+	for _, s := range h.metrics {
+		metrics = append(metrics, s)
+	}
+	h.mu.Unlock() // safe because no *logpb.LogMetric is ever modified
+	// Since map iteration varies, sort the metrics.
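+	// Sorting also keeps getPage's numeric page tokens meaningful across
+	// successive ListLogMetrics calls.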
+	sort.Sort(metricsByName(metrics))
+	from, to, nextPageToken, err := getPage(int(req.PageSize), req.PageToken, len(metrics))
+	if err != nil {
+		return nil, err
+	}
+	return &logpb.ListLogMetricsResponse{
+		Metrics:       metrics[from:to],
+		NextPageToken: nextPageToken,
+	}, nil
+}
+
+type metricsByName []*logpb.LogMetric
+
+func (s metricsByName) Len() int           { return len(s) }
+func (s metricsByName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s metricsByName) Less(i, j int) bool { return s[i].Name < s[j].Name }
+
+func invalidArgument(msg string) error {
+	// TODO(jba): status codes
+	return errors.New(msg)
+}
diff --git a/vendor/cloud.google.com/go/preview/logging/internal/testing/fake_test.go b/vendor/cloud.google.com/go/preview/logging/internal/testing/fake_test.go
new file mode 100644
index 000000000..b1267cd35
--- /dev/null
+++ b/vendor/cloud.google.com/go/preview/logging/internal/testing/fake_test.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2016 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file contains only basic checks. The fake is effectively tested by the
+// logging client unit tests.
+
+package testing
+
+import (
+	"reflect"
+	"testing"
+	"time"
+
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
+	logpb "google.golang.org/genproto/googleapis/logging/v2"
+	grpc "google.golang.org/grpc"
+)
+
+func TestNewServer(t *testing.T) {
+	// Confirm that we can create and use a working gRPC server.
+	addr, err := NewServer()
+	if err != nil {
+		t.Fatal(err)
+	}
+	conn, err := grpc.Dial(addr, grpc.WithInsecure())
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Avoid "connection is closing; please retry" message from gRPC.
+ time.Sleep(300 * time.Millisecond) + conn.Close() +} + +func TestParseFilter(t *testing.T) { + for _, test := range []struct { + filter string + want string + wantErr bool + }{ + {"", "", false}, + {"logName = syslog", "syslog", false}, + {"logname = syslog", "", true}, + {"logName = 'syslog'", "", true}, + {"logName == syslog", "", true}, + } { + got, err := parseFilter(test.filter) + if err != nil { + if !test.wantErr { + t.Errorf("%q: got %v, want no error", test.filter, err) + } + continue + } + if test.wantErr { + t.Errorf("%q: got no error, want one", test.filter) + continue + } + if got != test.want { + t.Errorf("%q: got %q, want %q", test.filter, got, test.want) + } + } +} + +func TestSortEntries(t *testing.T) { + entries := []*logpb.LogEntry{ + /* 0 */ {Timestamp: &tspb.Timestamp{Seconds: 30}}, + /* 1 */ {Timestamp: &tspb.Timestamp{Seconds: 10}}, + /* 2 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "b"}, + /* 3 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "a"}, + /* 4 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "c"}, + } + for _, test := range []struct { + orderBy string + want []int // slice of index into entries; nil == error + }{ + {"", []int{1, 3, 2, 4, 0}}, + {"timestamp asc", []int{1, 3, 2, 4, 0}}, + {"timestamp desc", []int{0, 4, 2, 3, 1}}, + {"something else", nil}, + } { + got := make([]*logpb.LogEntry, len(entries)) + copy(got, entries) + err := sortEntries(got, test.orderBy) + if err != nil { + if test.want != nil { + t.Errorf("%q: got %v, want nil error", test.orderBy, err) + } + continue + } + want := make([]*logpb.LogEntry, len(entries)) + for i, j := range test.want { + want[i] = entries[j] + } + if !reflect.DeepEqual(got, want) { + t.Errorf("%q: got %v, want %v", test.orderBy, got, want) + } + } +} diff --git a/vendor/cloud.google.com/go/preview/logging/logging.go b/vendor/cloud.google.com/go/preview/logging/logging.go new file mode 100644 index 000000000..5be87037e --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/logging.go @@ -0,0 +1,908 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// API/gRPC features intentionally missing from this client: +// - You cannot have the server pick the time of the entry. This client +// always sends a time. +// - There is no way to provide a protocol buffer payload. +// - No support for the "partial success" feature when writing log entries. + +// TODO(jba): link in google.cloud.audit.AuditLog, to support activity logs (after it is published) +// TODO(jba): test whether forward-slash characters in the log ID must be URL-encoded. + +// These features are missing now, but will likely be added: +// - There is no way to specify CallOptions. + +// Package logging contains a Stackdriver Logging client. +// The client uses Logging API v2. +// See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. +// +// This package is experimental and subject to API changes. 
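+//
+// A minimal sketch of typical use (error handling elided; "my-project" and
+// "my-log" are placeholder names):
+//
+//	ctx := context.Background()
+//	client, err := logging.NewClient(ctx, "my-project")
+//	lg := client.Logger("my-log")
+//	lg.Log(logging.Entry{Payload: "something happened"})
+//	lg.Flush()
+//	client.Close()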
+package logging // import "cloud.google.com/go/preview/logging" + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "math" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "cloud.google.com/go/internal/bundler" + vkit "cloud.google.com/go/logging/apiv2" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + structpb "github.com/golang/protobuf/ptypes/struct" + tspb "github.com/golang/protobuf/ptypes/timestamp" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logtypepb "google.golang.org/genproto/googleapis/logging/type" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +// For testing: +var now = time.Now + +const ( + prodAddr = "logging.googleapis.com:443" + version = "0.2.0" +) + +const ( + // Scope for reading from the logging service. + ReadScope = "https://www.googleapis.com/auth/logging.read" + + // Scope for writing to the logging service. + WriteScope = "https://www.googleapis.com/auth/logging.write" + + // Scope for administrative actions on the logging service. + AdminScope = "https://www.googleapis.com/auth/logging.admin" +) + +const ( + // defaultErrorCapacity is the capacity of the channel used to deliver + // errors to the OnError function. + defaultErrorCapacity = 10 + + // DefaultDelayThreshold is the default value for the DelayThreshold LoggerOption. + DefaultDelayThreshold = time.Second + + // DefaultEntryCountThreshold is the default value for the EntryCountThreshold LoggerOption. + DefaultEntryCountThreshold = 10 + + // DefaultEntryByteThreshold is the default value for the EntryByteThreshold LoggerOption. + DefaultEntryByteThreshold = 1 << 20 // 1MiB + + // DefaultBufferedByteLimit is the default value for the BufferedByteLimit LoggerOption. + DefaultBufferedByteLimit = 1 << 30 // 1GiB +) + +// ErrOverflow signals that the number of buffered entries for a Logger +// exceeds its BufferLimit. +var ErrOverflow = errors.New("logging: log entry overflowed buffer limits") + +// Client is a Logging client. A Client is associated with a single Cloud project. +type Client struct { + lClient *vkit.Client // logging client + sClient *vkit.ConfigClient // sink client + mClient *vkit.MetricsClient // metric client + projectID string + errc chan error // should be buffered to minimize dropped errors + donec chan struct{} // closed on Client.Close to close Logger bundlers + loggers sync.WaitGroup // so we can wait for loggers to close + closed bool + + // OnError is called when an error occurs in a call to Log or Flush. The + // error may be due to an invalid Entry, an overflow because BufferLimit + // was reached (in which case the error will be ErrOverflow) or an error + // communicating with the logging service. OnError is called with errors + // from all Loggers. It is never called concurrently. OnError is expected + // to return quickly; if errors occur while OnError is running, some may + // not be reported. The default behavior is to call log.Printf. + // + // This field should be set only once, before any method of Client is called. + OnError func(err error) +} + +// NewClient returns a new logging client associated with the provided project ID. +// +// By default NewClient uses WriteScope. To use a different scope, call +// NewClient using a WithScopes option. 
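+//
+// For example, a sketch of creating a client with administrative scope
+// (credentials are assumed to be discovered from the environment):
+//
+//	c, err := logging.NewClient(ctx, projectID, option.WithScopes(logging.AdminScope))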
+func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
+	// Check for '/' in project ID to reserve the ability to support various owning resources,
+	// in the form "{Collection}/{Name}", for instance "organizations/my-org".
+	if strings.ContainsRune(projectID, '/') {
+		return nil, errors.New("logging: project ID contains '/'")
+	}
+	opts = append([]option.ClientOption{option.WithEndpoint(prodAddr), option.WithScopes(WriteScope)},
+		opts...)
+	lc, err := vkit.NewClient(ctx, opts...)
+	if err != nil {
+		return nil, err
+	}
+	// TODO(jba): pass along any client options that should be provided to all clients.
+	sc, err := vkit.NewConfigClient(ctx, option.WithGRPCConn(lc.Connection()))
+	if err != nil {
+		return nil, err
+	}
+	mc, err := vkit.NewMetricsClient(ctx, option.WithGRPCConn(lc.Connection()))
+	if err != nil {
+		return nil, err
+	}
+	lc.SetGoogleClientInfo("logging", version)
+	sc.SetGoogleClientInfo("logging", version)
+	mc.SetGoogleClientInfo("logging", version)
+	client := &Client{
+		lClient:   lc,
+		sClient:   sc,
+		mClient:   mc,
+		projectID: projectID,
+		errc:      make(chan error, defaultErrorCapacity), // create a small buffer for errors
+		donec:     make(chan struct{}),
+		OnError:   func(e error) { log.Printf("logging client: %v", e) },
+	}
+	// Deliver errors to the user's function from a single goroutine, so that
+	// OnError is never called concurrently.
+	go func() {
+		for err := range client.errc {
+			// This reference to OnError is memory-safe if the user sets OnError before
+			// calling any client methods. The reference happens before the first read from
+			// client.errc, which happens before the first write to client.errc, which
+			// happens before any call, which happens before the user sets OnError.
+			if fn := client.OnError; fn != nil {
+				fn(err)
+			} else {
+				log.Printf("logging (project ID %q): %v", projectID, err)
+			}
+		}
+	}()
+	return client, nil
+}
+
+// parent returns the string used in many RPCs to denote the parent resource of the log.
+func (c *Client) parent() string {
+	return "projects/" + c.projectID
+}
+
+var unixZeroTimestamp *tspb.Timestamp
+
+func init() {
+	var err error
+	unixZeroTimestamp, err = ptypes.TimestampProto(time.Unix(0, 0))
+	if err != nil {
+		panic(err)
+	}
+}
+
+// Ping reports whether the client's connection to the logging service and the
+// authentication configuration are valid. To accomplish this, Ping writes a
+// log entry "ping" to a log named "ping".
+func (c *Client) Ping(ctx context.Context) error {
+	ent := &logpb.LogEntry{
+		Payload:   &logpb.LogEntry_TextPayload{"ping"},
+		Timestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both
+		InsertId:  "ping",            // necessary for the service to dedup these entries.
+	}
+	_, err := c.lClient.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{
+		LogName:  c.logPath("ping"),
+		Resource: &mrpb.MonitoredResource{Type: "global"},
+		Entries:  []*logpb.LogEntry{ent},
+	})
+	return err
+}
+
+// A Logger is used to write log messages to a single log. It can be configured
+// with a log ID, common monitored resource, and a set of common labels.
+type Logger struct {
+	client     *Client
+	logName    string // "projects/{projectID}/logs/{logID}"
+	stdLoggers map[Severity]*log.Logger
+	bundler    *bundler.Bundler
+
+	// Options
+	commonResource *mrpb.MonitoredResource
+	commonLabels   map[string]string
+}
+
+// A LoggerOption is a configuration option for a Logger.
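+// Options are passed to Client.Logger; for example, a sketch with placeholder
+// values:
+//
+//	lg := client.Logger("my-log",
+//		logging.CommonLabels(map[string]string{"env": "test"}),
+//		logging.EntryCountThreshold(100),
+//	)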
+type LoggerOption interface { + set(*Logger) +} + +// CommonResource sets the monitored resource associated with all log entries +// written from a Logger. If not provided, a resource of type "global" is used. +// This value can be overridden by setting an Entry's Resource field. +func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} } + +type commonResource struct{ *mrpb.MonitoredResource } + +func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource } + +// CommonLabels are labels that apply to all log entries written from a Logger, +// so that you don't have to repeat them in each log entry's Labels field. If +// any of the log entries contains a (key, value) with the same key that is in +// CommonLabels, then the entry's (key, value) overrides the one in +// CommonLabels. +func CommonLabels(m map[string]string) LoggerOption { return commonLabels(m) } + +type commonLabels map[string]string + +func (c commonLabels) set(l *Logger) { l.commonLabels = c } + +// DelayThreshold is the maximum amount of time that an entry should remain +// buffered in memory before a call to the logging service is triggered. Larger +// values of DelayThreshold will generally result in fewer calls to the logging +// service, while increasing the risk that log entries will be lost if the +// process crashes. +// The default is DefaultDelayThreshold. +func DelayThreshold(d time.Duration) LoggerOption { return delayThreshold(d) } + +type delayThreshold time.Duration + +func (d delayThreshold) set(l *Logger) { l.bundler.DelayThreshold = time.Duration(d) } + +// EntryCountThreshold is the maximum number of entries that will be buffered +// in memory before a call to the logging service is triggered. Larger values +// will generally result in fewer calls to the logging service, while +// increasing both memory consumption and the risk that log entries will be +// lost if the process crashes. +// The default is DefaultEntryCountThreshold. +func EntryCountThreshold(n int) LoggerOption { return entryCountThreshold(n) } + +type entryCountThreshold int + +func (e entryCountThreshold) set(l *Logger) { l.bundler.BundleCountThreshold = int(e) } + +// EntryByteThreshold is the maximum number of bytes of entries that will be +// buffered in memory before a call to the logging service is triggered. See +// EntryCountThreshold for a discussion of the tradeoffs involved in setting +// this option. +// The default is DefaultEntryByteThreshold. +func EntryByteThreshold(n int) LoggerOption { return entryByteThreshold(n) } + +type entryByteThreshold int + +func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) } + +// EntryByteLimit is the maximum number of bytes of entries that will be sent +// in a single call to the logging service. This option limits the size of a +// single RPC payload, to account for network or service issues with large +// RPCs. If EntryByteLimit is smaller than EntryByteThreshold, the latter has +// no effect. +// The default is zero, meaning there is no limit. +func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) } + +type entryByteLimit int + +func (e entryByteLimit) set(l *Logger) { l.bundler.BundleByteLimit = int(e) } + +// BufferedByteLimit is the maximum number of bytes that the Logger will keep +// in memory before returning ErrOverflow. This option limits the total memory +// consumption of the Logger (but note that each Logger has its own, separate +// limit). 
It is possible to reach BufferedByteLimit even if it is larger than +// EntryByteThreshold or EntryByteLimit, because calls triggered by the latter +// two options may be enqueued (and hence occupying memory) while new log +// entries are being added. +// The default is DefaultBufferedByteLimit. +func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) } + +type bufferedByteLimit int + +func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) } + +// Logger returns a Logger that will write entries with the given log ID, such as +// "syslog". A log ID must be less than 512 characters long and can only +// include the following characters: upper and lower case alphanumeric +// characters: [A-Za-z0-9]; and punctuation characters: forward-slash, +// underscore, hyphen, and period. +func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { + l := &Logger{ + client: c, + logName: c.logPath(logID), + commonResource: &mrpb.MonitoredResource{Type: "global"}, + } + // TODO(jba): determine the right context for the bundle handler. + ctx := context.TODO() + l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) { + l.writeLogEntries(ctx, entries.([]*logpb.LogEntry)) + }) + l.bundler.DelayThreshold = DefaultDelayThreshold + l.bundler.BundleCountThreshold = DefaultEntryCountThreshold + l.bundler.BundleByteThreshold = DefaultEntryByteThreshold + l.bundler.BufferedByteLimit = DefaultBufferedByteLimit + for _, opt := range opts { + opt.set(l) + } + + l.stdLoggers = map[Severity]*log.Logger{} + for s := range severityName { + l.stdLoggers[s] = log.New(severityWriter{l, s}, "", 0) + } + c.loggers.Add(1) + go func() { + defer c.loggers.Done() + <-c.donec + l.bundler.Close() + }() + return l +} + +func (c *Client) logPath(logID string) string { + logID = strings.Replace(logID, "/", "%2F", -1) + return fmt.Sprintf("%s/logs/%s", c.parent(), logID) +} + +type severityWriter struct { + l *Logger + s Severity +} + +func (w severityWriter) Write(p []byte) (n int, err error) { + w.l.Log(Entry{ + Severity: w.s, + Payload: string(p), + }) + return len(p), nil +} + +// DeleteLog deletes a log and all its log entries. The log will reappear if it receives new entries. +// logID identifies the log within the project. An example log ID is "syslog". Requires AdminScope. +func (c *Client) DeleteLog(ctx context.Context, logID string) error { + return c.lClient.DeleteLog(ctx, &logpb.DeleteLogRequest{ + LogName: c.logPath(logID), + }) +} + +// Close closes the client. +func (c *Client) Close() error { + if c.closed { + return nil + } + close(c.donec) // close Logger bundlers + c.loggers.Wait() // wait for all bundlers to flush and close + // Now there can be no more errors. + close(c.errc) // terminate error goroutine + // Return only the first error. Since all clients share an underlying connection, + // Closes after the first always report a "connection is closing" error. + err := c.lClient.Close() + _ = c.sClient.Close() + _ = c.mClient.Close() + c.closed = true + return err +} + +// Severity is the severity of the event described in a log entry. These +// guideline severity levels are ordered, with numerically smaller levels +// treated as less severe than numerically larger levels. +type Severity int + +const ( + // Default means the log entry has no assigned severity level. + Default = Severity(logtypepb.LogSeverity_DEFAULT) + // Debug means debug or trace information. 
+ Debug = Severity(logtypepb.LogSeverity_DEBUG) + // Info means routine information, such as ongoing status or performance. + Info = Severity(logtypepb.LogSeverity_INFO) + // Notice means normal but significant events, such as start up, shut down, or configuration. + Notice = Severity(logtypepb.LogSeverity_NOTICE) + // Warning means events that might cause problems. + Warning = Severity(logtypepb.LogSeverity_WARNING) + // Error means events that are likely to cause problems. + Error = Severity(logtypepb.LogSeverity_ERROR) + // Critical means events that cause more severe problems or brief outages. + Critical = Severity(logtypepb.LogSeverity_CRITICAL) + // Alert means a person must take an action immediately. + Alert = Severity(logtypepb.LogSeverity_ALERT) + // Emergency means one or more systems are unusable. + Emergency = Severity(logtypepb.LogSeverity_EMERGENCY) +) + +var severityName = map[Severity]string{ + Default: "Default", + Debug: "Debug", + Info: "Info", + Notice: "Notice", + Warning: "Warning", + Error: "Error", + Critical: "Critical", + Alert: "Alert", + Emergency: "Emergency", +} + +// String converts a severity level to a string. +func (v Severity) String() string { + // same as proto.EnumName + s, ok := severityName[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// ParseSeverity returns the Severity whose name equals s, ignoring case. It +// returns Default if no Severity matches. +func ParseSeverity(s string) Severity { + sl := strings.ToLower(s) + for sev, name := range severityName { + if strings.ToLower(name) == sl { + return sev + } + } + return Default +} + +// Entry is a log entry. +// See https://cloud.google.com/logging/docs/view/logs_index for more about entries. +type Entry struct { + // Timestamp is the time of the entry. If zero, the current time is used. + Timestamp time.Time + + // Severity is the entry's severity level. + // The zero value is Default. + Severity Severity + + // Payload must be either a string or something that + // marshals via the encoding/json package to a JSON object + // (and not any other type of JSON value). + Payload interface{} + + // Labels optionally specifies key/value labels for the log entry. + // The Logger.Log method takes ownership of this map. See Logger.CommonLabels + // for more about labels. + Labels map[string]string + + // InsertID is a unique ID for the log entry. If you provide this field, + // the logging service considers other log entries in the same log with the + // same ID as duplicates which can be removed. If omitted, the logging + // service will generate a unique ID for this log entry. Note that because + // this client retries RPCs automatically, it is possible (though unlikely) + // that an Entry without an InsertID will be written more than once. + InsertID string + + // HTTPRequest optionally specifies metadata about the HTTP request + // associated with this log entry, if applicable. It is optional. + HTTPRequest *HTTPRequest + + // Operation optionally provides information about an operation associated + // with the log entry, if applicable. + Operation *logpb.LogEntryOperation + + // LogName is the full log name, in the form + // "projects/{ProjectID}/logs/{LogID}". It is set by the client when + // reading entries. It is an error to set it when writing entries. + LogName string + + // Resource is the monitored resource associated with the entry. It is set + // by the client when reading entries. It is an error to set it when + // writing entries. 
+ Resource *mrpb.MonitoredResource +} + +// HTTPRequest contains an http.Request as well as additional +// information about the request and its response. +type HTTPRequest struct { + // Request is the http.Request passed to the handler. + Request *http.Request + + // RequestSize is the size of the HTTP request message in bytes, including + // the request headers and the request body. + RequestSize int64 + + // Status is the response code indicating the status of the response. + // Examples: 200, 404. + Status int + + // ResponseSize is the size of the HTTP response message sent back to the client, in bytes, + // including the response headers and the response body. + ResponseSize int64 + + // RemoteIP is the IP address (IPv4 or IPv6) of the client that issued the + // HTTP request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329". + RemoteIP string + + // CacheHit reports whether an entity was served from cache (with or without + // validation). + CacheHit bool + + // CacheValidatedWithOriginServer reports whether the response was + // validated with the origin server before being served from cache. This + // field is only meaningful if CacheHit is true. + CacheValidatedWithOriginServer bool +} + +func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest { + if r == nil { + return nil + } + if r.Request == nil { + panic("HTTPRequest must have a non-nil Request") + } + u := *r.Request.URL + u.Fragment = "" + return &logtypepb.HttpRequest{ + RequestMethod: r.Request.Method, + RequestUrl: u.String(), + RequestSize: r.RequestSize, + Status: int32(r.Status), + ResponseSize: r.ResponseSize, + UserAgent: r.Request.UserAgent(), + RemoteIp: r.RemoteIP, // TODO(jba): attempt to parse http.Request.RemoteAddr? + Referer: r.Request.Referer(), + CacheHit: r.CacheHit, + CacheValidatedWithOriginServer: r.CacheValidatedWithOriginServer, + } +} + +func toHTTPRequest(p *logtypepb.HttpRequest) (*HTTPRequest, error) { + if p == nil { + return nil, nil + } + u, err := url.Parse(p.RequestUrl) + if err != nil { + return nil, err + } + hr := &http.Request{ + Method: p.RequestMethod, + URL: u, + Header: map[string][]string{}, + } + if p.UserAgent != "" { + hr.Header.Set("User-Agent", p.UserAgent) + } + if p.Referer != "" { + hr.Header.Set("Referer", p.Referer) + } + return &HTTPRequest{ + Request: hr, + RequestSize: p.RequestSize, + Status: int(p.Status), + ResponseSize: p.ResponseSize, + RemoteIP: p.RemoteIp, + CacheHit: p.CacheHit, + CacheValidatedWithOriginServer: p.CacheValidatedWithOriginServer, + }, nil +} + +// toProtoStruct converts v, which must marshal into a JSON object, +// into a Google Struct proto. +func toProtoStruct(v interface{}) (*structpb.Struct, error) { + // v is a Go struct that supports JSON marshalling. We want a Struct + // protobuf. Some day we may have a more direct way to get there, but right + // now the only way is to marshal the Go struct to JSON, unmarshal into a + // map, and then build the Struct proto from the map. 
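+	// (One consequence: json.Unmarshal decodes every JSON number as a
+	// float64, so integer fields arrive in jsonValueToStructValue as float64.)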
+ jb, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("logging: json.Marshal: %v", err) + } + var m map[string]interface{} + err = json.Unmarshal(jb, &m) + if err != nil { + return nil, fmt.Errorf("logging: json.Unmarshal: %v", err) + } + return jsonMapToProtoStruct(m), nil +} + +func jsonMapToProtoStruct(m map[string]interface{}) *structpb.Struct { + fields := map[string]*structpb.Value{} + for k, v := range m { + fields[k] = jsonValueToStructValue(v) + } + return &structpb.Struct{Fields: fields} +} + +func jsonValueToStructValue(v interface{}) *structpb.Value { + switch x := v.(type) { + case bool: + return &structpb.Value{Kind: &structpb.Value_BoolValue{x}} + case float64: + return &structpb.Value{Kind: &structpb.Value_NumberValue{x}} + case string: + return &structpb.Value{Kind: &structpb.Value_StringValue{x}} + case nil: + return &structpb.Value{Kind: &structpb.Value_NullValue{}} + case map[string]interface{}: + return &structpb.Value{Kind: &structpb.Value_StructValue{jsonMapToProtoStruct(x)}} + case []interface{}: + var vals []*structpb.Value + for _, e := range x { + vals = append(vals, jsonValueToStructValue(e)) + } + return &structpb.Value{Kind: &structpb.Value_ListValue{&structpb.ListValue{vals}}} + default: + panic(fmt.Sprintf("bad type %T for JSON value", v)) + } +} + +// LogSync logs the Entry synchronously without any buffering. Because LogSync is slow +// and will block, it is intended primarily for debugging or critical errors. +// Prefer Log for most uses. +// TODO(jba): come up with a better name (LogNow?) or eliminate. +func (l *Logger) LogSync(ctx context.Context, e Entry) error { + ent, err := toLogEntry(e) + if err != nil { + return err + } + _, err = l.client.lClient.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ + LogName: l.logName, + Resource: l.commonResource, + Labels: l.commonLabels, + Entries: []*logpb.LogEntry{ent}, + }) + return err +} + +// Log buffers the Entry for output to the logging service. It never blocks. +func (l *Logger) Log(e Entry) { + ent, err := toLogEntry(e) + if err != nil { + l.error(err) + return + } + if err := l.bundler.Add(ent, proto.Size(ent)); err != nil { + l.error(err) + } +} + +// Flush blocks until all currently buffered log entries are sent. +func (l *Logger) Flush() { + l.bundler.Flush() +} + +func (l *Logger) writeLogEntries(ctx context.Context, entries []*logpb.LogEntry) { + req := &logpb.WriteLogEntriesRequest{ + LogName: l.logName, + Resource: l.commonResource, + Labels: l.commonLabels, + Entries: entries, + } + _, err := l.client.lClient.WriteLogEntries(ctx, req) + if err != nil { + l.error(err) + } +} + +// error puts the error on the client's error channel +// without blocking. +func (l *Logger) error(err error) { + select { + case l.client.errc <- err: + default: + } +} + +// StandardLogger returns a *log.Logger for the provided severity. +// +// This method is cheap. A single log.Logger is pre-allocated for each +// severity level in each Logger. Callers may mutate the returned log.Logger +// (for example by calling SetFlags or SetPrefix). +func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] } + +// An EntriesOption is an option for listing log entries. +type EntriesOption interface { + set(*logpb.ListLogEntriesRequest) +} + +// ProjectIDs sets the project IDs or project numbers from which to retrieve +// log entries. Examples of a project ID: "my-project-1A", "1234567890". 
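+//
+// For example, a sketch listing entries from two placeholder projects:
+//
+//	it := client.Entries(ctx, logging.ProjectIDs([]string{"proj-a", "proj-b"}))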
+func ProjectIDs(pids []string) EntriesOption { return projectIDs(pids) } + +type projectIDs []string + +func (p projectIDs) set(r *logpb.ListLogEntriesRequest) { r.ProjectIds = []string(p) } + +// Filter sets an advanced logs filter for listing log entries (see +// https://cloud.google.com/logging/docs/view/advanced_filters). The filter is +// compared against all log entries in the projects specified by ProjectIDs. +// Only entries that match the filter are retrieved. An empty filter (the +// default) matches all log entries. +// +// In the filter string, log names must be written in their full form, as +// "projects/PROJECT-ID/logs/LOG-ID". Forward slashes in LOG-ID must be +// replaced by %2F before calling Filter. +// +// Timestamps in the filter string must be written in RFC 3339 format. See the +// timestamp example. +func Filter(f string) EntriesOption { return filter(f) } + +type filter string + +func (f filter) set(r *logpb.ListLogEntriesRequest) { r.Filter = string(f) } + +// NewestFirst causes log entries to be listed from most recent (newest) to +// least recent (oldest). By default, they are listed from oldest to newest. +func NewestFirst() EntriesOption { return newestFirst{} } + +type newestFirst struct{} + +func (newestFirst) set(r *logpb.ListLogEntriesRequest) { r.OrderBy = "timestamp desc" } + +// OrderBy determines how a listing of log entries should be sorted. Presently, +// the only permitted values are "timestamp asc" (default) and "timestamp +// desc". The first option returns entries in order of increasing values of +// timestamp (oldest first), and the second option returns entries in order of +// decreasing timestamps (newest first). Entries with equal timestamps are +// returned in order of InsertID. +func OrderBy(ob string) EntriesOption { return orderBy(ob) } + +type orderBy string + +func (o orderBy) set(r *logpb.ListLogEntriesRequest) { r.OrderBy = string(o) } + +// Entries returns an EntryIterator for iterating over log entries. By default, +// the log entries will be restricted to those from the project passed to +// NewClient. This may be overridden by passing a ProjectIDs option. Requires ReadScope or AdminScope. +func (c *Client) Entries(ctx context.Context, opts ...EntriesOption) *EntryIterator { + it := &EntryIterator{ + ctx: ctx, + client: c.lClient, + req: listLogEntriesRequest(c.projectID, opts), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +func listLogEntriesRequest(projectID string, opts []EntriesOption) *logpb.ListLogEntriesRequest { + req := &logpb.ListLogEntriesRequest{ + ProjectIds: []string{projectID}, + } + for _, opt := range opts { + opt.set(req) + } + return req +} + +// An EntryIterator iterates over log entries. +type EntryIterator struct { + ctx context.Context + client *vkit.Client + pageInfo *iterator.PageInfo + nextFunc func() error + req *logpb.ListLogEntriesRequest + items []*Entry +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *EntryIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. 
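+//
+// A typical loop, as a sketch:
+//
+//	for {
+//		e, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		_ = e // use the entry
+//	}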
+func (it *EntryIterator) Next() (*Entry, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+	item := it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *EntryIterator) fetch(pageSize int, pageToken string) (string, error) {
+	// TODO(jba): Do this a nicer way if the generated code supports one.
+	// TODO(jba): If the above TODO can't be done, find a way to pass metadata in the call.
+	client := logpb.NewLoggingServiceV2Client(it.client.Connection())
+	var res *logpb.ListLogEntriesResponse
+	err := gax.Invoke(it.ctx, func(ctx context.Context) error {
+		it.req.PageSize = trunc32(pageSize)
+		it.req.PageToken = pageToken
+		var err error
+		res, err = client.ListLogEntries(ctx, it.req)
+		return err
+	}, it.client.CallOptions.ListLogEntries...)
+	if err != nil {
+		return "", err
+	}
+	for _, ep := range res.Entries {
+		e, err := fromLogEntry(ep)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, e)
+	}
+	return res.NextPageToken, nil
+}
+
+func trunc32(i int) int32 {
+	if i > math.MaxInt32 {
+		i = math.MaxInt32
+	}
+	return int32(i)
+}
+
+func toLogEntry(e Entry) (*logpb.LogEntry, error) {
+	if e.LogName != "" {
+		return nil, errors.New("logging: Entry.LogName must not be set when writing")
+	}
+	t := e.Timestamp
+	if t.IsZero() {
+		t = now()
+	}
+	ts, err := ptypes.TimestampProto(t)
+	if err != nil {
+		return nil, err
+	}
+	ent := &logpb.LogEntry{
+		Timestamp:   ts,
+		Severity:    logtypepb.LogSeverity(e.Severity),
+		InsertId:    e.InsertID,
+		HttpRequest: fromHTTPRequest(e.HTTPRequest),
+		Operation:   e.Operation,
+		Labels:      e.Labels,
+	}
+
+	switch p := e.Payload.(type) {
+	case string:
+		ent.Payload = &logpb.LogEntry_TextPayload{p}
+	default:
+		s, err := toProtoStruct(p)
+		if err != nil {
+			return nil, err
+		}
+		ent.Payload = &logpb.LogEntry_JsonPayload{s}
+	}
+	return ent, nil
+}
+
+var slashUnescaper = strings.NewReplacer("%2F", "/", "%2f", "/")
+
+func fromLogEntry(le *logpb.LogEntry) (*Entry, error) {
+	time, err := ptypes.Timestamp(le.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	var payload interface{}
+	switch x := le.Payload.(type) {
+	case *logpb.LogEntry_TextPayload:
+		payload = x.TextPayload
+
+	case *logpb.LogEntry_ProtoPayload:
+		var d ptypes.DynamicAny
+		if err := ptypes.UnmarshalAny(x.ProtoPayload, &d); err != nil {
+			return nil, fmt.Errorf("logging: unmarshalling proto payload: %v", err)
+		}
+		payload = d.Message
+
+	case *logpb.LogEntry_JsonPayload:
+		// Leave this as a Struct.
+		// TODO(jba): convert to map[string]interface{}?
+		payload = x.JsonPayload
+
+	default:
+		return nil, fmt.Errorf("logging: unknown payload type: %T", le.Payload)
+	}
+	hr, err := toHTTPRequest(le.HttpRequest)
+	if err != nil {
+		return nil, err
+	}
+	return &Entry{
+		Timestamp:   time,
+		Severity:    Severity(le.Severity),
+		Payload:     payload,
+		Labels:      le.Labels,
+		InsertID:    le.InsertId,
+		HTTPRequest: hr,
+		Operation:   le.Operation,
+		LogName:     slashUnescaper.Replace(le.LogName),
+		Resource:    le.Resource,
+	}, nil
+}
diff --git a/vendor/cloud.google.com/go/preview/logging/logging_test.go b/vendor/cloud.google.com/go/preview/logging/logging_test.go
new file mode 100644
index 000000000..d11bdb546
--- /dev/null
+++ b/vendor/cloud.google.com/go/preview/logging/logging_test.go
@@ -0,0 +1,678 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(jba): test that OnError is getting called appropriately. + +package logging + +import ( + "flag" + "fmt" + "log" + "net/http" + "net/url" + "os" + "reflect" + "strings" + "testing" + "time" + + "cloud.google.com/go/internal/bundler" + "cloud.google.com/go/internal/testutil" + ltesting "cloud.google.com/go/preview/logging/internal/testing" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + structpb "github.com/golang/protobuf/ptypes/struct" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logtypepb "google.golang.org/genproto/googleapis/logging/type" + logpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" +) + +const testLogIDPrefix = "GO-LOGGING-CLIENT/TEST-LOG" + +var ( + client *Client + testProjectID string + testLogID string + testFilter string + errorc chan error + + // Adjust the fields of a FullEntry received from the production service + // before comparing it with the expected result. We can't correctly + // compare certain fields, like times or server-generated IDs. + clean func(*Entry) + + // Create a new client with the given project ID. + newClient func(ctx context.Context, projectID string) *Client +) + +func testNow() time.Time { + return time.Unix(1000, 0) +} + +// If true, this test is using the production service, not a fake. +var integrationTest bool + +func TestMain(m *testing.M) { + flag.Parse() // needed for testing.Short() + ctx := context.Background() + testProjectID = testutil.ProjID() + errorc = make(chan error, 100) + if testProjectID == "" || testing.Short() { + integrationTest = false + if testProjectID != "" { + log.Print("Integration tests skipped in short mode (using fake instead)") + } + testProjectID = "PROJECT_ID" + clean = func(e *Entry) { + // Remove the insert ID for consistency with the integration test. + e.InsertID = "" + } + + addr, err := ltesting.NewServer() + if err != nil { + log.Fatalf("creating fake server: %v", err) + } + now = testNow + newClient = func(ctx context.Context, projectID string) *Client { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + log.Fatalf("dialing %q: %v", addr, err) + } + c, err := NewClient(ctx, projectID, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalf("creating client for fake at %q: %v", addr, err) + } + return c + } + } else { + integrationTest = true + clean = func(e *Entry) { + // We cannot compare timestamps, so set them to the test time. + // Also, remove the insert ID added by the service. + e.Timestamp = testNow().UTC() + e.InsertID = "" + } + ts := testutil.TokenSource(ctx, AdminScope) + if ts == nil { + log.Fatal("The project key must be set. 
See CONTRIBUTING.md for details") + } + log.Printf("running integration tests with project %s", testProjectID) + newClient = func(ctx context.Context, projectID string) *Client { + c, err := NewClient(ctx, projectID, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("creating prod client: %v", err) + } + return c + } + } + client = newClient(ctx, testProjectID) + client.OnError = func(e error) { errorc <- e } + initLogs(ctx) + initMetrics(ctx) + cleanup := initSinks(ctx) + testFilter = fmt.Sprintf(`logName = "projects/%s/logs/%s"`, testProjectID, + strings.Replace(testLogID, "/", "%2F", -1)) + exit := m.Run() + cleanup() + client.Close() + os.Exit(exit) +} + +// waitFor calls f periodically, blocking until it returns true. +// It calls log.Fatal after one minute. +func waitFor(f func() bool) { + timeout := time.NewTimer(1 * time.Minute) + for { + select { + case <-time.After(1 * time.Second): + if f() { + timeout.Stop() + return + } + case <-timeout.C: + log.Fatal("timed out") + } + } +} + +func initLogs(ctx context.Context) { + testLogID = uniqueID(testLogIDPrefix) + // TODO(jba): Clean up from previous aborted tests by deleting old logs; requires ListLogs RPC. +} + +func TestLoggerCreation(t *testing.T) { + c := &Client{projectID: "PROJECT_ID"} + defaultResource := &mrpb.MonitoredResource{Type: "global"} + defaultBundler := &bundler.Bundler{ + DelayThreshold: DefaultDelayThreshold, + BundleCountThreshold: DefaultEntryCountThreshold, + BundleByteThreshold: DefaultEntryByteThreshold, + BundleByteLimit: 0, + BufferedByteLimit: DefaultBufferedByteLimit, + } + for _, test := range []struct { + options []LoggerOption + wantLogger *Logger + wantBundler *bundler.Bundler + }{ + {nil, &Logger{commonResource: defaultResource}, defaultBundler}, + { + []LoggerOption{CommonResource(nil), CommonLabels(map[string]string{"a": "1"})}, + &Logger{commonResource: nil, commonLabels: map[string]string{"a": "1"}}, + defaultBundler, + }, + { + []LoggerOption{DelayThreshold(time.Minute), EntryCountThreshold(99), + EntryByteThreshold(17), EntryByteLimit(18), BufferedByteLimit(19)}, + &Logger{commonResource: defaultResource}, + &bundler.Bundler{ + DelayThreshold: time.Minute, + BundleCountThreshold: 99, + BundleByteThreshold: 17, + BundleByteLimit: 18, + BufferedByteLimit: 19, + }, + }, + } { + gotLogger := c.Logger(testLogID, test.options...) 
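+		// Compare each configurable field separately so a failure pinpoints
+		// the option that was mishandled.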
+ if got, want := gotLogger.commonResource, test.wantLogger.commonResource; !reflect.DeepEqual(got, want) { + t.Errorf("%v: resource: got %v, want %v", test.options, got, want) + } + if got, want := gotLogger.commonLabels, test.wantLogger.commonLabels; !reflect.DeepEqual(got, want) { + t.Errorf("%v: commonLabels: got %v, want %v", test.options, got, want) + } + if got, want := gotLogger.bundler.DelayThreshold, test.wantBundler.DelayThreshold; got != want { + t.Errorf("%v: DelayThreshold: got %v, want %v", test.options, got, want) + } + if got, want := gotLogger.bundler.BundleCountThreshold, test.wantBundler.BundleCountThreshold; got != want { + t.Errorf("%v: BundleCountThreshold: got %v, want %v", test.options, got, want) + } + if got, want := gotLogger.bundler.BundleByteThreshold, test.wantBundler.BundleByteThreshold; got != want { + t.Errorf("%v: BundleByteThreshold: got %v, want %v", test.options, got, want) + } + if got, want := gotLogger.bundler.BundleByteLimit, test.wantBundler.BundleByteLimit; got != want { + t.Errorf("%v: BundleByteLimit: got %v, want %v", test.options, got, want) + } + if got, want := gotLogger.bundler.BufferedByteLimit, test.wantBundler.BufferedByteLimit; got != want { + t.Errorf("%v: BufferedByteLimit: got %v, want %v", test.options, got, want) + } + } +} + +func TestLogSync(t *testing.T) { + ctx := context.Background() + lg := client.Logger(testLogID) + defer deleteLog(ctx, testLogID) + err := lg.LogSync(ctx, Entry{Payload: "hello"}) + if err != nil { + t.Fatal(err) + } + err = lg.LogSync(ctx, Entry{Payload: "goodbye"}) + if err != nil { + t.Fatal(err) + } + // Allow overriding the MonitoredResource. + err = lg.LogSync(ctx, Entry{Payload: "mr", Resource: &mrpb.MonitoredResource{Type: "global"}}) + if err != nil { + t.Fatal(err) + } + + want := []*Entry{ + entryForTesting("hello"), + entryForTesting("goodbye"), + entryForTesting("mr"), + } + var got []*Entry + waitFor(func() bool { + got, err = allTestLogEntries(ctx) + if err != nil { + return false + } + return len(got) >= len(want) + }) + if len(got) != len(want) { + t.Fatalf("got %d entries, want %d", len(got), len(want)) + } + for i := range got { + if !reflect.DeepEqual(got[i], want[i]) { + t.Errorf("#%d:\ngot %+v\nwant %+v", i, got[i], want[i]) + } + } +} + +func entryForTesting(payload interface{}) *Entry { + return &Entry{ + Timestamp: testNow().UTC(), + Payload: payload, + LogName: "projects/" + testProjectID + "/logs/" + testLogID, + Resource: &mrpb.MonitoredResource{Type: "global"}, + } +} + +func countLogEntries(ctx context.Context, filter string) int { + it := client.Entries(ctx, Filter(filter)) + n := 0 + for { + _, err := it.Next() + if err == iterator.Done { + return n + } + if err != nil { + log.Fatalf("counting log entries: %v", err) + } + n++ + } +} + +func allTestLogEntries(ctx context.Context) ([]*Entry, error) { + var es []*Entry + it := client.Entries(ctx, Filter(testFilter)) + for { + e, err := cleanNext(it) + switch err { + case nil: + es = append(es, e) + case iterator.Done: + return es, nil + default: + return nil, err + } + } +} + +func cleanNext(it *EntryIterator) (*Entry, error) { + e, err := it.Next() + if err != nil { + return nil, err + } + clean(e) + return e, nil +} + +func TestLogAndEntries(t *testing.T) { + ctx := context.Background() + payloads := []string{"p1", "p2", "p3", "p4", "p5"} + lg := client.Logger(testLogID) + defer deleteLog(ctx, testLogID) + for _, p := range payloads { + // Use the insert ID to guarantee iteration order. 
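+		// (Entries with equal timestamps are listed in InsertID order, per the
+		// OrderBy contract, so the payloads come back as p1..p5.)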
+ lg.Log(Entry{Payload: p, InsertID: p}) + } + lg.Flush() + var want []*Entry + for _, p := range payloads { + want = append(want, entryForTesting(p)) + } + waitFor(func() bool { return countLogEntries(ctx, testFilter) >= len(want) }) + it := client.Entries(ctx, Filter(testFilter)) + msg, ok := testutil.TestIteratorNext(want, iterator.Done, func() (interface{}, error) { return cleanNext(it) }) + if !ok { + t.Fatal(msg) + } + // TODO(jba): test exact paging. +} + +func TestStandardLogger(t *testing.T) { + ctx := context.Background() + lg := client.Logger(testLogID) + defer deleteLog(ctx, testLogID) + slg := lg.StandardLogger(Info) + + if slg != lg.StandardLogger(Info) { + t.Error("There should be only one standard logger at each severity.") + } + if slg == lg.StandardLogger(Debug) { + t.Error("There should be a different standard logger for each severity.") + } + + slg.Print("info") + lg.Flush() + waitFor(func() bool { return countLogEntries(ctx, testFilter) > 0 }) + got, err := allTestLogEntries(ctx) + if err != nil { + t.Fatal(err) + } + if len(got) != 1 { + t.Fatalf("expected non-nil request with one entry; got:\n%+v", got) + } + if got, want := got[0].Payload.(string), "info\n"; got != want { + t.Errorf("payload: got %q, want %q", got, want) + } + if got, want := Severity(got[0].Severity), Info; got != want { + t.Errorf("severity: got %s, want %s", got, want) + } +} + +func TestToProtoStruct(t *testing.T) { + v := struct { + Foo string `json:"foo"` + Bar int `json:"bar,omitempty"` + Baz []float64 `json:"baz"` + Moo map[string]interface{} `json:"moo"` + }{ + Foo: "foovalue", + Baz: []float64{1.1}, + Moo: map[string]interface{}{ + "a": 1, + "b": "two", + "c": true, + }, + } + + got, err := toProtoStruct(v) + if err != nil { + t.Fatal(err) + } + want := &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "foo": {Kind: &structpb.Value_StringValue{v.Foo}}, + "baz": {Kind: &structpb.Value_ListValue{&structpb.ListValue{ + []*structpb.Value{{Kind: &structpb.Value_NumberValue{1.1}}}}}}, + "moo": {Kind: &structpb.Value_StructValue{ + &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": {Kind: &structpb.Value_NumberValue{1}}, + "b": {Kind: &structpb.Value_StringValue{"two"}}, + "c": {Kind: &structpb.Value_BoolValue{true}}, + }, + }, + }}, + }, + } + if !proto.Equal(got, want) { + t.Errorf("got %+v\nwant %+v", got, want) + } + + // Non-structs should fail to convert. 
+	for _, v := range []interface{}{3, "foo", []int{1, 2, 3}} {
+		_, err := toProtoStruct(v)
+		if err == nil {
+			t.Errorf("%v: got nil, want error", v)
+		}
+	}
+}
+
+func textPayloads(req *logpb.WriteLogEntriesRequest) []string {
+	if req == nil {
+		return nil
+	}
+	var ps []string
+	for _, e := range req.Entries {
+		ps = append(ps, e.GetTextPayload())
+	}
+	return ps
+}
+
+func TestFromLogEntry(t *testing.T) {
+	res := &mrpb.MonitoredResource{Type: "global"}
+	ts, err := ptypes.TimestampProto(testNow())
+	if err != nil {
+		t.Fatal(err)
+	}
+	logEntry := logpb.LogEntry{
+		LogName:   "projects/PROJECT_ID/logs/LOG_ID",
+		Resource:  res,
+		Payload:   &logpb.LogEntry_TextPayload{"hello"},
+		Timestamp: ts,
+		Severity:  logtypepb.LogSeverity_INFO,
+		InsertId:  "123",
+		HttpRequest: &logtypepb.HttpRequest{
+			RequestMethod:                  "GET",
+			RequestUrl:                     "http:://example.com/path?q=1",
+			RequestSize:                    100,
+			Status:                         200,
+			ResponseSize:                   25,
+			UserAgent:                      "user-agent",
+			RemoteIp:                       "127.0.0.1",
+			Referer:                        "referer",
+			CacheHit:                       true,
+			CacheValidatedWithOriginServer: true,
+		},
+		Labels: map[string]string{
+			"a": "1",
+			"b": "two",
+			"c": "true",
+		},
+	}
+	u, err := url.Parse("http:://example.com/path?q=1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := &Entry{
+		LogName:   "projects/PROJECT_ID/logs/LOG_ID",
+		Resource:  res,
+		Timestamp: testNow().In(time.UTC),
+		Severity:  Info,
+		Payload:   "hello",
+		Labels: map[string]string{
+			"a": "1",
+			"b": "two",
+			"c": "true",
+		},
+		InsertID: "123",
+		HTTPRequest: &HTTPRequest{
+			Request: &http.Request{
+				Method: "GET",
+				URL:    u,
+				Header: map[string][]string{
+					"User-Agent": []string{"user-agent"},
+					"Referer":    []string{"referer"},
+				},
+			},
+			RequestSize:                    100,
+			Status:                         200,
+			ResponseSize:                   25,
+			RemoteIP:                       "127.0.0.1",
+			CacheHit:                       true,
+			CacheValidatedWithOriginServer: true,
+		},
+	}
+	got, err := fromLogEntry(&logEntry)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Test sub-values separately because %+v and %#v do not follow pointers.
+	// TODO(jba): use a differ or pretty-printer.
+	if !reflect.DeepEqual(got.HTTPRequest.Request, want.HTTPRequest.Request) {
+		t.Fatalf("HTTPRequest.Request:\ngot %+v\nwant %+v", got.HTTPRequest.Request, want.HTTPRequest.Request)
+	}
+	if !reflect.DeepEqual(got.HTTPRequest, want.HTTPRequest) {
+		t.Fatalf("HTTPRequest:\ngot %+v\nwant %+v", got.HTTPRequest, want.HTTPRequest)
+	}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("FullEntry:\ngot %+v\nwant %+v", got, want)
+	}
+}
+
+func TestListLogEntriesRequest(t *testing.T) {
+	for _, test := range []struct {
+		opts       []EntriesOption
+		projectIDs []string
+		filter     string
+		orderBy    string
+	}{
+		// Default is client's project ID, empty filter and orderBy.
+		{nil,
+			[]string{"PROJECT_ID"}, "", ""},
+		{[]EntriesOption{NewestFirst(), Filter("f")},
+			[]string{"PROJECT_ID"}, "f", "timestamp desc"},
+		{[]EntriesOption{ProjectIDs([]string{"foo"})},
+			[]string{"foo"}, "", ""},
+		{[]EntriesOption{NewestFirst(), Filter("f"), ProjectIDs([]string{"foo"})},
+			[]string{"foo"}, "f", "timestamp desc"},
+		// If there are repeats, last one wins.
+ {[]EntriesOption{NewestFirst(), Filter("no"), ProjectIDs([]string{"foo"}), Filter("f")}, + []string{"foo"}, "f", "timestamp desc"}, + } { + got := listLogEntriesRequest("PROJECT_ID", test.opts) + want := &logpb.ListLogEntriesRequest{ + ProjectIds: test.projectIDs, + Filter: test.filter, + OrderBy: test.orderBy, + } + if !proto.Equal(got, want) { + t.Errorf("%v:\ngot %v\nwant %v", test.opts, got, want) + } + } +} + +func TestSeverity(t *testing.T) { + if got, want := Info.String(), "Info"; got != want { + t.Errorf("got %q, want %q", got, want) + } + if got, want := Severity(-99).String(), "-99"; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestParseSeverity(t *testing.T) { + for _, test := range []struct { + in string + want Severity + }{ + {"", Default}, + {"whatever", Default}, + {"Default", Default}, + {"ERROR", Error}, + {"Error", Error}, + {"error", Error}, + } { + got := ParseSeverity(test.in) + if got != test.want { + t.Errorf("%q: got %s, want %s\n", test.in, got, test.want) + } + } +} + +func TestErrors(t *testing.T) { + // Drain errors already seen. +loop: + for { + select { + case <-errorc: + default: + break loop + } + } + // Try to log something that can't be JSON-marshalled. + lg := client.Logger(testLogID) + lg.Log(Entry{Payload: func() {}}) + // Expect an error. + select { + case <-errorc: // pass + case <-time.After(100 * time.Millisecond): + t.Fatal("expected an error but timed out") + } +} + +type badTokenSource struct{} + +func (badTokenSource) Token() (*oauth2.Token, error) { + return &oauth2.Token{}, nil +} + +func TestPing(t *testing.T) { + // Ping twice, in case the service's InsertID logic messes with the error code. + ctx := context.Background() + // The global client should be valid. + if err := client.Ping(ctx); err != nil { + t.Errorf("project %s: got %v, expected nil", testProjectID, err) + } + if err := client.Ping(ctx); err != nil { + t.Errorf("project %s, #2: got %v, expected nil", testProjectID, err) + } + // nonexistent project + c := newClient(ctx, testProjectID+"-BAD") + if err := c.Ping(ctx); err == nil { + t.Errorf("nonexistent project: want error pinging logging api, got nil") + } + if err := c.Ping(ctx); err == nil { + t.Errorf("nonexistent project, #2: want error pinging logging api, got nil") + } + + // Bad creds. We cannot test this with the fake, since it doesn't do auth. 
+ if integrationTest { + c, err := NewClient(ctx, testProjectID, option.WithTokenSource(badTokenSource{})) + if err != nil { + t.Fatal(err) + } + if err := c.Ping(ctx); err == nil { + t.Errorf("bad creds: want error pinging logging api, got nil") + } + if err := c.Ping(ctx); err == nil { + t.Errorf("bad creds, #2: want error pinging logging api, got nil") + } + if err := c.Close(); err != nil { + t.Fatalf("error closing client: %v", err) + } + } +} + +func TestFromHTTPRequest(t *testing.T) { + const testURL = "http:://example.com/path?q=1" + u, err := url.Parse(testURL) + if err != nil { + t.Fatal(err) + } + req := &HTTPRequest{ + Request: &http.Request{ + Method: "GET", + URL: u, + Header: map[string][]string{ + "User-Agent": []string{"user-agent"}, + "Referer": []string{"referer"}, + }, + }, + RequestSize: 100, + Status: 200, + ResponseSize: 25, + RemoteIP: "127.0.0.1", + CacheHit: true, + CacheValidatedWithOriginServer: true, + } + got := fromHTTPRequest(req) + want := &logtypepb.HttpRequest{ + RequestMethod: "GET", + RequestUrl: testURL, + RequestSize: 100, + Status: 200, + ResponseSize: 25, + UserAgent: "user-agent", + RemoteIp: "127.0.0.1", + Referer: "referer", + CacheHit: true, + CacheValidatedWithOriginServer: true, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %+v\nwant %+v", got, want) + } +} + +// deleteLog is used to clean up a log after a test that writes to it. +func deleteLog(ctx context.Context, logID string) { + client.DeleteLog(ctx, logID) + // DeleteLog can take some time to happen, so we wait for the log to + // disappear. There is no direct way to determine if a log exists, so we + // just wait until there are no log entries associated with the ID. + filter := fmt.Sprintf(`logName = "%s"`, client.logPath(logID)) + waitFor(func() bool { return countLogEntries(ctx, filter) == 0 }) +} diff --git a/vendor/cloud.google.com/go/preview/logging/metrics.go b/vendor/cloud.google.com/go/preview/logging/metrics.go new file mode 100644 index 000000000..3a2450951 --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/metrics.go @@ -0,0 +1,169 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging + +import ( + "fmt" + + vkit "cloud.google.com/go/logging/apiv2" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +// Metric describes a logs-based metric. The value of the metric is the +// number of log entries that match a logs filter. +// +// Metrics are a feature of Stackdriver Monitoring. +// See https://cloud.google.com/monitoring/api/v3/metrics for more about them. +type Metric struct { + // ID is a client-assigned metric identifier. Example: + // "severe_errors". Metric identifiers are limited to 1000 + // characters and can include only the following characters: A-Z, + // a-z, 0-9, and the special characters _-.,+!*',()%/\. 
The + // forward-slash character (/) denotes a hierarchy of name pieces, + // and it cannot be the first character of the name. + ID string + + // Description describes this metric. It is used in documentation. + Description string + + // Filter is an advanced logs filter (see + // https://cloud.google.com/logging/docs/view/advanced_filters). + // Example: "logName:syslog AND severity>=ERROR". + Filter string +} + +// CreateMetric creates a logs-based metric. +func (c *Client) CreateMetric(ctx context.Context, m *Metric) error { + _, err := c.mClient.CreateLogMetric(ctx, &logpb.CreateLogMetricRequest{ + Parent: c.parent(), + Metric: toLogMetric(m), + }) + return err +} + +// DeleteMetric deletes a log-based metric. +// The provided metric ID is the metric identifier. For example, "severe_errors". +func (c *Client) DeleteMetric(ctx context.Context, metricID string) error { + return c.mClient.DeleteLogMetric(ctx, &logpb.DeleteLogMetricRequest{ + MetricName: c.metricPath(metricID), + }) +} + +// Metric gets a logs-based metric. +// The provided metric ID is the metric identifier. For example, "severe_errors". +// Requires ReadScope or AdminScope. +func (c *Client) Metric(ctx context.Context, metricID string) (*Metric, error) { + lm, err := c.mClient.GetLogMetric(ctx, &logpb.GetLogMetricRequest{ + MetricName: c.metricPath(metricID), + }) + if err != nil { + return nil, err + } + return fromLogMetric(lm), nil +} + +// UpdateMetric creates a logs-based metric if it does not exist, or updates an +// existing one. +func (c *Client) UpdateMetric(ctx context.Context, m *Metric) error { + _, err := c.mClient.UpdateLogMetric(ctx, &logpb.UpdateLogMetricRequest{ + MetricName: c.metricPath(m.ID), + Metric: toLogMetric(m), + }) + return err +} + +func (c *Client) metricPath(metricID string) string { + return fmt.Sprintf("%s/metrics/%s", c.parent(), metricID) +} + +// Metrics returns a MetricIterator for iterating over all Metrics in the Client's project. +// Requires ReadScope or AdminScope. +func (c *Client) Metrics(ctx context.Context) *MetricIterator { + it := &MetricIterator{ + ctx: ctx, + client: c.mClient, + req: &logpb.ListLogMetricsRequest{Parent: c.parent()}, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// A MetricIterator iterates over Metrics. +type MetricIterator struct { + ctx context.Context + client *vkit.MetricsClient + pageInfo *iterator.PageInfo + nextFunc func() error + req *logpb.ListLogMetricsRequest + items []*Metric +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MetricIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *MetricIterator) Next() (*Metric, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricIterator) fetch(pageSize int, pageToken string) (string, error) { + // TODO(jba): Do this a nicer way if the generated code supports one. + // TODO(jba): If the above TODO can't be done, find a way to pass metadata in the call. 
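+	// The calls below bypass the wrapping vkit client: a raw generated stub is
+	// built on the client's underlying gRPC connection so that this iterator
+	// can set PageSize and PageToken itself, while gax.Invoke still applies
+	// the client's configured retry/call options.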
+ client := logpb.NewMetricsServiceV2Client(it.client.Connection()) + var res *logpb.ListLogMetricsResponse + err := gax.Invoke(it.ctx, func(ctx context.Context) error { + it.req.PageSize = trunc32(pageSize) + it.req.PageToken = pageToken + var err error + res, err = client.ListLogMetrics(ctx, it.req) + return err + }, it.client.CallOptions.ListLogMetrics...) + if err != nil { + return "", err + } + for _, sp := range res.Metrics { + it.items = append(it.items, fromLogMetric(sp)) + } + return res.NextPageToken, nil +} + +func toLogMetric(m *Metric) *logpb.LogMetric { + return &logpb.LogMetric{ + Name: m.ID, + Description: m.Description, + Filter: m.Filter, + } +} + +func fromLogMetric(lm *logpb.LogMetric) *Metric { + return &Metric{ + ID: lm.Name, + Description: lm.Description, + Filter: lm.Filter, + } +} diff --git a/vendor/cloud.google.com/go/preview/logging/metrics_test.go b/vendor/cloud.google.com/go/preview/logging/metrics_test.go new file mode 100644 index 000000000..ab651f9c9 --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/metrics_test.go @@ -0,0 +1,140 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging + +import ( + "log" + "reflect" + "testing" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +const testMetricIDPrefix = "GO-CLIENT-TEST-METRIC" + +// Initializes the tests before they run. +func initMetrics(ctx context.Context) { + // Clean up from aborted tests. + var IDs []string + it := client.Metrics(ctx) +loop: + for { + m, err := it.Next() + switch err { + case nil: + IDs = append(IDs, m.ID) + case iterator.Done: + break loop + default: + log.Printf("cleanupMetrics: %v", err) + return + } + } + for _, mID := range expiredUniqueIDs(IDs, testMetricIDPrefix) { + client.DeleteMetric(ctx, mID) + } +} + +func TestCreateDeleteMetric(t *testing.T) { + ctx := context.Background() + metric := &Metric{ + ID: uniqueID(testMetricIDPrefix), + Description: "DESC", + Filter: "FILTER", + } + if err := client.CreateMetric(ctx, metric); err != nil { + t.Fatal(err) + } + defer client.DeleteMetric(ctx, metric.ID) + + got, err := client.Metric(ctx, metric.ID) + if err != nil { + t.Fatal(err) + } + if want := metric; !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + if err := client.DeleteMetric(ctx, metric.ID); err != nil { + t.Fatal(err) + } + + if _, err := client.Metric(ctx, metric.ID); err == nil { + t.Fatal("got no error, expected one") + } +} + +func TestUpdateMetric(t *testing.T) { + ctx := context.Background() + metric := &Metric{ + ID: uniqueID(testMetricIDPrefix), + Description: "DESC", + Filter: "FILTER", + } + + // Updating a non-existent metric creates a new one. 
+ if err := client.UpdateMetric(ctx, metric); err != nil { + t.Fatal(err) + } + defer client.DeleteMetric(ctx, metric.ID) + got, err := client.Metric(ctx, metric.ID) + if err != nil { + t.Fatal(err) + } + if want := metric; !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + // Updating an existing metric changes it. + metric.Description = "CHANGED" + if err := client.UpdateMetric(ctx, metric); err != nil { + t.Fatal(err) + } + got, err = client.Metric(ctx, metric.ID) + if err != nil { + t.Fatal(err) + } + if want := metric; !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestListMetrics(t *testing.T) { + ctx := context.Background() + + var metrics []*Metric + for i := 0; i < 10; i++ { + metrics = append(metrics, &Metric{ + ID: uniqueID(testMetricIDPrefix), + Description: "DESC", + Filter: "FILTER", + }) + } + for _, m := range metrics { + if err := client.CreateMetric(ctx, m); err != nil { + t.Fatalf("Create(%q): %v", m.ID, err) + } + defer client.DeleteMetric(ctx, m.ID) + } + + it := client.Metrics(ctx) + msg, ok := testutil.TestIteratorNext(metrics, iterator.Done, func() (interface{}, error) { return it.Next() }) + if !ok { + t.Fatal(msg) + } + // TODO(jba): test exact paging. +} diff --git a/vendor/cloud.google.com/go/preview/logging/resources.go b/vendor/cloud.google.com/go/preview/logging/resources.go new file mode 100644 index 000000000..401e2c894 --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/resources.go @@ -0,0 +1,83 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging + +import ( + vkit "cloud.google.com/go/logging/apiv2" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +// ResourceDescriptors returns a ResourceDescriptorIterator +// for iterating over MonitoredResourceDescriptors. Requires ReadScope or AdminScope. +// See https://cloud.google.com/logging/docs/api/v2/#monitored-resources for an explanation of +// monitored resources. +// See https://cloud.google.com/logging/docs/api/v2/resource-list for a list of monitored resources. +func (c *Client) ResourceDescriptors(ctx context.Context) *ResourceDescriptorIterator { + it := &ResourceDescriptorIterator{ + ctx: ctx, + client: c.lClient, + req: &logpb.ListMonitoredResourceDescriptorsRequest{}, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// ResourceDescriptorIterator is an iterator over MonitoredResourceDescriptors. 
+type ResourceDescriptorIterator struct { + ctx context.Context + client *vkit.Client + pageInfo *iterator.PageInfo + nextFunc func() error + req *logpb.ListMonitoredResourceDescriptorsRequest + items []*mrpb.MonitoredResourceDescriptor +} + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *ResourceDescriptorIterator) Next() (*mrpb.MonitoredResourceDescriptor, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ResourceDescriptorIterator) fetch(pageSize int, pageToken string) (string, error) { + // TODO(jba): Do this a nicer way if the generated code supports one. + // TODO(jba): If the above TODO can't be done, find a way to pass metadata in the call. + client := logpb.NewLoggingServiceV2Client(it.client.Connection()) + var res *logpb.ListMonitoredResourceDescriptorsResponse + err := gax.Invoke(it.ctx, func(ctx context.Context) error { + it.req.PageSize = trunc32(pageSize) + it.req.PageToken = pageToken + var err error + res, err = client.ListMonitoredResourceDescriptors(ctx, it.req) + return err + }, it.client.CallOptions.ListMonitoredResourceDescriptors...) + if err != nil { + return "", err + } + it.items = append(it.items, res.ResourceDescriptors...) + return res.NextPageToken, nil +} diff --git a/vendor/cloud.google.com/go/preview/logging/resources_test.go b/vendor/cloud.google.com/go/preview/logging/resources_test.go new file mode 100644 index 000000000..0fe8e21c6 --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/resources_test.go @@ -0,0 +1,46 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging + +import ( + "testing" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func TestMonitoredResourceDescriptors(t *testing.T) { + // We can't create MonitoredResourceDescriptors, and there is no guarantee + // about what the service will return. So we just check that the result is + // non-empty. + it := client.ResourceDescriptors(context.Background()) + n := 0 +loop: + for { + _, err := it.Next() + switch err { + case nil: + n++ + case iterator.Done: + break loop + default: + t.Fatal(err) + } + } + if n == 0 { + t.Fatal("Next: got no MetricResourceDescriptors, expected at least one") + } + // TODO(jba) test pagination. +} diff --git a/vendor/cloud.google.com/go/preview/logging/sinks.go b/vendor/cloud.google.com/go/preview/logging/sinks.go new file mode 100644 index 000000000..f5215f2d9 --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/sinks.go @@ -0,0 +1,184 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging + +import ( + "fmt" + + vkit "cloud.google.com/go/logging/apiv2" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +// Sink describes a sink used to export log entries outside Stackdriver +// Logging. Incoming log entries matching a filter are exported to a +// destination (a Cloud Storage bucket, BigQuery dataset or Cloud Pub/Sub +// topic). +// +// For more information, see https://cloud.google.com/logging/docs/export/using_exported_logs. +// (The Sinks in this package are what the documentation refers to as "project sinks".) +type Sink struct { + // ID is a client-assigned sink identifier. Example: + // "my-severe-errors-to-pubsub". + // Sink identifiers are limited to 1000 characters + // and can include only the following characters: A-Z, a-z, + // 0-9, and the special characters "_-.". + ID string + + // Destination is the export destination. See + // https://cloud.google.com/logging/docs/api/tasks/exporting-logs. + // Examples: "storage.googleapis.com/a-bucket", + // "bigquery.googleapis.com/projects/a-project-id/datasets/a-dataset". + Destination string + + // Filter optionally specifies an advanced logs filter (see + // https://cloud.google.com/logging/docs/view/advanced_filters) that + // defines the log entries to be exported. Example: "logName:syslog AND + // severity>=ERROR". If omitted, all entries are returned. + Filter string +} + +// CreateSink creates a Sink. It returns an error if the Sink already exists. +// Requires AdminScope. +func (c *Client) CreateSink(ctx context.Context, sink *Sink) (*Sink, error) { + ls, err := c.sClient.CreateSink(ctx, &logpb.CreateSinkRequest{ + Parent: c.parent(), + Sink: toLogSink(sink), + }) + if err != nil { + fmt.Printf("Sink: %+v\n", toLogSink(sink)) + return nil, err + } + return fromLogSink(ls), nil +} + +// DeleteSink deletes a sink. The provided sinkID is the sink's identifier, such as +// "my-severe-errors-to-pubsub". +// Requires AdminScope. +func (c *Client) DeleteSink(ctx context.Context, sinkID string) error { + return c.sClient.DeleteSink(ctx, &logpb.DeleteSinkRequest{ + SinkName: c.sinkPath(sinkID), + }) +} + +// Sink gets a sink. The provided sinkID is the sink's identifier, such as +// "my-severe-errors-to-pubsub". +// Requires ReadScope or AdminScope. +func (c *Client) Sink(ctx context.Context, sinkID string) (*Sink, error) { + ls, err := c.sClient.GetSink(ctx, &logpb.GetSinkRequest{ + SinkName: c.sinkPath(sinkID), + }) + if err != nil { + return nil, err + } + return fromLogSink(ls), nil +} + +// UpdateSink updates an existing Sink, or creates a new one if the Sink doesn't exist. +// Requires AdminScope. 
+func (c *Client) UpdateSink(ctx context.Context, sink *Sink) (*Sink, error) { + ls, err := c.sClient.UpdateSink(ctx, &logpb.UpdateSinkRequest{ + SinkName: c.sinkPath(sink.ID), + Sink: toLogSink(sink), + }) + if err != nil { + return nil, err + } + return fromLogSink(ls), err +} + +func (c *Client) sinkPath(sinkID string) string { + return fmt.Sprintf("%s/sinks/%s", c.parent(), sinkID) +} + +// Sinks returns a SinkIterator for iterating over all Sinks in the Client's project. +// Requires ReadScope or AdminScope. +func (c *Client) Sinks(ctx context.Context) *SinkIterator { + it := &SinkIterator{ + ctx: ctx, + client: c.sClient, + req: &logpb.ListSinksRequest{Parent: c.parent()}, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// A SinkIterator iterates over Sinks. +type SinkIterator struct { + ctx context.Context + client *vkit.ConfigClient + pageInfo *iterator.PageInfo + nextFunc func() error + req *logpb.ListSinksRequest + items []*Sink +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SinkIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *SinkIterator) Next() (*Sink, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SinkIterator) fetch(pageSize int, pageToken string) (string, error) { + // TODO(jba): Do this a nicer way if the generated code supports one. + // TODO(jba): If the above TODO can't be done, find a way to pass metadata in the call. + client := logpb.NewConfigServiceV2Client(it.client.Connection()) + var res *logpb.ListSinksResponse + err := gax.Invoke(it.ctx, func(ctx context.Context) error { + it.req.PageSize = trunc32(pageSize) + it.req.PageToken = pageToken + var err error + res, err = client.ListSinks(ctx, it.req) + return err + }, it.client.CallOptions.ListSinks...) + if err != nil { + return "", err + } + for _, sp := range res.Sinks { + it.items = append(it.items, fromLogSink(sp)) + } + return res.NextPageToken, nil +} + +func toLogSink(s *Sink) *logpb.LogSink { + return &logpb.LogSink{ + Name: s.ID, + Destination: s.Destination, + Filter: s.Filter, + OutputVersionFormat: logpb.LogSink_V2, + } +} + +func fromLogSink(ls *logpb.LogSink) *Sink { + return &Sink{ + ID: ls.Name, + Destination: ls.Destination, + Filter: ls.Filter, + } +} diff --git a/vendor/cloud.google.com/go/preview/logging/sinks_test.go b/vendor/cloud.google.com/go/preview/logging/sinks_test.go new file mode 100644 index 000000000..5539dd14c --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/sinks_test.go @@ -0,0 +1,214 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(jba): document in CONTRIBUTING.md that service account must be given "Logs Configuration Writer" IAM role for sink tests to pass. +// TODO(jba): [cont] (1) From top left menu, go to IAM & Admin. (2) In Roles dropdown for acct, select Logging > Logs Configuration Writer. (3) Save. +// TODO(jba): Also, cloud-logs@google.com must have Owner permission on the GCS bucket named for the test project. + +package logging + +import ( + "log" + "reflect" + "testing" + + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/storage" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +const testSinkIDPrefix = "GO-CLIENT-TEST-SINK" + +var testSinkDestination string + +// Called just before TestMain calls m.Run. +// Returns a cleanup function to be called after the tests finish. +func initSinks(ctx context.Context) func() { + // Create a unique GCS bucket so concurrent tests don't interfere with each other. + testBucketPrefix := testProjectID + "-log-sink" + testBucket := uniqueID(testBucketPrefix) + testSinkDestination = "storage.googleapis.com/" + testBucket + var storageClient *storage.Client + if integrationTest { + // Create a unique bucket as a sink destination, and give the cloud logging account + // owner right. + ts := testutil.TokenSource(ctx, storage.ScopeFullControl) + var err error + storageClient, err = storage.NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("new storage client: %v", err) + } + bucket := storageClient.Bucket(testBucket) + if err := bucket.Create(ctx, testProjectID, nil); err != nil { + log.Fatalf("creating storage bucket %q: %v", testBucket, err) + } + if err := bucket.ACL().Set(ctx, "group-cloud-logs@google.com", storage.RoleOwner); err != nil { + log.Fatalf("setting owner role: %v", err) + } + } + // Clean up from aborted tests. + for _, sID := range expiredUniqueIDs(sinkIDs(ctx), testSinkIDPrefix) { + client.DeleteSink(ctx, sID) // ignore error + } + if integrationTest { + for _, bn := range expiredUniqueIDs(bucketNames(ctx, storageClient), testBucketPrefix) { + storageClient.Bucket(bn).Delete(ctx) // ignore error + } + return func() { + if err := storageClient.Bucket(testBucket).Delete(ctx); err != nil { + log.Printf("deleting %q: %v", testBucket, err) + } + storageClient.Close() + } + } + return func() {} +} + +// Collect all sink IDs for the test project. +func sinkIDs(ctx context.Context) []string { + var IDs []string + it := client.Sinks(ctx) +loop: + for { + s, err := it.Next() + switch err { + case nil: + IDs = append(IDs, s.ID) + case iterator.Done: + break loop + default: + log.Printf("listing sinks: %v", err) + break loop + } + } + return IDs +} + +// Collect the name of all buckets for the test project. 
+func bucketNames(ctx context.Context, client *storage.Client) []string { + var names []string + it := client.Buckets(ctx, testProjectID) +loop: + for { + b, err := it.Next() + switch err { + case nil: + names = append(names, b.Name) + case iterator.Done: + break loop + default: + log.Printf("listing buckets: %v", err) + break loop + } + } + return names +} + +func TestCreateDeleteSink(t *testing.T) { + ctx := context.Background() + sink := &Sink{ + ID: uniqueID(testSinkIDPrefix), + Destination: testSinkDestination, + Filter: testFilter, + } + got, err := client.CreateSink(ctx, sink) + if err != nil { + t.Fatal(err) + } + defer client.DeleteSink(ctx, sink.ID) + if want := sink; !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + got, err = client.Sink(ctx, sink.ID) + if err != nil { + t.Fatal(err) + } + if want := sink; !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + if err := client.DeleteSink(ctx, sink.ID); err != nil { + t.Fatal(err) + } + + if _, err := client.Sink(ctx, sink.ID); err == nil { + t.Fatal("got no error, expected one") + } +} + +func TestUpdateSink(t *testing.T) { + ctx := context.Background() + sink := &Sink{ + ID: uniqueID(testSinkIDPrefix), + Destination: testSinkDestination, + Filter: testFilter, + } + + // Updating a non-existent sink creates a new one. + got, err := client.UpdateSink(ctx, sink) + if err != nil { + t.Fatal(err) + } + defer client.DeleteSink(ctx, sink.ID) + if want := sink; !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + got, err = client.Sink(ctx, sink.ID) + if err != nil { + t.Fatal(err) + } + if want := sink; !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + // Updating an existing sink changes it. + sink.Filter = "" + if _, err := client.UpdateSink(ctx, sink); err != nil { + t.Fatal(err) + } + got, err = client.Sink(ctx, sink.ID) + if err != nil { + t.Fatal(err) + } + if want := sink; !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestListSinks(t *testing.T) { + ctx := context.Background() + var sinks []*Sink + for i := 0; i < 4; i++ { + sinks = append(sinks, &Sink{ + ID: uniqueID(testSinkIDPrefix), + Destination: testSinkDestination, + Filter: testFilter, + }) + } + for _, s := range sinks { + if _, err := client.CreateSink(ctx, s); err != nil { + t.Fatalf("Create(%q): %v", s.ID, err) + } + defer client.DeleteSink(ctx, s.ID) + } + + it := client.Sinks(ctx) + msg, ok := testutil.TestIteratorNext(sinks, iterator.Done, func() (interface{}, error) { return it.Next() }) + if !ok { + t.Fatal(msg) + } + // TODO(jba): test exact paging. +} diff --git a/vendor/cloud.google.com/go/preview/logging/unique_test.go b/vendor/cloud.google.com/go/preview/logging/unique_test.go new file mode 100644 index 000000000..346b0037f --- /dev/null +++ b/vendor/cloud.google.com/go/preview/logging/unique_test.go @@ -0,0 +1,122 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// This file supports generating unique IDs so that multiple test executions +// don't interfere with each other, and cleaning up old entities that may +// remain if tests exit early. + +package logging + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "testing" + "time" +) + +var ( + startTime = time.Now() + uniqueIDCounter int + // Items older than expiredAge are remnants from previous tests and can be deleted. + expiredAge = 24 * time.Hour +) + +// uniqueID generates unique IDs so tests don't interfere with each other. +// All unique IDs generated in the same test execution will have the same timestamp. +func uniqueID(prefix string) string { + uniqueIDCounter++ + // Zero-pad the counter for lexical sort order. + return fmt.Sprintf("%s-t%d-%04d", prefix, startTime.UnixNano(), uniqueIDCounter) +} + +// expiredUniqueIDs returns a subset of ids that are unique IDs as generated by +// uniqueID(prefix) and are older than expiredAge. +func expiredUniqueIDs(ids []string, prefix string) []string { + var expired []string + for _, id := range ids { + t, ok := extractTime(id, prefix) + if ok && time.Since(t) > expiredAge { + expired = append(expired, id) + } + } + return expired +} + +// extractTime extracts the timestamp of s, which must begin with prefix and +// match the form generated by uniqueID. The second return value is true on +// success, false if there was a problem. +func extractTime(s, prefix string) (time.Time, bool) { + if !strings.HasPrefix(s, prefix+"-t") { + return time.Time{}, false + } + s = s[len(prefix)+2:] + i := strings.Index(s, "-") + if i < 0 { + return time.Time{}, false + } + nanos, err := strconv.ParseInt(s[:i], 10, 64) + if err != nil { + return time.Time{}, false + } + return time.Unix(0, nanos), true +} + +func TestExtractTime(t *testing.T) { + uid := uniqueID("unique-ID") + got, ok := extractTime(uid, "unique-ID") + if !ok { + t.Fatal("got ok = false, want true") + } + if !startTime.Equal(got) { + t.Errorf("got %s, want %s", got, startTime) + } + + got, ok = extractTime("p-t0-doesnotmatter", "p") + if !ok { + t.Fatal("got false, want true") + } + if want := time.Unix(0, 0); !want.Equal(got) { + t.Errorf("got %s, want %s", got, want) + } + if _, ok = extractTime("invalid-time-1234", "invalid"); ok { + t.Error("got true, want false") + } +} + +func TestExpiredUniqueIDs(t *testing.T) { + const prefix = "uid" + // The freshly unique IDs will have startTime as their timestamp. + uids := []string{uniqueID(prefix), "uid-tinvalid-1234", uniqueID(prefix), "uid-t0-1111"} + + // This test hasn't been running for very long, so only the last ID is expired. + got := expiredUniqueIDs(uids, prefix) + want := []string{uids[3]} + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } + + time.Sleep(100 * time.Millisecond) + + prev := expiredAge + expiredAge = 10 * time.Millisecond + defer func() { expiredAge = prev }() + // This test has been running for at least 10ms, so all but the invalid ID have expired. + got = expiredUniqueIDs(uids, prefix) + want = []string{uids[0], uids[2], uids[3]} + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/acker.go b/vendor/cloud.google.com/go/pubsub/acker.go new file mode 100644 index 000000000..088ed79cf --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/acker.go @@ -0,0 +1,159 @@ +// Copyright 2016 Google Inc. 
All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// ackBuffer stores the pending ack IDs and notifies the Dirty channel when it becomes non-empty.
+type ackBuffer struct {
+	Dirty chan struct{}
+	// Close Done when the ackBuffer is no longer needed.
+	Done chan struct{}
+
+	mu      sync.Mutex
+	pending []string
+	send    bool
+}
+
+// Add adds ackID to the buffer.
+func (buf *ackBuffer) Add(ackID string) {
+	buf.mu.Lock()
+	defer buf.mu.Unlock()
+	buf.pending = append(buf.pending, ackID)
+
+	// If we are transitioning into a non-empty notification state.
+	if buf.send && len(buf.pending) == 1 {
+		buf.notify()
+	}
+}
+
+// RemoveAll removes all ackIDs from the buffer and returns them.
+func (buf *ackBuffer) RemoveAll() []string {
+	buf.mu.Lock()
+	defer buf.mu.Unlock()
+
+	ret := buf.pending
+	buf.pending = nil
+	return ret
+}
+
+// SendNotifications enables sending dirty notification on empty -> non-empty transitions.
+// If the buffer is already non-empty, a notification will be sent immediately.
+func (buf *ackBuffer) SendNotifications() {
+	buf.mu.Lock()
+	defer buf.mu.Unlock()
+
+	buf.send = true
+	// If we are transitioning into a non-empty notification state.
+	if len(buf.pending) > 0 {
+		buf.notify()
+	}
+}
+
+func (buf *ackBuffer) notify() {
+	go func() {
+		select {
+		case buf.Dirty <- struct{}{}:
+		case <-buf.Done:
+		}
+	}()
+}
+
+// acker acks messages in batches.
+type acker struct {
+	s       service
+	Ctx     context.Context  // The context to use when acknowledging messages.
+	Sub     string           // The full name of the subscription.
+	AckTick <-chan time.Time // AckTick supplies the frequency with which to make ack requests.
+
+	// Notify is called with an ack ID after the message with that ack ID
+	// has been processed. An ackID is considered to have been processed
+	// if at least one attempt has been made to acknowledge it.
+	Notify func(string)
+
+	ackBuffer
+
+	wg   sync.WaitGroup
+	done chan struct{}
+}
+
+// Start initiates processing of ackIDs which are added via Add.
+// Notify is called with each ackID once it has been processed.
+func (a *acker) Start() {
+	a.done = make(chan struct{})
+	a.ackBuffer.Dirty = make(chan struct{})
+	a.ackBuffer.Done = a.done
+
+	a.wg.Add(1)
+	go func() {
+		defer a.wg.Done()
+		for {
+			select {
+			case <-a.ackBuffer.Dirty:
+				a.ack(a.ackBuffer.RemoveAll())
+			case <-a.AckTick:
+				a.ack(a.ackBuffer.RemoveAll())
+			case <-a.done:
+				return
+			}
+		}
+	}()
+}
+
+// Ack adds an ack id to be acked in the next batch.
+func (a *acker) Ack(ackID string) {
+	a.ackBuffer.Add(ackID)
+}
+
+// FastMode switches acker into a mode which acks messages as they arrive, rather than waiting
+// for a.AckTick.
+func (a *acker) FastMode() {
+	a.ackBuffer.SendNotifications()
+}
+
+// Stop drops all pending messages, and releases resources before returning.
+func (a *acker) Stop() {
+	close(a.done)
+	a.wg.Wait()
+}
+
+const maxAckAttempts = 2
+
+// ack acknowledges the supplied ackIDs.
+// After the acknowledgement request has completed (regardless of its success +// or failure), ids will be passed to a.Notify. +func (a *acker) ack(ids []string) { + head, tail := a.s.splitAckIDs(ids) + for len(head) > 0 { + for i := 0; i < maxAckAttempts; i++ { + if a.s.acknowledge(a.Ctx, a.Sub, head) == nil { + break + } + } + // NOTE: if retry gives up and returns an error, we simply drop + // those ack IDs. The messages will be redelivered and this is + // a documented behaviour of the API. + head, tail = a.s.splitAckIDs(tail) + } + for _, id := range ids { + a.Notify(id) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/acker_test.go b/vendor/cloud.google.com/go/pubsub/acker_test.go new file mode 100644 index 000000000..9e283bafc --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/acker_test.go @@ -0,0 +1,262 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "errors" + "reflect" + "sort" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestAcker(t *testing.T) { + tick := make(chan time.Time) + s := &testService{acknowledgeCalled: make(chan acknowledgeCall)} + + processed := make(chan string, 10) + acker := &acker{ + s: s, + Ctx: context.Background(), + Sub: "subname", + AckTick: tick, + Notify: func(ackID string) { processed <- ackID }, + } + acker.Start() + + checkAckProcessed := func(ackIDs []string) { + got := <-s.acknowledgeCalled + sort.Strings(got.ackIDs) + + want := acknowledgeCall{ + subName: "subname", + ackIDs: ackIDs, + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("acknowledge: got:\n%v\nwant:\n%v", got, want) + } + } + + acker.Ack("a") + acker.Ack("b") + tick <- time.Time{} + checkAckProcessed([]string{"a", "b"}) + acker.Ack("c") + tick <- time.Time{} + checkAckProcessed([]string{"c"}) + acker.Stop() + + // all IDS should have been sent to processed. + close(processed) + processedIDs := []string{} + for id := range processed { + processedIDs = append(processedIDs, id) + } + sort.Strings(processedIDs) + want := []string{"a", "b", "c"} + if !reflect.DeepEqual(processedIDs, want) { + t.Errorf("acker processed: got:\n%v\nwant:\n%v", processedIDs, want) + } +} + +func TestAckerFastMode(t *testing.T) { + tick := make(chan time.Time) + s := &testService{acknowledgeCalled: make(chan acknowledgeCall)} + + processed := make(chan string, 10) + acker := &acker{ + s: s, + Ctx: context.Background(), + Sub: "subname", + AckTick: tick, + Notify: func(ackID string) { processed <- ackID }, + } + acker.Start() + + checkAckProcessed := func(ackIDs []string) { + got := <-s.acknowledgeCalled + sort.Strings(got.ackIDs) + + want := acknowledgeCall{ + subName: "subname", + ackIDs: ackIDs, + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("acknowledge: got:\n%v\nwant:\n%v", got, want) + } + } + // No ticks are sent; fast mode doesn't need them. 
+ acker.Ack("a") + acker.Ack("b") + acker.FastMode() + checkAckProcessed([]string{"a", "b"}) + acker.Ack("c") + checkAckProcessed([]string{"c"}) + acker.Stop() + + // all IDS should have been sent to processed. + close(processed) + processedIDs := []string{} + for id := range processed { + processedIDs = append(processedIDs, id) + } + sort.Strings(processedIDs) + want := []string{"a", "b", "c"} + if !reflect.DeepEqual(processedIDs, want) { + t.Errorf("acker processed: got:\n%v\nwant:\n%v", processedIDs, want) + } +} + +// TestAckerStop checks that Stop returns immediately. +func TestAckerStop(t *testing.T) { + tick := make(chan time.Time) + s := &testService{acknowledgeCalled: make(chan acknowledgeCall, 10)} + + processed := make(chan string) + acker := &acker{ + s: s, + Ctx: context.Background(), + Sub: "subname", + AckTick: tick, + Notify: func(ackID string) { processed <- ackID }, + } + + acker.Start() + + stopped := make(chan struct{}) + + acker.Ack("a") + + go func() { + acker.Stop() + stopped <- struct{}{} + }() + + // Stopped should have been written to by the time this sleep completes. + time.Sleep(time.Millisecond) + + // Receiving from processed should cause Stop to subsequently return, + // so it should never be possible to read from stopped before + // processed. + select { + case <-stopped: + case <-processed: + t.Errorf("acker.Stop processed an ack id before returning") + case <-time.After(time.Millisecond): + t.Errorf("acker.Stop never returned") + } +} + +type ackCallResult struct { + ackIDs []string + err error +} + +type ackService struct { + service + + calls []ackCallResult + + t *testing.T // used for error logging. +} + +func (as *ackService) acknowledge(ctx context.Context, subName string, ackIDs []string) error { + if len(as.calls) == 0 { + as.t.Fatalf("unexpected call to acknowledge: ackIDs: %v", ackIDs) + } + call := as.calls[0] + as.calls = as.calls[1:] + + if got, want := ackIDs, call.ackIDs; !reflect.DeepEqual(got, want) { + as.t.Errorf("unexpected arguments to acknowledge: got: %v ; want: %v", got, want) + } + return call.err +} + +// Test implementation returns the first 2 elements as head, and the rest as tail. +func (as *ackService) splitAckIDs(ids []string) ([]string, []string) { + if len(ids) < 2 { + return ids, nil + } + return ids[:2], ids[2:] +} + +func TestAckerSplitsBatches(t *testing.T) { + type testCase struct { + calls []ackCallResult + } + for _, tc := range []testCase{ + { + calls: []ackCallResult{ + { + ackIDs: []string{"a", "b"}, + }, + { + ackIDs: []string{"c", "d"}, + }, + { + ackIDs: []string{"e", "f"}, + }, + }, + }, + { + calls: []ackCallResult{ + { + ackIDs: []string{"a", "b"}, + err: errors.New("bang"), + }, + // On error we retry once. + { + ackIDs: []string{"a", "b"}, + err: errors.New("bang"), + }, + // We give up after failing twice, so we move on to the next set, "c" and "d" + { + ackIDs: []string{"c", "d"}, + err: errors.New("bang"), + }, + // Again, we retry once. 
+				{
+					ackIDs: []string{"c", "d"},
+				},
+				{
+					ackIDs: []string{"e", "f"},
+				},
+			},
+		},
+	} {
+		s := &ackService{
+			t:     t,
+			calls: tc.calls,
+		}
+
+		acker := &acker{
+			s:      s,
+			Ctx:    context.Background(),
+			Sub:    "subname",
+			Notify: func(string) {},
+		}
+
+		acker.ack([]string{"a", "b", "c", "d", "e", "f"})
+
+		if len(s.calls) != 0 {
+			t.Errorf("expected ack calls did not occur: %v", s.calls)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/README.md b/vendor/cloud.google.com/go/pubsub/apiv1/README.md
new file mode 100644
index 000000000..ae6634039
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/README.md
@@ -0,0 +1,11 @@
+Auto-generated pubsub v1 clients
+=================================
+
+This package includes auto-generated clients for the pubsub v1 API.
+
+Use the handwritten pubsub client (in the parent directory,
+cloud.google.com/go/pubsub) in preference to this.
+
+This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME.
+
+
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go
new file mode 100644
index 000000000..fbeb488e5
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go
@@ -0,0 +1,22 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package pubsub is an experimental, auto-generated package for the
+// pubsub API.
+//
+// Provides reliable, many-to-many, asynchronous messaging between applications.
+//
+package pubsub // import "cloud.google.com/go/pubsub/apiv1"
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go
new file mode 100644
index 000000000..f6f072e1f
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go
@@ -0,0 +1,454 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+ +package pubsub + +import ( + "fmt" + "math" + "runtime" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +var ( + publisherProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + publisherTopicPathTemplate = gax.MustCompilePathTemplate("projects/{project}/topics/{topic}") +) + +// PublisherCallOptions contains the retry settings for each method of this client. +type PublisherCallOptions struct { + CreateTopic []gax.CallOption + Publish []gax.CallOption + GetTopic []gax.CallOption + ListTopics []gax.CallOption + ListTopicSubscriptions []gax.CallOption + DeleteTopic []gax.CallOption +} + +func defaultPublisherClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("pubsub.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub", + ), + } +} + +func defaultPublisherCallOptions() *PublisherCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"messaging", "one_plus_delivery"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + + return &PublisherCallOptions{ + CreateTopic: retry[[2]string{"default", "idempotent"}], + Publish: retry[[2]string{"messaging", "one_plus_delivery"}], + GetTopic: retry[[2]string{"default", "idempotent"}], + ListTopics: retry[[2]string{"default", "idempotent"}], + ListTopicSubscriptions: retry[[2]string{"default", "idempotent"}], + DeleteTopic: retry[[2]string{"default", "idempotent"}], + } +} + +// PublisherClient is a client for interacting with Publisher. +type PublisherClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client pubsubpb.PublisherClient + + // The call options for this service. + CallOptions *PublisherCallOptions + + // The metadata to be sent with each request. + metadata map[string][]string +} + +// NewPublisherClient creates a new publisher service client. +// +// The service that an application uses to manipulate topics, and to send +// messages to a topic. +func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultPublisherClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &PublisherClient{ + conn: conn, + client: pubsubpb.NewPublisherClient(conn), + CallOptions: defaultPublisherCallOptions(), + } + c.SetGoogleClientInfo("gax", gax.Version) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *PublisherClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
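+//
+// A typical lifecycle, sketched (error handling elided):
+//
+//	c, err := NewPublisherClient(ctx)
+//	if err != nil { /* handle err */ }
+//	defer c.Close()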
+func (c *PublisherClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *PublisherClient) SetGoogleClientInfo(name, version string) { + c.metadata = map[string][]string{ + "x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())}, + } +} + +// ProjectPath returns the path for the project resource. +func PublisherProjectPath(project string) string { + path, err := publisherProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// TopicPath returns the path for the topic resource. +func PublisherTopicPath(project string, topic string) string { + path, err := publisherTopicPathTemplate.Render(map[string]string{ + "project": project, + "topic": topic, + }) + if err != nil { + panic(err) + } + return path +} + +// CreateTopic creates the given topic with the given name. +func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic) (*pubsubpb.Topic, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.CreateTopic(ctx, req) + return err + }, c.CallOptions.CreateTopic...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Publish adds one or more messages to the topic. Returns `NOT_FOUND` if the topic +// does not exist. The message payload must not be empty; it must contain +// either a non-empty data field, or at least one attribute. +func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *pubsubpb.PublishResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.Publish(ctx, req) + return err + }, c.CallOptions.Publish...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetTopic gets the configuration of a topic. +func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest) (*pubsubpb.Topic, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.GetTopic(ctx, req) + return err + }, c.CallOptions.GetTopic...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListTopics lists matching topics. +func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest) *TopicIterator { + ctx = metadata.NewContext(ctx, c.metadata) + it := &TopicIterator{} + it.apiCall = func() error { + var resp *pubsubpb.ListTopicsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + req.PageToken = it.nextPageToken + req.PageSize = it.pageSize + resp, err = c.client.ListTopics(ctx, req) + return err + }, c.CallOptions.ListTopics...) + if err != nil { + return err + } + if resp.NextPageToken == "" { + it.atLastPage = true + } + it.nextPageToken = resp.NextPageToken + it.items = resp.Topics + return nil + } + return it +} + +// ListTopicSubscriptions lists the name of the subscriptions for this topic. 
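+// The returned StringIterator yields fully-qualified subscription resource
+// names. A minimal sketch (the project and topic names are placeholders):
+//
+//	it := c.ListTopicSubscriptions(ctx, &pubsubpb.ListTopicSubscriptionsRequest{
+//		Topic: PublisherTopicPath("my-project", "my-topic"),
+//	})
+//	for {
+//		sub, err := it.Next()
+//		if err == Done {
+//			break
+//		}
+//		if err != nil {
+//			// handle err
+//			break
+//		}
+//		_ = sub
+//	}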
+func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest) *StringIterator { + ctx = metadata.NewContext(ctx, c.metadata) + it := &StringIterator{} + it.apiCall = func() error { + var resp *pubsubpb.ListTopicSubscriptionsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + req.PageToken = it.nextPageToken + req.PageSize = it.pageSize + resp, err = c.client.ListTopicSubscriptions(ctx, req) + return err + }, c.CallOptions.ListTopicSubscriptions...) + if err != nil { + return err + } + if resp.NextPageToken == "" { + it.atLastPage = true + } + it.nextPageToken = resp.NextPageToken + it.items = resp.Subscriptions + return nil + } + return it +} + +// DeleteTopic deletes the topic with the given name. Returns `NOT_FOUND` if the topic +// does not exist. After a topic is deleted, a new topic may be created with +// the same name; this is an entirely new topic with none of the old +// configuration or subscriptions. Existing subscriptions to this topic are +// not deleted, but their `topic` field is set to `_deleted-topic_`. +func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest) error { + ctx = metadata.NewContext(ctx, c.metadata) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.client.DeleteTopic(ctx, req) + return err + }, c.CallOptions.DeleteTopic...) + return err +} + +// TopicIterator manages a stream of *pubsubpb.Topic. +type TopicIterator struct { + // The current page data. + items []*pubsubpb.Topic + atLastPage bool + currentIndex int + pageSize int32 + nextPageToken string + apiCall func() error +} + +// NextPage returns the next page of results. +// It will return at most the number of results specified by the last call to SetPageSize. +// If SetPageSize was never called or was called with a value less than 1, +// the page size is determined by the underlying service. +// +// NextPage may return a second return value of Done along with the last page of results. After +// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). +// +// Next and NextPage should not be used with the same iterator. +func (it *TopicIterator) NextPage() ([]*pubsubpb.Topic, error) { + if it.atLastPage { + // We already returned Done with the last page of items. Continue to + // return Done, but with no items. + return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + if it.atLastPage { + return it.items, Done + } + return it.items, nil +} + +// Next returns the next result. Its second return value is Done if there are no more results. +// Once next returns Done, all subsequent calls will return Done. +// +// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to +// affect how many results are retrieved in a single RPC. +// +// SetPageToken should not be called when using Next. +// +// Next and NextPage should not be used with the same iterator. +func (it *TopicIterator) Next() (*pubsubpb.Topic, error) { + for it.currentIndex >= len(it.items) { + if it.atLastPage { + return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + it.currentIndex = 0 + } + result := it.items[it.currentIndex] + it.currentIndex++ + return result, nil +} + +// PageSize returns the page size for all subsequent calls to NextPage. 
+func (it *TopicIterator) PageSize() int { + return int(it.pageSize) +} + +// SetPageSize sets the page size for all subsequent calls to NextPage. +func (it *TopicIterator) SetPageSize(pageSize int) { + if pageSize > math.MaxInt32 { + pageSize = math.MaxInt32 + } + it.pageSize = int32(pageSize) +} + +// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from +// a previous point. +func (it *TopicIterator) SetPageToken(token string) { + it.nextPageToken = token +} + +// NextPageToken returns a page token that can be used with SetPageToken to resume +// iteration from the next page. It returns the empty string if there are no more pages. +func (it *TopicIterator) NextPageToken() string { + return it.nextPageToken +} + +// StringIterator manages a stream of string. +type StringIterator struct { + // The current page data. + items []string + atLastPage bool + currentIndex int + pageSize int32 + nextPageToken string + apiCall func() error +} + +// NextPage returns the next page of results. +// It will return at most the number of results specified by the last call to SetPageSize. +// If SetPageSize was never called or was called with a value less than 1, +// the page size is determined by the underlying service. +// +// NextPage may return a second return value of Done along with the last page of results. After +// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). +// +// Next and NextPage should not be used with the same iterator. +func (it *StringIterator) NextPage() ([]string, error) { + if it.atLastPage { + // We already returned Done with the last page of items. Continue to + // return Done, but with no items. + return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + if it.atLastPage { + return it.items, Done + } + return it.items, nil +} + +// Next returns the next result. Its second return value is Done if there are no more results. +// Once next returns Done, all subsequent calls will return Done. +// +// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to +// affect how many results are retrieved in a single RPC. +// +// SetPageToken should not be called when using Next. +// +// Next and NextPage should not be used with the same iterator. +func (it *StringIterator) Next() (string, error) { + for it.currentIndex >= len(it.items) { + if it.atLastPage { + return "", Done + } + if err := it.apiCall(); err != nil { + return "", err + } + it.currentIndex = 0 + } + result := it.items[it.currentIndex] + it.currentIndex++ + return result, nil +} + +// PageSize returns the page size for all subsequent calls to NextPage. +func (it *StringIterator) PageSize() int { + return int(it.pageSize) +} + +// SetPageSize sets the page size for all subsequent calls to NextPage. +func (it *StringIterator) SetPageSize(pageSize int) { + if pageSize > math.MaxInt32 { + pageSize = math.MaxInt32 + } + it.pageSize = int32(pageSize) +} + +// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from +// a previous point. +func (it *StringIterator) SetPageToken(token string) { + it.nextPageToken = token +} + +// NextPageToken returns a page token that can be used with SetPageToken to resume +// iteration from the next page. It returns the empty string if there are no more pages. 
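+//
+// Paging can be resumed later from a saved token, e.g. (a sketch):
+//
+//	tok := it.NextPageToken() // save this
+//	// ... later, with a fresh iterator it2 for the same request:
+//	it2.SetPageToken(tok)
+//	page, err := it2.NextPage()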
+func (it *StringIterator) NextPageToken() string { + return it.nextPageToken +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go new file mode 100644 index 000000000..69da47cb0 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go @@ -0,0 +1,147 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub_test + +import ( + "cloud.google.com/go/pubsub/apiv1" + "golang.org/x/net/context" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func ExampleNewPublisherClient() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExamplePublisherClient_CreateTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.Topic{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExamplePublisherClient_Publish() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.PublishRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Publish(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExamplePublisherClient_GetTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.GetTopicRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExamplePublisherClient_ListTopics() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListTopicsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTopics(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExamplePublisherClient_ListTopicSubscriptions() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListTopicSubscriptionsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTopicSubscriptions(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExamplePublisherClient_DeleteTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &pubsubpb.DeleteTopicRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/pubsub.go b/vendor/cloud.google.com/go/pubsub/apiv1/pubsub.go new file mode 100644 index 000000000..3165168b1 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/pubsub.go @@ -0,0 +1,26 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub + +import "errors" + +const ( + gapicNameVersion = "gapic/0.1.0" +) + +// Done is returned by iterators on successful completion. +var Done = errors.New("iterator done") diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go new file mode 100644 index 000000000..308d30131 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go @@ -0,0 +1,402 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub + +import ( + "fmt" + "math" + "runtime" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +var ( + subscriberProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + subscriberSubscriptionPathTemplate = gax.MustCompilePathTemplate("projects/{project}/subscriptions/{subscription}") + subscriberTopicPathTemplate = gax.MustCompilePathTemplate("projects/{project}/topics/{topic}") +) + +// SubscriberCallOptions contains the retry settings for each method of this client. 
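+// These may be customized after the client is created; see
+// defaultSubscriberCallOptions below for how retry behaviour is expressed
+// with gax.WithRetry and gax.OnCodes.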
+type SubscriberCallOptions struct { + CreateSubscription []gax.CallOption + GetSubscription []gax.CallOption + ListSubscriptions []gax.CallOption + DeleteSubscription []gax.CallOption + ModifyAckDeadline []gax.CallOption + Acknowledge []gax.CallOption + Pull []gax.CallOption + ModifyPushConfig []gax.CallOption +} + +func defaultSubscriberClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("pubsub.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub", + ), + } +} + +func defaultSubscriberCallOptions() *SubscriberCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + + return &SubscriberCallOptions{ + CreateSubscription: retry[[2]string{"default", "idempotent"}], + GetSubscription: retry[[2]string{"default", "idempotent"}], + ListSubscriptions: retry[[2]string{"default", "idempotent"}], + DeleteSubscription: retry[[2]string{"default", "idempotent"}], + ModifyAckDeadline: retry[[2]string{"default", "non_idempotent"}], + Acknowledge: retry[[2]string{"messaging", "non_idempotent"}], + Pull: retry[[2]string{"messaging", "non_idempotent"}], + ModifyPushConfig: retry[[2]string{"default", "non_idempotent"}], + } +} + +// SubscriberClient is a client for interacting with Subscriber. +type SubscriberClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client pubsubpb.SubscriberClient + + // The call options for this service. + CallOptions *SubscriberCallOptions + + // The metadata to be sent with each request. + metadata map[string][]string +} + +// NewSubscriberClient creates a new subscriber service client. +// +// The service that an application uses to manipulate subscriptions and to +// consume messages from a subscription via the `Pull` method. +func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultSubscriberClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &SubscriberClient{ + conn: conn, + client: pubsubpb.NewSubscriberClient(conn), + CallOptions: defaultSubscriberCallOptions(), + } + c.SetGoogleClientInfo("gax", gax.Version) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *SubscriberClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *SubscriberClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SubscriberClient) SetGoogleClientInfo(name, version string) { + c.metadata = map[string][]string{ + "x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())}, + } +} + +// ProjectPath returns the path for the project resource. 
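+// For example, SubscriberProjectPath("my-project") renders the
+// "projects/{project}" template above as "projects/my-project"
+// ("my-project" being just an illustrative value).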
+func SubscriberProjectPath(project string) string { + path, err := subscriberProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// SubscriptionPath returns the path for the subscription resource. +func SubscriberSubscriptionPath(project string, subscription string) string { + path, err := subscriberSubscriptionPathTemplate.Render(map[string]string{ + "project": project, + "subscription": subscription, + }) + if err != nil { + panic(err) + } + return path +} + +// TopicPath returns the path for the topic resource. +func SubscriberTopicPath(project string, topic string) string { + path, err := subscriberTopicPathTemplate.Render(map[string]string{ + "project": project, + "topic": topic, + }) + if err != nil { + panic(err) + } + return path +} + +// CreateSubscription creates a subscription to a given topic for a given subscriber. +// If the subscription already exists, returns `ALREADY_EXISTS`. +// If the corresponding topic doesn't exist, returns `NOT_FOUND`. +// +// If the name is not provided in the request, the server will assign a random +// name for this subscription on the same project as the topic. +func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription) (*pubsubpb.Subscription, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.CreateSubscription(ctx, req) + return err + }, c.CallOptions.CreateSubscription...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetSubscription gets the configuration details of a subscription. +func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest) (*pubsubpb.Subscription, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.GetSubscription(ctx, req) + return err + }, c.CallOptions.GetSubscription...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListSubscriptions lists matching subscriptions. +func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest) *SubscriptionIterator { + ctx = metadata.NewContext(ctx, c.metadata) + it := &SubscriptionIterator{} + it.apiCall = func() error { + var resp *pubsubpb.ListSubscriptionsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + req.PageToken = it.nextPageToken + req.PageSize = it.pageSize + resp, err = c.client.ListSubscriptions(ctx, req) + return err + }, c.CallOptions.ListSubscriptions...) + if err != nil { + return err + } + if resp.NextPageToken == "" { + it.atLastPage = true + } + it.nextPageToken = resp.NextPageToken + it.items = resp.Subscriptions + return nil + } + return it +} + +// DeleteSubscription deletes an existing subscription. All pending messages in the subscription +// are immediately dropped. Calls to `Pull` after deletion will return +// `NOT_FOUND`. After a subscription is deleted, a new one may be created with +// the same name, but the new one has no association with the old +// subscription, or its topic unless the same topic is specified. 
+func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest) error { + ctx = metadata.NewContext(ctx, c.metadata) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.client.DeleteSubscription(ctx, req) + return err + }, c.CallOptions.DeleteSubscription...) + return err +} + +// ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful +// to indicate that more time is needed to process a message by the +// subscriber, or to make the message available for redelivery if the +// processing was interrupted. +func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest) error { + ctx = metadata.NewContext(ctx, c.metadata) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.client.ModifyAckDeadline(ctx, req) + return err + }, c.CallOptions.ModifyAckDeadline...) + return err +} + +// Acknowledge acknowledges the messages associated with the `ack_ids` in the +// `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages +// from the subscription. +// +// Acknowledging a message whose ack deadline has expired may succeed, +// but such a message may be redelivered later. Acknowledging a message more +// than once will not result in an error. +func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest) error { + ctx = metadata.NewContext(ctx, c.metadata) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.client.Acknowledge(ctx, req) + return err + }, c.CallOptions.Acknowledge...) + return err +} + +// Pull pulls messages from the server. Returns an empty list if there are no +// messages available in the backlog. The server may return `UNAVAILABLE` if +// there are too many concurrent pull requests pending for the given +// subscription. +func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest) (*pubsubpb.PullResponse, error) { + ctx = metadata.NewContext(ctx, c.metadata) + var resp *pubsubpb.PullResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.Pull(ctx, req) + return err + }, c.CallOptions.Pull...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ModifyPushConfig modifies the `PushConfig` for a specified subscription. +// +// This may be used to change a push subscription to a pull one (signified by +// an empty `PushConfig`) or vice versa, or change the endpoint URL and other +// attributes of a push subscription. Messages will accumulate for delivery +// continuously through the call regardless of changes to the `PushConfig`. +func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest) error { + ctx = metadata.NewContext(ctx, c.metadata) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.client.ModifyPushConfig(ctx, req) + return err + }, c.CallOptions.ModifyPushConfig...) + return err +} + +// SubscriptionIterator manages a stream of *pubsubpb.Subscription. +type SubscriptionIterator struct { + // The current page data. + items []*pubsubpb.Subscription + atLastPage bool + currentIndex int + pageSize int32 + nextPageToken string + apiCall func() error +} + +// NextPage returns the next page of results. +// It will return at most the number of results specified by the last call to SetPageSize. 
+// If SetPageSize was never called or was called with a value less than 1, +// the page size is determined by the underlying service. +// +// NextPage may return a second return value of Done along with the last page of results. After +// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). +// +// Next and NextPage should not be used with the same iterator. +func (it *SubscriptionIterator) NextPage() ([]*pubsubpb.Subscription, error) { + if it.atLastPage { + // We already returned Done with the last page of items. Continue to + // return Done, but with no items. + return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + if it.atLastPage { + return it.items, Done + } + return it.items, nil +} + +// Next returns the next result. Its second return value is Done if there are no more results. +// Once next returns Done, all subsequent calls will return Done. +// +// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to +// affect how many results are retrieved in a single RPC. +// +// SetPageToken should not be called when using Next. +// +// Next and NextPage should not be used with the same iterator. +func (it *SubscriptionIterator) Next() (*pubsubpb.Subscription, error) { + for it.currentIndex >= len(it.items) { + if it.atLastPage { + return nil, Done + } + if err := it.apiCall(); err != nil { + return nil, err + } + it.currentIndex = 0 + } + result := it.items[it.currentIndex] + it.currentIndex++ + return result, nil +} + +// PageSize returns the page size for all subsequent calls to NextPage. +func (it *SubscriptionIterator) PageSize() int { + return int(it.pageSize) +} + +// SetPageSize sets the page size for all subsequent calls to NextPage. +func (it *SubscriptionIterator) SetPageSize(pageSize int) { + if pageSize > math.MaxInt32 { + pageSize = math.MaxInt32 + } + it.pageSize = int32(pageSize) +} + +// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from +// a previous point. +func (it *SubscriptionIterator) SetPageToken(token string) { + it.nextPageToken = token +} + +// NextPageToken returns a page token that can be used with SetPageToken to resume +// iteration from the next page. It returns the empty string if there are no more pages. +func (it *SubscriptionIterator) NextPageToken() string { + return it.nextPageToken +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go new file mode 100644 index 000000000..1566222ea --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go @@ -0,0 +1,173 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package pubsub_test + +import ( + "cloud.google.com/go/pubsub/apiv1" + "golang.org/x/net/context" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func ExampleNewSubscriberClient() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleSubscriberClient_CreateSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.Subscription{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_GetSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.GetSubscriptionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_ListSubscriptions() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListSubscriptionsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListSubscriptions(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleSubscriberClient_DeleteSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.DeleteSubscriptionRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_ModifyAckDeadline() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ModifyAckDeadlineRequest{ + // TODO: Fill request struct fields. + } + err = c.ModifyAckDeadline(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_Acknowledge() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.AcknowledgeRequest{ + // TODO: Fill request struct fields. + } + err = c.Acknowledge(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_Pull() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.PullRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Pull(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_ModifyPushConfig() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ModifyPushConfigRequest{ + // TODO: Fill request struct fields. + } + err = c.ModifyPushConfig(ctx, req) + if err != nil { + // TODO: Handle error. 
+ }
+}
diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go
new file mode 100644
index 000000000..cf43275f3
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/doc.go
@@ -0,0 +1,115 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub
+messages, hiding the details of the underlying server RPCs. Google Cloud
+Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders
+and receivers.
+
+Note: This package is experimental and may make backwards-incompatible changes.
+
+More information about Google Cloud Pub/Sub is available at
+https://cloud.google.com/pubsub/docs
+
+Publishing
+
+Google Cloud Pub/Sub messages are published to topics. Topics may be created
+using the pubsub package like so:
+
+ topic, err := pubsubClient.NewTopic(context.Background(), "topic-name")
+
+Messages may then be published to a topic:
+
+ msgIDs, err := topic.Publish(ctx, &pubsub.Message{
+ Data: []byte("payload"),
+ })
+
+Receiving
+
+To receive messages published to a topic, clients create subscriptions
+to the topic. There may be more than one subscription per topic; each message
+that is published to the topic will be delivered to all of its subscriptions.
+
+Subscriptions may be created like so:
+
+ sub, err := pubsubClient.NewSubscription(context.Background(), "sub-name", topic, 0, nil)
+
+Messages are then consumed from a subscription via an iterator:
+
+ // Construct the iterator
+ it, err := sub.Pull(context.Background())
+ if err != nil {
+ // handle err ...
+ }
+ defer it.Stop()
+
+ // Consume N messages
+ for i := 0; i < N; i++ {
+ msg, err := it.Next()
+ if err == pubsub.Done {
+ break
+ }
+ if err != nil {
+ // handle err ...
+ break
+ }
+
+ log.Print("got message: ", string(msg.Data))
+ msg.Done(true)
+ }
+
+The message iterator returns messages one at a time, fetching batches of
+messages behind the scenes as needed. Once client code has processed the
+message, it must call Message.Done, otherwise the message will eventually be
+redelivered. For more information and configuration options, see "Deadlines"
+below.
+
+Note: It is possible for Messages to be redelivered, even if Message.Done has
+been called. Client code must be robust to multiple deliveries of messages.
+
+Deadlines
+
+The default pubsub deadlines are suitable for most use cases, but may be
+overridden. This section describes the tradeoffs that should be considered
+when overriding the defaults.
+
+Behind the scenes, each message returned by the Pub/Sub server has an
+associated lease, known as an "ACK deadline".
+Unless a message is acknowledged within the ACK deadline, or the client requests that
+the ACK deadline be extended, the message will become eligible for redelivery.
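+
+For example, a subscription whose messages have a 30 second initial ACK
+deadline may be created like so (an illustrative sketch; "sub-name", the
+deadline, and the previously created topic are placeholders):
+
+ sub, err := pubsubClient.NewSubscription(context.Background(), "sub-name", topic, 30*time.Second, nil)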
+As a convenience, the pubsub package will automatically extend deadlines until
+either:
+ * Message.Done is called, or
+ * the "MaxExtension" period elapses from the time the message is fetched from the server.
+
+The initial ACK deadline given to each message defaults to 10 seconds, but may
+be overridden during subscription creation. Selecting an ACK deadline is a
+tradeoff between message redelivery latency and RPC volume. If the pubsub
+package fails to acknowledge or extend a message (e.g. due to unexpected
+termination of the process), a shorter ACK deadline will generally result in
+faster message redelivery by the Pub/Sub system. However, a short ACK deadline
+may also increase the number of deadline extension RPCs that the pubsub package
+sends to the server.
+
+The default max extension period is DefaultMaxExtension, and can be overridden
+by passing a MaxExtension option to Subscription.Pull. Selecting a max
+extension period is a tradeoff between the speed at which client code must
+process messages, and the redelivery delay if messages fail to be acknowledged
+(e.g. because client code neglects to do so). Using a large MaxExtension
+increases the available time for client code to process messages. However, if
+the client code neglects to call Message.Done, a large MaxExtension will
+increase the delay before the message is redelivered.
+*/
+package pubsub // import "cloud.google.com/go/pubsub"
diff --git a/vendor/cloud.google.com/go/pubsub/endtoend_test.go b/vendor/cloud.google.com/go/pubsub/endtoend_test.go
new file mode 100644
index 000000000..429e43942
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/endtoend_test.go
@@ -0,0 +1,323 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "fmt"
+ "math/rand"
+ "reflect"
+ "sync"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "cloud.google.com/go/internal/testutil"
+ "google.golang.org/api/option"
+)
+
+const timeout = time.Minute * 10
+const ackDeadline = time.Second * 10
+
+const batchSize = 100
+const batches = 100
+
+// messageCounter keeps track of how many times a given message has been received.
+type messageCounter struct {
+ mu sync.Mutex
+ counts map[string]int
+ // A value is sent to recv each time Inc is called.
+ recv chan struct{}
+}
+
+func (mc *messageCounter) Inc(msgID string) {
+ mc.mu.Lock()
+ mc.counts[msgID] += 1
+ mc.mu.Unlock()
+ mc.recv <- struct{}{}
+}
+
+// process pulls messages from an iterator and records them in mc.
+func process(t *testing.T, it *Iterator, mc *messageCounter) {
+ for {
+ m, err := it.Next()
+ if err == Done {
+ return
+ }
+
+ if err != nil {
+ t.Errorf("unexpected err from iterator: %v", err)
+ return
+ }
+ mc.Inc(m.ID)
+ // Simulate time taken to process m, while continuing to process more messages.
+ go func() {
+ // Some messages will need to have their ack deadline extended due to this delay.
+ delay := rand.Intn(int(ackDeadline * 3))
+ time.Sleep(time.Duration(delay))
+ m.Done(true)
+ }()
+ }
+}
+
+// newIter constructs a new Iterator.
+func newIter(t *testing.T, ctx context.Context, sub *Subscription) *Iterator {
+ it, err := sub.Pull(ctx)
+ if err != nil {
+ t.Fatalf("error constructing iterator: %v", err)
+ }
+ return it
+}
+
+// launchIter launches a number of goroutines to pull from the supplied Iterator.
+func launchIter(t *testing.T, ctx context.Context, it *Iterator, mc *messageCounter, n int, wg *sync.WaitGroup) {
+ for j := 0; j < n; j++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ process(t, it, mc)
+ }()
+ }
+}
+
+// iteratorLifetimes controls how long iterators live for before they are stopped.
+type iteratorLifetimes interface {
+ // lifetimeChan should be called when an iterator is started. The
+ // returned channel will send when the iterator should be stopped.
+ lifetimeChan() <-chan time.Time
+}
+
+var immortal = &explicitLifetimes{}
+
+// explicitLifetimes implements iteratorLifetimes with hard-coded lifetimes, falling back
+// to indefinite lifetimes when no explicit lifetimes remain.
+type explicitLifetimes struct {
+ mu sync.Mutex
+ lifetimes []time.Duration
+}
+
+func (el *explicitLifetimes) lifetimeChan() <-chan time.Time {
+ el.mu.Lock()
+ defer el.mu.Unlock()
+ if len(el.lifetimes) == 0 {
+ return nil
+ }
+ lifetime := el.lifetimes[0]
+ el.lifetimes = el.lifetimes[1:]
+ return time.After(lifetime)
+}
+
+// consumer consumes messages according to its configuration.
+type consumer struct {
+ // How many goroutines should pull from the subscription.
+ iteratorsInFlight int
+ // How many goroutines should pull from each iterator.
+ concurrencyPerIterator int
+
+ lifetimes iteratorLifetimes
+}
+
+// consume reads messages from a subscription, and keeps track of what it receives in mc.
+// After consume returns, the caller should wait on wg to ensure that no more updates to mc will be made.
+func (c *consumer) consume(t *testing.T, ctx context.Context, sub *Subscription, mc *messageCounter, wg *sync.WaitGroup, stop <-chan struct{}) {
+ for i := 0; i < c.iteratorsInFlight; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ it := newIter(t, ctx, sub)
+ launchIter(t, ctx, it, mc, c.concurrencyPerIterator, wg)
+
+ select {
+ case <-c.lifetimes.lifetimeChan():
+ it.Stop()
+ case <-stop:
+ it.Stop()
+ return
+ }
+ }
+
+ }()
+ }
+}
+
+// publish publishes many messages to topic, and returns the published message ids.
+func publish(t *testing.T, ctx context.Context, topic *Topic) []string {
+ var published []string
+ msgs := make([]*Message, batchSize)
+ for i := 0; i < batches; i++ {
+ for j := 0; j < batchSize; j++ {
+ text := fmt.Sprintf("msg %02d-%02d", i, j)
+ msgs[j] = &Message{Data: []byte(text)}
+ }
+ ids, err := topic.Publish(ctx, msgs...)
+ if err != nil {
+ t.Errorf("Publish error: %v", err)
+ }
+ published = append(published, ids...)
+ }
+ return published
+}
+
+// diff returns counts of the differences between got and want.
+func diff(got, want map[string]int) map[string]int {
+ ids := make(map[string]struct{})
+ for k := range got {
+ ids[k] = struct{}{}
+ }
+ for k := range want {
+ ids[k] = struct{}{}
+ }
+
+ gotWantCount := make(map[string]int)
+ for k := range ids {
+ if got[k] == want[k] {
+ continue
+ }
+ desc := fmt.Sprintf("<got: %v ; want: %v>", got[k], want[k])
+ gotWantCount[desc] += 1
+ }
+ return gotWantCount
+}
+
+// TestEndToEnd pumps many messages into a topic and tests that they are all delivered to each subscription for the topic.
+// It also tests that messages are not unexpectedly redelivered.
+func TestEndToEnd(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Integration tests skipped in short mode")
+ }
+ ctx := context.Background()
+ ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform)
+ if ts == nil {
+ t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
+ }
+
+ now := time.Now()
+ topicName := fmt.Sprintf("endtoend-%d", now.Unix())
+ subPrefix := fmt.Sprintf("endtoend-%d", now.Unix())
+
+ client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts))
+ if err != nil {
+ t.Fatalf("Creating client error: %v", err)
+ }
+
+ var topic *Topic
+ if topic, err = client.CreateTopic(ctx, topicName); err != nil {
+ t.Fatalf("CreateTopic error: %v", err)
+ }
+ defer topic.Delete(ctx)
+
+ // Three subscriptions to the same topic.
+ var subA, subB, subC *Subscription
+ if subA, err = client.CreateSubscription(ctx, subPrefix+"-a", topic, ackDeadline, nil); err != nil {
+ t.Fatalf("CreateSub error: %v", err)
+ }
+ defer subA.Delete(ctx)
+
+ if subB, err = client.CreateSubscription(ctx, subPrefix+"-b", topic, ackDeadline, nil); err != nil {
+ t.Fatalf("CreateSub error: %v", err)
+ }
+ defer subB.Delete(ctx)
+
+ if subC, err = client.CreateSubscription(ctx, subPrefix+"-c", topic, ackDeadline, nil); err != nil {
+ t.Fatalf("CreateSub error: %v", err)
+ }
+ defer subC.Delete(ctx)
+
+ expectedCounts := make(map[string]int)
+ for _, id := range publish(t, ctx, topic) {
+ expectedCounts[id] = 1
+ }
+
+ // recv provides an indication that messages are still arriving.
+ recv := make(chan struct{})
+
+ // Keep track of the number of times each message (by message id) was
+ // seen from each subscription.
+ mcA := &messageCounter{counts: make(map[string]int), recv: recv}
+ mcB := &messageCounter{counts: make(map[string]int), recv: recv}
+ mcC := &messageCounter{counts: make(map[string]int), recv: recv}
+
+ stopC := make(chan struct{})
+
+ // We have three subscriptions to our topic.
+ // Each subscription will get a copy of each published message.
+ //
+ // subA and subB each have two iterators, which each process roughly
+ // half of that subscription's messages. All of these iterators live
+ // until all messages have been consumed. subC is processed by a
+ // series of short-lived iterators.
+
+ var wg sync.WaitGroup
+
+ con := &consumer{
+ concurrencyPerIterator: 1,
+ iteratorsInFlight: 2,
+ lifetimes: immortal,
+ }
+ con.consume(t, ctx, subA, mcA, &wg, stopC)
+
+ con = &consumer{
+ concurrencyPerIterator: 1,
+ iteratorsInFlight: 2,
+ lifetimes: immortal,
+ }
+ con.consume(t, ctx, subB, mcB, &wg, stopC)
+
+ con = &consumer{
+ concurrencyPerIterator: 1,
+ iteratorsInFlight: 2,
+ lifetimes: &explicitLifetimes{
+ lifetimes: []time.Duration{ackDeadline, ackDeadline, ackDeadline / 2, ackDeadline / 2},
+ },
+ }
+ con.consume(t, ctx, subC, mcC, &wg, stopC)
+
+ go func() {
+ timeoutC := time.After(timeout)
+ // Every time this ticker ticks, we will check if we have received any
+ // messages since the last time it ticked. We check less frequently
+ // than the ack deadline, so that we can detect if messages are
+ // redelivered after having their ack deadline extended.
+ checkQuiescence := time.NewTicker(ackDeadline * 3) + defer checkQuiescence.Stop() + + var received bool + for { + select { + case <-recv: + received = true + case <-checkQuiescence.C: + if received { + received = false + } else { + close(stopC) + return + } + case <-timeoutC: + t.Errorf("timed out") + close(stopC) + return + } + } + }() + wg.Wait() + + for _, mc := range []*messageCounter{mcA, mcB, mcC} { + if got, want := mc.counts, expectedCounts; !reflect.DeepEqual(got, want) { + t.Errorf("message counts: %v\n", diff(got, want)) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go b/vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go new file mode 100644 index 000000000..bc5bbc572 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go @@ -0,0 +1,42 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub_test + +import ( + "fmt" + + "cloud.google.com/go/pubsub" + "golang.org/x/net/context" +) + +func ExampleSubscriptionIterator() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // List all subscriptions of the project. + it := client.Subscriptions(ctx) + for { + sub, err := it.Next() + if err == pubsub.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(sub.Name()) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/example_test.go b/vendor/cloud.google.com/go/pubsub/example_test.go new file mode 100644 index 000000000..5ec1702b7 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/example_test.go @@ -0,0 +1,225 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub_test + +import ( + "fmt" + "time" + + "cloud.google.com/go/pubsub" + "golang.org/x/net/context" +) + +func ExampleNewClient() { + ctx := context.Background() + _, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // See the other examples to learn how to use the Client. +} + +func ExampleClient_CreateTopic() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // Create a new topic with the given name. + topic, err := client.CreateTopic(ctx, "topicName") + if err != nil { + // TODO: Handle error. + } + + _ = topic // TODO: use the topic. 
+} + +func ExampleClient_Topics() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // List all topics. + it := client.Topics(ctx) + _ = it // See the TopicIterator example for its usage. +} + +func ExampleClient_CreateSubscription() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // Create a new topic with the given name. + topic, err := client.CreateTopic(ctx, "topicName") + if err != nil { + // TODO: Handle error. + } + + // Create a new subscription to the previously created topic + // with the given name. + sub, err := client.CreateSubscription(ctx, "subName", topic, 10*time.Second, nil) + if err != nil { + // TODO: Handle error. + } + + _ = sub // TODO: use the subscription. +} + +func ExampleClient_Subscriptions() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // List all subscriptions of the project. + it := client.Subscriptions(ctx) + _ = it // See the SubscriptionIterator example for its usage. +} + +func ExampleTopic_Delete() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + topic := client.Topic("topicName") + if err := topic.Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleTopic_Exists() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + topic := client.Topic("topicName") + ok, err := topic.Exists(ctx) + if err != nil { + // TODO: Handle error. + } + if !ok { + // Topic doesn't exist. + } +} + +func ExampleTopic_Publish() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + topic := client.Topic("topicName") + msgIDs, err := topic.Publish(ctx, &pubsub.Message{ + Data: []byte("hello world"), + }) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("Published a message with a message ID: %s\n", msgIDs[0]) +} + +func ExampleTopic_Subscriptions() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + topic := client.Topic("topic-name") + // List all subscriptions of the topic (maybe of multiple projects). + for subs := topic.Subscriptions(ctx); ; { + sub, err := subs.Next() + if err == pubsub.Done { + break + } + if err != nil { + // TODO: Handle error. + } + _ = sub // TODO: use the subscription. + } +} + +func ExampleSubscription_Delete() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + sub := client.Subscription("subName") + if err := sub.Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscription_Exists() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + sub := client.Subscription("subName") + ok, err := sub.Exists(ctx) + if err != nil { + // TODO: Handle error. + } + if !ok { + // Subscription doesn't exist. + } +} + +func ExampleSubscription_Pull() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ }
+
+ sub := client.Subscription("subName")
+ it, err := sub.Pull(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+ // Ensure that the iterator is closed down cleanly.
+ defer it.Stop()
+
+ // Consume 10 messages.
+ for i := 0; i < 10; i++ {
+ m, err := it.Next()
+ if err == pubsub.Done {
+ // There are no more messages. This will happen if it.Stop is called.
+ break
+ }
+ if err != nil {
+ // TODO: Handle error.
+ break
+ }
+ fmt.Printf("message %d: %s\n", i, m.Data)
+
+ // Acknowledge the message.
+ m.Done(true)
+ }
+}
diff --git a/vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go b/vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go
new file mode 100644
index 000000000..396d342a9
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go
@@ -0,0 +1,42 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub_test
+
+import (
+ "fmt"
+
+ "cloud.google.com/go/pubsub"
+ "golang.org/x/net/context"
+)
+
+func ExampleTopicIterator() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ // List all topics.
+ it := client.Topics(ctx)
+ for {
+ t, err := it.Next()
+ if err == pubsub.Done {
+ break
+ }
+ if err != nil {
+ // TODO: Handle error.
+ }
+ fmt.Println(t.Name())
+ }
+}
diff --git a/vendor/cloud.google.com/go/pubsub/integration_test.go b/vendor/cloud.google.com/go/pubsub/integration_test.go
new file mode 100644
index 000000000..bba847e23
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/integration_test.go
@@ -0,0 +1,165 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "cloud.google.com/go/internal/testutil"
+ "google.golang.org/api/option"
+)
+
+// messageData is used to hold the contents of a message so that it can be compared against the contents
+// of another message without regard to irrelevant fields.
+type messageData struct { + ID string + Data []byte + Attributes map[string]string +} + +func extractMessageData(m *Message) *messageData { + return &messageData{ + ID: m.ID, + Data: m.Data, + Attributes: m.Attributes, + } +} + +func TestAll(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + now := time.Now() + topicName := fmt.Sprintf("topic-%d", now.Unix()) + subName := fmt.Sprintf("subscription-%d", now.Unix()) + + client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts)) + if err != nil { + t.Fatalf("Creating client error: %v", err) + } + + var topic *Topic + if topic, err = client.CreateTopic(ctx, topicName); err != nil { + t.Errorf("CreateTopic error: %v", err) + } + + var sub *Subscription + if sub, err = client.CreateSubscription(ctx, subName, topic, 0, nil); err != nil { + t.Errorf("CreateSub error: %v", err) + } + + exists, err := topic.Exists(ctx) + if err != nil { + t.Fatalf("TopicExists error: %v", err) + } + if !exists { + t.Errorf("topic %s should exist, but it doesn't", topic) + } + + exists, err = sub.Exists(ctx) + if err != nil { + t.Fatalf("SubExists error: %v", err) + } + if !exists { + t.Errorf("subscription %s should exist, but it doesn't", subName) + } + + msgs := []*Message{} + for i := 0; i < 10; i++ { + text := fmt.Sprintf("a message with an index %d", i) + attrs := make(map[string]string) + attrs["foo"] = "bar" + msgs = append(msgs, &Message{ + Data: []byte(text), + Attributes: attrs, + }) + } + + ids, err := topic.Publish(ctx, msgs...) + if err != nil { + t.Fatalf("Publish (1) error: %v", err) + } + + if len(ids) != len(msgs) { + t.Errorf("unexpected number of message IDs received; %d, want %d", len(ids), len(msgs)) + } + + want := make(map[string]*messageData) + for i, m := range msgs { + md := extractMessageData(m) + md.ID = ids[i] + want[md.ID] = md + } + + // Use a timeout to ensure that Pull does not block indefinitely if there are unexpectedly few messages available. + timeoutCtx, _ := context.WithTimeout(ctx, time.Minute) + it, err := sub.Pull(timeoutCtx) + if err != nil { + t.Fatalf("error constructing iterator: %v", err) + } + defer it.Stop() + got := make(map[string]*messageData) + for i := 0; i < len(want); i++ { + m, err := it.Next() + if err != nil { + t.Fatalf("error getting next message: %v", err) + } + md := extractMessageData(m) + got[md.ID] = md + m.Done(true) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("messages: got: %v ; want: %v", got, want) + } + + // base64 test + data := "=@~" + _, err = topic.Publish(ctx, &Message{Data: []byte(data)}) + if err != nil { + t.Fatalf("Publish error: %v", err) + } + + m, err := it.Next() + if err != nil { + t.Fatalf("Pull error: %v", err) + } + + if string(m.Data) != data { + t.Errorf("unexpected message received; %s, want %s", string(m.Data), data) + } + m.Done(true) + + err = sub.Delete(ctx) + if err != nil { + t.Errorf("DeleteSub error: %v", err) + } + + err = topic.Delete(ctx) + if err != nil { + t.Errorf("DeleteTopic error: %v", err) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go new file mode 100644 index 000000000..672103309 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/iterator.go @@ -0,0 +1,165 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +// Done is returned when an iteration is complete. +var Done = iterator.Done + +type Iterator struct { + // kaTicker controls how often we send an ack deadline extension request. + kaTicker *time.Ticker + // ackTicker controls how often we acknowledge a batch of messages. + ackTicker *time.Ticker + + ka *keepAlive + acker *acker + puller *puller + + // mu ensures that cleanup only happens once, and concurrent Stop + // invocations block until cleanup completes. + mu sync.Mutex + + // closed is used to signal that Stop has been called. + closed chan struct{} +} + +// newIterator starts a new Iterator. Stop must be called on the Iterator +// when it is no longer needed. +// subName is the full name of the subscription to pull messages from. +// ctx is the context to use for acking messages and extending message deadlines. +func newIterator(ctx context.Context, s service, subName string, po *pullOptions) *Iterator { + // TODO: make kaTicker frequency more configurable. + // (ackDeadline - 5s) is a reasonable default for now, because the minimum ack period is 10s. This gives us 5s grace. + keepAlivePeriod := po.ackDeadline - 5*time.Second + kaTicker := time.NewTicker(keepAlivePeriod) // Stopped in it.Stop + + // TODO: make ackTicker more configurable. Something less than + // kaTicker is a reasonable default (there's no point extending + // messages when they could be acked instead). + ackTicker := time.NewTicker(keepAlivePeriod / 2) // Stopped in it.Stop + + ka := &keepAlive{ + s: s, + Ctx: ctx, + Sub: subName, + ExtensionTick: kaTicker.C, + Deadline: po.ackDeadline, + MaxExtension: po.maxExtension, + } + + ack := &acker{ + s: s, + Ctx: ctx, + Sub: subName, + AckTick: ackTicker.C, + Notify: ka.Remove, + } + + pull := newPuller(s, subName, ctx, int64(po.maxPrefetch), ka.Add, ka.Remove) + + ka.Start() + ack.Start() + return &Iterator{ + kaTicker: kaTicker, + ackTicker: ackTicker, + ka: ka, + acker: ack, + puller: pull, + closed: make(chan struct{}), + } +} + +// Next returns the next Message to be processed. The caller must call +// Message.Done when finished with it. +// Once Stop has been called, calls to Next will return Done. +func (it *Iterator) Next() (*Message, error) { + m, err := it.puller.Next() + + if err == nil { + m.it = it + return m, nil + } + + select { + // If Stop has been called, we return Done regardless the value of err. + case <-it.closed: + return nil, Done + default: + return nil, err + } +} + +// Client code must call Stop on an Iterator when finished with it. +// Stop will block until Done has been called on all Messages that have been +// returned by Next, or until the context with which the Iterator was created +// is cancelled or exceeds its deadline. +// Stop need only be called once, but may be called multiple times from +// multiple goroutines. 
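+//
+// A typical pattern (mirroring ExampleSubscription_Pull in example_test.go)
+// is to arrange for Stop to run as soon as pulling is finished:
+//
+//   it, err := sub.Pull(ctx)
+//   if err != nil {
+//   	// handle err
+//   }
+//   defer it.Stop()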
+func (it *Iterator) Stop() { + it.mu.Lock() + defer it.mu.Unlock() + + select { + case <-it.closed: + // Cleanup has already been performed. + return + default: + } + + // We close this channel before calling it.puller.Stop to ensure that we + // reliably return Done from Next. + close(it.closed) + + // Stop the puller. Once this completes, no more messages will be added + // to it.ka. + it.puller.Stop() + + // Start acking messages as they arrive, ignoring ackTicker. This will + // result in it.ka.Stop, below, returning as soon as possible. + it.acker.FastMode() + + // This will block until + // (a) it.ka.Ctx is done, or + // (b) all messages have been removed from keepAlive. + // (b) will happen once all outstanding messages have been either ACKed or NACKed. + it.ka.Stop() + + // There are no more live messages, so kill off the acker. + it.acker.Stop() + + it.kaTicker.Stop() + it.ackTicker.Stop() +} + +func (it *Iterator) done(ackID string, ack bool) { + if ack { + it.acker.Ack(ackID) + // There's no need to call it.ka.Remove here, as acker will + // call it via its Notify function. + } else { + // TODO: explicitly NACK the message by sending an + // ModifyAckDeadline request with 0s deadline, to make the + // message immediately available for redelivery. + it.ka.Remove(ackID) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/iterator_test.go b/vendor/cloud.google.com/go/pubsub/iterator_test.go new file mode 100644 index 000000000..66f3c175f --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/iterator_test.go @@ -0,0 +1,247 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "fmt" + "reflect" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestReturnsDoneOnStop(t *testing.T) { + type testCase struct { + abort func(*Iterator, context.CancelFunc) + want error + } + + for _, tc := range []testCase{ + { + abort: func(it *Iterator, cancel context.CancelFunc) { + it.Stop() + }, + want: Done, + }, + { + abort: func(it *Iterator, cancel context.CancelFunc) { + cancel() + }, + want: context.Canceled, + }, + { + abort: func(it *Iterator, cancel context.CancelFunc) { + it.Stop() + cancel() + }, + want: Done, + }, + { + abort: func(it *Iterator, cancel context.CancelFunc) { + cancel() + it.Stop() + }, + want: Done, + }, + } { + s := &blockingFetch{} + ctx, cancel := context.WithCancel(context.Background()) + it := newIterator(ctx, s, "subname", &pullOptions{ackDeadline: time.Second * 10, maxExtension: time.Hour}) + defer it.Stop() + tc.abort(it, cancel) + + _, err := it.Next() + if err != tc.want { + t.Errorf("iterator Next error after abort: got:\n%v\nwant:\n%v", err, tc.want) + } + } +} + +// blockingFetch implements message fetching by not returning until its context is cancelled. 
+type blockingFetch struct {
+ service
+}
+
+func (s *blockingFetch) fetchMessages(ctx context.Context, subName string, maxMessages int64) ([]*Message, error) {
+ <-ctx.Done()
+ return nil, ctx.Err()
+}
+
+// justInTimeFetch simulates the situation where the iterator is aborted just after the fetch RPC
+// succeeds, so the rest of puller.Next will continue to execute and return successfully.
+type justInTimeFetch struct {
+ service
+}
+
+func (s *justInTimeFetch) fetchMessages(ctx context.Context, subName string, maxMessages int64) ([]*Message, error) {
+ <-ctx.Done()
+ // The context was cancelled, but let's pretend that this happened just after our RPC returned.
+
+ var result []*Message
+ for i := 0; i < int(maxMessages); i++ {
+ val := fmt.Sprintf("msg%v", i)
+ result = append(result, &Message{Data: []byte(val), ackID: val})
+ }
+ return result, nil
+}
+
+func (s *justInTimeFetch) splitAckIDs(ids []string) ([]string, []string) {
+ return nil, nil
+}
+
+func (s *justInTimeFetch) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
+ return nil
+}
+
+func TestAfterAbortReturnsNoMoreThanOneMessage(t *testing.T) {
+ // Each test case is exercised by making two concurrent blocking calls on an
+ // Iterator, and then aborting the iterator.
+ // The result should be one call to Next returning a message, and the other returning an error.
+ type testCase struct {
+ abort func(*Iterator, context.CancelFunc)
+ // want is the error that should be returned from one Next invocation.
+ want error
+ }
+ for n := 1; n < 3; n++ {
+ for _, tc := range []testCase{
+ {
+ abort: func(it *Iterator, cancel context.CancelFunc) {
+ it.Stop()
+ },
+ want: Done,
+ },
+ {
+ abort: func(it *Iterator, cancel context.CancelFunc) {
+ cancel()
+ },
+ want: context.Canceled,
+ },
+ {
+ abort: func(it *Iterator, cancel context.CancelFunc) {
+ it.Stop()
+ cancel()
+ },
+ want: Done,
+ },
+ {
+ abort: func(it *Iterator, cancel context.CancelFunc) {
+ cancel()
+ it.Stop()
+ },
+ want: Done,
+ },
+ } {
+ s := &justInTimeFetch{}
+ ctx, cancel := context.WithCancel(context.Background())
+
+ // if maxPrefetch == 1, there will be no messages in the puller buffer when Next is invoked the second time.
+ // if maxPrefetch == 2, there will be 1 message in the puller buffer when Next is invoked the second time.
+ po := &pullOptions{
+ ackDeadline: time.Second * 10,
+ maxExtension: time.Hour,
+ maxPrefetch: n,
+ }
+ it := newIterator(ctx, s, "subname", po)
+ defer it.Stop()
+
+ type result struct {
+ m *Message
+ err error
+ }
+ results := make(chan *result, 2)
+
+ for i := 0; i < 2; i++ {
+ go func() {
+ m, err := it.Next()
+ results <- &result{m, err}
+ if err == nil {
+ m.Done(false)
+ }
+ }()
+ }
+ // Wait for goroutines to block on it.Next().
+ time.Sleep(time.Millisecond)
+ tc.abort(it, cancel)
+
+ result1 := <-results
+ result2 := <-results
+
+ // There should be one error result, and one non-error result.
+ // Make result1 be the non-error result.
+			if result1.err != nil {
+				result1, result2 = result2, result1
+			}
+
+			if string(result1.m.Data) != "msg0" {
+				t.Errorf("After abort, got message: %v, want %v", result1.m.Data, "msg0")
+			}
+			if result1.err != nil {
+				t.Errorf("After abort, got: %v, want nil", result1.err)
+			}
+			if result2.m != nil {
+				t.Errorf("After abort, got message: %v, want nil", result2.m)
+			}
+			if result2.err != tc.want {
+				t.Errorf("After abort, got err: %v, want %v", result2.err, tc.want)
+			}
+		}
+	}
+}
+
+func TestMultipleStopCallsBlockUntilMessageDone(t *testing.T) {
+	s := &fetcherService{
+		results: []fetchResult{
+			{
+				msgs: []*Message{{ackID: "a"}, {ackID: "b"}},
+			},
+		},
+	}
+
+	ctx := context.Background()
+	it := newIterator(ctx, s, "subname", &pullOptions{ackDeadline: time.Second * 10, maxExtension: 0})
+
+	m, err := it.Next()
+	if err != nil {
+		t.Errorf("error calling Next: %v", err)
+	}
+
+	events := make(chan string, 3)
+	go func() {
+		it.Stop()
+		events <- "stopped"
+	}()
+	go func() {
+		it.Stop()
+		events <- "stopped"
+	}()
+
+	time.Sleep(10 * time.Millisecond)
+	events <- "nacked"
+	m.Done(false)
+
+	if got, want := []string{<-events, <-events, <-events}, []string{"nacked", "stopped", "stopped"}; !reflect.DeepEqual(got, want) {
+		t.Errorf("stopping iterator, got: %v ; want: %v", got, want)
+	}
+
+	// The iterator is stopped, so should not return another message.
+	m, err = it.Next()
+	if m != nil {
+		t.Errorf("message got: %v ; want: nil", m)
+	}
+	if err != Done {
+		t.Errorf("err got: %v ; want: %v", err, Done)
+	}
+}
diff --git a/vendor/cloud.google.com/go/pubsub/keepalive.go b/vendor/cloud.google.com/go/pubsub/keepalive.go
new file mode 100644
index 000000000..11b5739f4
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/keepalive.go
@@ -0,0 +1,179 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// keepAlive keeps track of which Messages need to have their deadline extended, and
+// periodically extends them.
+// Messages are tracked by Ack ID.
+type keepAlive struct {
+	s             service
+	Ctx           context.Context  // The context to use when extending deadlines.
+	Sub           string           // The full name of the subscription.
+	ExtensionTick <-chan time.Time // ExtensionTick supplies the frequency with which to make extension requests.
+	Deadline      time.Duration    // How long to extend messages for each time they are extended. Should be greater than ExtensionTick frequency.
+	MaxExtension  time.Duration    // How long to keep extending each message's ack deadline before automatically removing it.
+
+	mu sync.Mutex
+	// key: ackID; value: time at which ack deadline extension should cease.
+	items map[string]time.Time
+	dr    drain
+
+	wg sync.WaitGroup
+}
+
+// Start initiates the deadline extension loop. Stop must be called once keepAlive is no longer needed.
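+//
+// A minimal wiring sketch (the values here are hypothetical; the surrounding
+// iterator supplies the real service, ticker and deadlines):
+//
+//	ka := &keepAlive{
+//		s:             svc,
+//		Ctx:           ctx,
+//		Sub:           "projects/proj/subscriptions/sub",
+//		ExtensionTick: time.NewTicker(5 * time.Second).C,
+//		Deadline:      10 * time.Second,
+//		MaxExtension:  10 * time.Minute,
+//	}
+//	ka.Start()
+//	ka.Add("ack-id") // keep extending this message's deadline
+//	// ... later, once the message has been ACKed or NACKed:
+//	ka.Remove("ack-id")
+//	ka.Stop() // blocks until all added IDs have been removed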
+func (ka *keepAlive) Start() {
+	ka.items = make(map[string]time.Time)
+	ka.dr = drain{Drained: make(chan struct{})}
+	ka.wg.Add(1)
+	go func() {
+		defer ka.wg.Done()
+		for {
+			select {
+			case <-ka.Ctx.Done():
+				// Don't bother waiting for items to be removed: we can't extend them any more.
+				return
+			case <-ka.dr.Drained:
+				return
+			case <-ka.ExtensionTick:
+				live, expired := ka.getAckIDs()
+				ka.wg.Add(1)
+				go func() {
+					defer ka.wg.Done()
+					ka.extendDeadlines(live)
+				}()
+
+				for _, id := range expired {
+					ka.Remove(id)
+				}
+			}
+		}
+	}()
+}
+
+// Add adds an ack id to be kept alive.
+// It should not be called after Stop.
+func (ka *keepAlive) Add(ackID string) {
+	ka.mu.Lock()
+	defer ka.mu.Unlock()
+
+	ka.items[ackID] = time.Now().Add(ka.MaxExtension)
+	ka.dr.SetPending(true)
+}
+
+// Remove removes ackID from the list to be kept alive.
+func (ka *keepAlive) Remove(ackID string) {
+	ka.mu.Lock()
+	defer ka.mu.Unlock()
+
+	// Note: If a user NACKs a message after it has been removed due to
+	// expiring, Remove will be called twice with the same ack ID. This is OK.
+	delete(ka.items, ackID)
+	ka.dr.SetPending(len(ka.items) != 0)
+}
+
+// Stop waits until all added ackIDs have been removed, and cleans up resources.
+// Stop may only be called once.
+func (ka *keepAlive) Stop() {
+	ka.mu.Lock()
+	ka.dr.Drain()
+	ka.mu.Unlock()
+
+	ka.wg.Wait()
+}
+
+// getAckIDs returns the set of ackIDs that are being kept alive.
+// The set is divided into two lists: one with IDs that should continue to be kept alive,
+// and the other with IDs that should be dropped.
+func (ka *keepAlive) getAckIDs() (live, expired []string) {
+	ka.mu.Lock()
+	defer ka.mu.Unlock()
+
+	now := time.Now()
+	for id, expiry := range ka.items {
+		if expiry.Before(now) {
+			expired = append(expired, id)
+		} else {
+			live = append(live, id)
+		}
+	}
+	return live, expired
+}
+
+const maxExtensionAttempts = 2
+
+func (ka *keepAlive) extendDeadlines(ackIDs []string) {
+	head, tail := ka.s.splitAckIDs(ackIDs)
+	for len(head) > 0 {
+		for i := 0; i < maxExtensionAttempts; i++ {
+			if ka.s.modifyAckDeadline(ka.Ctx, ka.Sub, ka.Deadline, head) == nil {
+				break
+			}
+		}
+		// NOTE: Messages whose deadlines we fail to extend will
+		// eventually be redelivered and this is a documented behaviour
+		// of the API.
+		//
+		// NOTE: If we fail to extend deadlines here, this
+		// implementation will continue to attempt extending the
+		// deadlines for those ack IDs the next time the extension
+		// ticker ticks. By then the deadline will have expired.
+		// Re-extending them is harmless, however.
+		//
+		// TODO: call Remove for ids which fail to be extended.
+
+		head, tail = ka.s.splitAckIDs(tail)
+	}
+}
+
+// A drain (once started) indicates via a channel when there is no work pending.
+type drain struct {
+	started bool
+	pending bool
+
+	// Drained is closed once there are no items outstanding if Drain has been called.
+	Drained chan struct{}
+}
+
+// Drain starts the drain process. This cannot be undone.
+func (d *drain) Drain() {
+	d.started = true
+	d.closeIfDrained()
+}
+
+// SetPending sets whether there is work pending or not. It may be called multiple times before or after Drain.
+func (d *drain) SetPending(pending bool) {
+	d.pending = pending
+	d.closeIfDrained()
+}
+
+func (d *drain) closeIfDrained() {
+	if !d.pending && d.started {
+		// Check to see if d.Drained is closed before closing it.
+		// This allows SetPending(false) to be safely called multiple times.
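+		// (Callers are expected to hold an external mutex, as keepAlive
+		// does with ka.mu, so the check-then-close below is not itself
+		// racy.)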
+ select { + case <-d.Drained: + default: + close(d.Drained) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/keepalive_test.go b/vendor/cloud.google.com/go/pubsub/keepalive_test.go new file mode 100644 index 000000000..0128afe64 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/keepalive_test.go @@ -0,0 +1,319 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "errors" + "reflect" + "sort" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestKeepAliveExtendsDeadline(t *testing.T) { + ticker := make(chan time.Time) + deadline := time.Nanosecond * 15 + s := &testService{modDeadlineCalled: make(chan modDeadlineCall)} + + checkModDeadlineCall := func(ackIDs []string) { + got := <-s.modDeadlineCalled + sort.Strings(got.ackIDs) + + want := modDeadlineCall{ + subName: "subname", + deadline: deadline, + ackIDs: ackIDs, + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("keepalive: got:\n%v\nwant:\n%v", got, want) + } + } + + ka := &keepAlive{ + s: s, + Ctx: context.Background(), + Sub: "subname", + ExtensionTick: ticker, + Deadline: deadline, + MaxExtension: time.Hour, + } + ka.Start() + + ka.Add("a") + ka.Add("b") + ticker <- time.Time{} + checkModDeadlineCall([]string{"a", "b"}) + ka.Add("c") + ka.Remove("b") + ticker <- time.Time{} + checkModDeadlineCall([]string{"a", "c"}) + ka.Remove("a") + ka.Remove("c") + ka.Add("d") + ticker <- time.Time{} + checkModDeadlineCall([]string{"d"}) + + ka.Remove("d") + ka.Stop() +} + +func TestKeepAliveStopsWhenNoItem(t *testing.T) { + ticker := make(chan time.Time) + stopped := make(chan bool) + s := &testService{modDeadlineCalled: make(chan modDeadlineCall, 3)} + ka := &keepAlive{ + s: s, + Ctx: context.Background(), + ExtensionTick: ticker, + } + + ka.Start() + + // There should be no call to modifyAckDeadline since there is no item. + ticker <- time.Time{} + + go func() { + ka.Stop() // No items; should not block + if len(s.modDeadlineCalled) > 0 { + t.Errorf("unexpected extension to non-existent items: %v", <-s.modDeadlineCalled) + } + close(stopped) + }() + + select { + case <-stopped: + case <-time.After(time.Second): + t.Errorf("keepAlive timed out waiting for stop") + } +} + +func TestKeepAliveStopsWhenItemsExpired(t *testing.T) { + ticker := make(chan time.Time) + stopped := make(chan bool) + s := &testService{modDeadlineCalled: make(chan modDeadlineCall, 2)} + ka := &keepAlive{ + s: s, + Ctx: context.Background(), + ExtensionTick: ticker, + MaxExtension: time.Duration(0), // Should expire items at the first tick. + } + + ka.Start() + ka.Add("a") + ka.Add("b") + + // Wait until the clock advances. Without this loop, this test fails on + // Windows because the clock doesn't advance at all between ka.Add and the + // expiration check after the tick is received. + begin := time.Now() + for time.Now().Equal(begin) { + time.Sleep(time.Millisecond) + } + + // There should be no call to modifyAckDeadline since both items are expired. 
+	ticker <- time.Time{}
+
+	go func() {
+		ka.Stop() // No live items; should not block.
+		if len(s.modDeadlineCalled) > 0 {
+			t.Errorf("unexpected extension to expired items")
+		}
+		close(stopped)
+	}()
+
+	select {
+	case <-stopped:
+	case <-time.After(time.Second):
+		t.Errorf("timed out waiting for stop")
+	}
+}
+
+func TestKeepAliveBlocksUntilAllItemsRemoved(t *testing.T) {
+	ticker := make(chan time.Time)
+	eventc := make(chan string, 3)
+	s := &testService{modDeadlineCalled: make(chan modDeadlineCall)}
+	ka := &keepAlive{
+		s:             s,
+		Ctx:           context.Background(),
+		ExtensionTick: ticker,
+		MaxExtension:  time.Hour, // Should not expire.
+	}
+
+	ka.Start()
+	ka.Add("a")
+	ka.Add("b")
+
+	go func() {
+		ticker <- time.Time{}
+
+		// We expect a call since both items should be extended.
+		select {
+		case args := <-s.modDeadlineCalled:
+			sort.Strings(args.ackIDs)
+			got := args.ackIDs
+			want := []string{"a", "b"}
+			if !reflect.DeepEqual(got, want) {
+				t.Errorf("mismatching IDs:\ngot %v\nwant %v", got, want)
+			}
+		case <-time.After(time.Second):
+			t.Errorf("timed out waiting for deadline extend call")
+		}
+
+		time.Sleep(10 * time.Millisecond)
+
+		eventc <- "pre-remove-b"
+		// Remove one item, Stop should still be waiting.
+		ka.Remove("b")
+
+		ticker <- time.Time{}
+
+		// We expect a call since the item is still alive.
+		select {
+		case args := <-s.modDeadlineCalled:
+			got := args.ackIDs
+			want := []string{"a"}
+			if !reflect.DeepEqual(got, want) {
+				t.Errorf("mismatching IDs:\ngot %v\nwant %v", got, want)
+			}
+		case <-time.After(time.Second):
+			t.Errorf("timed out waiting for deadline extend call")
+		}
+
+		time.Sleep(10 * time.Millisecond)
+
+		eventc <- "pre-remove-a"
+		// Remove the last item so that Stop can proceed.
+		ka.Remove("a")
+	}()
+
+	go func() {
+		ka.Stop() // Should block until all items are removed.
+		eventc <- "post-stop"
+	}()
+
+	for i, want := range []string{"pre-remove-b", "pre-remove-a", "post-stop"} {
+		select {
+		case got := <-eventc:
+			if got != want {
+				t.Errorf("event #%d:\ngot %v\nwant %v", i, got, want)
+			}
+		case <-time.After(time.Second):
+			t.Errorf("timed out waiting for #%d event: want %v", i, want)
+		}
+	}
+}
+
+// extendCallResult contains a list of ackIDs which are expected in an ackID
+// extension request, along with the result that should be returned.
+type extendCallResult struct {
+	ackIDs []string
+	err    error
+}
+
+// extendService implements modifyAckDeadline using a hard-coded list of extendCallResults.
+type extendService struct {
+	service
+
+	calls []extendCallResult
+
+	t *testing.T // used for error logging.
+}
+
+func (es *extendService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
+	if len(es.calls) == 0 {
+		es.t.Fatalf("unexpected call to modifyAckDeadline: ackIDs: %v", ackIDs)
+	}
+	call := es.calls[0]
+	es.calls = es.calls[1:]
+
+	if got, want := ackIDs, call.ackIDs; !reflect.DeepEqual(got, want) {
+		es.t.Errorf("unexpected arguments to modifyAckDeadline: got: %v ; want: %v", got, want)
+	}
+	return call.err
+}
+
+// Test implementation returns the first 2 elements as head, and the rest as tail.
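+// For example, ["a", "b", "c"] splits into head ["a", "b"] and tail ["c"].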
+func (es *extendService) splitAckIDs(ids []string) ([]string, []string) { + if len(ids) < 2 { + return ids, nil + } + return ids[:2], ids[2:] +} + +func TestKeepAliveSplitsBatches(t *testing.T) { + type testCase struct { + calls []extendCallResult + } + for _, tc := range []testCase{ + { + calls: []extendCallResult{ + { + ackIDs: []string{"a", "b"}, + }, + { + ackIDs: []string{"c", "d"}, + }, + { + ackIDs: []string{"e", "f"}, + }, + }, + }, + { + calls: []extendCallResult{ + { + ackIDs: []string{"a", "b"}, + err: errors.New("bang"), + }, + // On error we retry once. + { + ackIDs: []string{"a", "b"}, + err: errors.New("bang"), + }, + // We give up after failing twice, so we move on to the next set, "c" and "d". + { + ackIDs: []string{"c", "d"}, + err: errors.New("bang"), + }, + // Again, we retry once. + { + ackIDs: []string{"c", "d"}, + }, + { + ackIDs: []string{"e", "f"}, + }, + }, + }, + } { + s := &extendService{ + t: t, + calls: tc.calls, + } + + ka := &keepAlive{ + s: s, + Ctx: context.Background(), + Sub: "subname", + } + + ka.extendDeadlines([]string{"a", "b", "c", "d", "e", "f"}) + + if len(s.calls) != 0 { + t.Errorf("expected extend calls did not occur: %v", s.calls) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/message.go b/vendor/cloud.google.com/go/pubsub/message.go new file mode 100644 index 000000000..aa7b9d923 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/message.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "encoding/base64" + + raw "google.golang.org/api/pubsub/v1" +) + +// Message represents a Pub/Sub message. +type Message struct { + // ID identifies this message. + // This ID is assigned by the server and is populated for Messages obtained from a subscription. + // It is otherwise ignored. + ID string + + // Data is the actual data in the message. + Data []byte + + // Attributes represents the key-value pairs the current message + // is labelled with. + Attributes map[string]string + + // ackID is the identifier to acknowledge this message. + ackID string + + // TODO(mcgreevy): add publish time. + + calledDone bool + + // The iterator that created this Message. + it *Iterator +} + +func toMessage(resp *raw.ReceivedMessage) (*Message, error) { + if resp.Message == nil { + return &Message{ackID: resp.AckId}, nil + } + data, err := base64.StdEncoding.DecodeString(resp.Message.Data) + if err != nil { + return nil, err + } + return &Message{ + ackID: resp.AckId, + Data: data, + Attributes: resp.Message.Attributes, + ID: resp.Message.MessageId, + }, nil +} + +// Done completes the processing of a Message that was returned from an Iterator. +// ack indicates whether the message should be acknowledged. +// Client code must call Done when finished for each Message returned by an iterator. +// Done may only be called on Messages returned by an iterator. +// If message acknowledgement fails, the Message will be redelivered. 
+// Calls to Done have no effect after the first call.
+func (m *Message) Done(ack bool) {
+	if m.calledDone {
+		return
+	}
+	m.calledDone = true
+	m.it.done(m.ackID, ack)
+}
diff --git a/vendor/cloud.google.com/go/pubsub/pubsub.go b/vendor/cloud.google.com/go/pubsub/pubsub.go
new file mode 100644
index 000000000..f870fc640
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/pubsub.go
@@ -0,0 +1,136 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub // import "cloud.google.com/go/pubsub"
+
+import (
+	"fmt"
+	"net/http"
+	"os"
+
+	"google.golang.org/api/option"
+	raw "google.golang.org/api/pubsub/v1"
+	"google.golang.org/api/transport"
+
+	"golang.org/x/net/context"
+)
+
+const (
+	// ScopePubSub grants permissions to view and manage Pub/Sub
+	// topics and subscriptions.
+	ScopePubSub = "https://www.googleapis.com/auth/pubsub"
+
+	// ScopeCloudPlatform grants permissions to view and manage your data
+	// across Google Cloud Platform services.
+	ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform"
+)
+
+const prodAddr = "https://pubsub.googleapis.com/"
+const userAgent = "gcloud-golang-pubsub/20151008"
+
+// Client is a Google Pub/Sub client, which may be used to perform Pub/Sub operations with a project.
+// It must be constructed via NewClient.
+type Client struct {
+	projectID string
+	s         service
+}
+
+// NewClient creates a new PubSub client.
+func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
+	var o []option.ClientOption
+	// Environment variables for gcloud emulator:
+	// https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/
+	if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" {
+		o = []option.ClientOption{
+			option.WithEndpoint("http://" + addr + "/"),
+			option.WithHTTPClient(http.DefaultClient),
+		}
+	} else {
+		o = []option.ClientOption{
+			option.WithEndpoint(prodAddr),
+			option.WithScopes(raw.PubsubScope, raw.CloudPlatformScope),
+			option.WithUserAgent(userAgent),
+		}
+	}
+	o = append(o, opts...)
+	httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
+	if err != nil {
+		return nil, fmt.Errorf("dialing: %v", err)
+	}
+
+	s, err := newPubSubService(httpClient, endpoint)
+	if err != nil {
+		return nil, fmt.Errorf("constructing pubsub client: %v", err)
+	}
+
+	c := &Client{
+		projectID: projectID,
+		s:         s,
+	}
+
+	return c, nil
+}
+
+func (c *Client) fullyQualifiedProjectName() string {
+	return fmt.Sprintf("projects/%s", c.projectID)
+}
+
+// pageToken stores the next page token for a server response which is split over multiple pages.
+type pageToken struct {
+	tok      string
+	explicit bool
+}
+
+func (pt *pageToken) set(tok string) {
+	pt.tok = tok
+	pt.explicit = true
+}
+
+func (pt *pageToken) get() string {
+	return pt.tok
+}
+
+// more returns whether further pages should be fetched from the server.
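+//
+// A small sketch of the convention: the zero value means "nothing fetched
+// yet", so more reports true; once set("") records an explicit empty token
+// from the server, more reports false.
+//
+//	var pt pageToken
+//	pt.more() // true: no page fetched yet
+//	pt.set("a")
+//	pt.more() // true: the server returned a next-page token
+//	pt.set("")
+//	pt.more() // false: the server returned an empty token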
+func (pt *pageToken) more() bool { + return pt.tok != "" || !pt.explicit +} + +// stringsIterator provides an iterator API for a sequence of API page fetches that return lists of strings. +type stringsIterator struct { + ctx context.Context + strings []string + token pageToken + fetch func(ctx context.Context, tok string) (*stringsPage, error) +} + +// Next returns the next string. If there are no more strings, Done will be returned. +func (si *stringsIterator) Next() (string, error) { + for len(si.strings) == 0 && si.token.more() { + page, err := si.fetch(si.ctx, si.token.get()) + if err != nil { + return "", err + } + si.token.set(page.tok) + si.strings = page.strings + } + + if len(si.strings) == 0 { + return "", Done + } + + s := si.strings[0] + si.strings = si.strings[1:] + + return s, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/puller.go b/vendor/cloud.google.com/go/pubsub/puller.go new file mode 100644 index 000000000..bc2ea8aad --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/puller.go @@ -0,0 +1,115 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "sync" + + "golang.org/x/net/context" +) + +// puller fetches messages from the server in a batch. +type puller struct { + ctx context.Context + cancel context.CancelFunc + + // keepAlive takes ownership of the lifetime of the message identified + // by ackID, ensuring that its ack deadline does not expire. It should + // be called each time a new message is fetched from the server, even + // if it is not yet returned from Next. + keepAlive func(ackID string) + + // abandon should be called for each message which has previously been + // passed to keepAlive, but will never be returned by Next. + abandon func(ackID string) + + // fetch fetches a batch of messages from the server. + fetch func() ([]*Message, error) + + mu sync.Mutex + buf []*Message +} + +// newPuller constructs a new puller. +// batchSize is the maximum number of messages to fetch at once. +// No more than batchSize messages will be outstanding at any time. +func newPuller(s service, subName string, ctx context.Context, batchSize int64, keepAlive, abandon func(ackID string)) *puller { + ctx, cancel := context.WithCancel(ctx) + return &puller{ + cancel: cancel, + keepAlive: keepAlive, + abandon: abandon, + ctx: ctx, + fetch: func() ([]*Message, error) { return s.fetchMessages(ctx, subName, batchSize) }, + } +} + +const maxPullAttempts = 2 + +// Next returns the next message from the server, fetching a new batch if necessary. +// keepAlive is called with the ackIDs of newly fetched messages. +// If p.Ctx has already been cancelled before Next is called, no new messages +// will be fetched. +func (p *puller) Next() (*Message, error) { + p.mu.Lock() + defer p.mu.Unlock() + + // If ctx has been cancelled, return straight away (even if there are buffered messages available). 
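+	// (Stop cancels p.ctx and only then abandons p.buf, both under p.mu,
+	// so checking the context first guarantees that a stopped puller never
+	// hands out a message that has already been abandoned.)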
+ select { + case <-p.ctx.Done(): + return nil, p.ctx.Err() + default: + } + + for len(p.buf) == 0 { + var buf []*Message + var err error + + for i := 0; i < maxPullAttempts; i++ { + // Once Stop has completed, all future calls to Next will immediately fail at this point. + buf, err = p.fetch() + if err == nil || err == context.Canceled || err == context.DeadlineExceeded { + break + } + } + if err != nil { + return nil, err + } + + for _, m := range buf { + p.keepAlive(m.ackID) + } + p.buf = buf + } + + m := p.buf[0] + p.buf = p.buf[1:] + return m, nil +} + +// Stop aborts any pending calls to Next, and prevents any future ones from succeeding. +// Stop also abandons any messages that have been pre-fetched. +// Once Stop completes, no calls to Next will succeed. +func (p *puller) Stop() { + // Next may be executing in another goroutine. Cancel it, and then wait until it terminates. + p.cancel() + p.mu.Lock() + defer p.mu.Unlock() + + for _, m := range p.buf { + p.abandon(m.ackID) + } + p.buf = nil +} diff --git a/vendor/cloud.google.com/go/pubsub/puller_test.go b/vendor/cloud.google.com/go/pubsub/puller_test.go new file mode 100644 index 000000000..b0a3e5d54 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/puller_test.go @@ -0,0 +1,154 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package pubsub
+
+import (
+	"errors"
+	"reflect"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+type fetchResult struct {
+	msgs []*Message
+	err  error
+}
+
+type fetcherService struct {
+	service
+	results        []fetchResult
+	unexpectedCall bool
+}
+
+func (s *fetcherService) fetchMessages(ctx context.Context, subName string, maxMessages int64) ([]*Message, error) {
+	if len(s.results) == 0 {
+		s.unexpectedCall = true
+		return nil, errors.New("bang")
+	}
+	ret := s.results[0]
+	s.results = s.results[1:]
+	return ret.msgs, ret.err
+}
+
+func TestPuller(t *testing.T) {
+	s := &fetcherService{
+		results: []fetchResult{
+			{
+				msgs: []*Message{{ackID: "a"}, {ackID: "b"}},
+			},
+			{},
+			{
+				msgs: []*Message{{ackID: "c"}, {ackID: "d"}},
+			},
+			{
+				msgs: []*Message{{ackID: "e"}},
+			},
+		},
+	}
+
+	pulled := make(chan string, 10)
+
+	pull := newPuller(s, "subname", context.Background(), 2, func(ackID string) { pulled <- ackID }, func(string) {})
+
+	got := []string{}
+	for i := 0; i < 5; i++ {
+		m, err := pull.Next()
+		if err != nil {
+			t.Fatalf("unexpected err from pull.Next: %v", err)
+		}
+		got = append(got, m.ackID)
+	}
+	_, err := pull.Next()
+	if err == nil {
+		t.Errorf("expected error from pull.Next once the fetch results are exhausted; got nil")
+	}
+
+	want := []string{"a", "b", "c", "d", "e"}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("pulled ack ids: got: %v ; want: %v", got, want)
+	}
+}
+
+func TestPullerAddsToKeepAlive(t *testing.T) {
+	s := &fetcherService{
+		results: []fetchResult{
+			{
+				msgs: []*Message{{ackID: "a"}, {ackID: "b"}},
+			},
+			{
+				msgs: []*Message{{ackID: "c"}, {ackID: "d"}},
+			},
+		},
+	}
+
+	pulled := make(chan string, 10)
+
+	pull := newPuller(s, "subname", context.Background(), 2, func(ackID string) { pulled <- ackID }, func(string) {})
+
+	got := []string{}
+	for i := 0; i < 3; i++ {
+		m, err := pull.Next()
+		if err != nil {
+			t.Fatalf("unexpected err from pull.Next: %v", err)
+		}
+		got = append(got, m.ackID)
+	}
+
+	want := []string{"a", "b", "c"}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("pulled ack ids: got: %v ; want: %v", got, want)
+	}
+
+	close(pulled)
+	// We should have seen "d" written to the channel too, even though it hasn't been returned yet.
+	pulledIDs := []string{}
+	for id := range pulled {
+		pulledIDs = append(pulledIDs, id)
+	}
+
+	want = append(want, "d")
+	if !reflect.DeepEqual(pulledIDs, want) {
+		t.Errorf("pulled ack ids: got: %v ; want: %v", pulledIDs, want)
+	}
+}
+
+func TestPullerRetriesOnce(t *testing.T) {
+	bang := errors.New("bang")
+	s := &fetcherService{
+		results: []fetchResult{
+			{
+				err: bang,
+			},
+			{
+				err: bang,
+			},
+		},
+	}
+
+	pull := newPuller(s, "subname", context.Background(), 2, func(string) {}, func(string) {})
+
+	_, err := pull.Next()
+	if err != bang {
+		t.Errorf("pull.Next err got: %v, want: %v", err, bang)
+	}
+
+	if s.unexpectedCall {
+		t.Errorf("unexpected retry")
+	}
+	if len(s.results) != 0 {
+		t.Errorf("outstanding calls: got: %v, want: 0", len(s.results))
+	}
+}
diff --git a/vendor/cloud.google.com/go/pubsub/service.go b/vendor/cloud.google.com/go/pubsub/service.go
new file mode 100644
index 000000000..e894ec2d0
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/service.go
@@ -0,0 +1,283 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "encoding/base64" + "fmt" + "net/http" + "time" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/pubsub/v1" +) + +// service provides an internal abstraction to isolate the generated +// PubSub API; most of this package uses this interface instead. +// The single implementation, *apiService, contains all the knowledge +// of the generated PubSub API (except for that present in legacy code). +type service interface { + createSubscription(ctx context.Context, topicName, subName string, ackDeadline time.Duration, pushConfig *PushConfig) error + getSubscriptionConfig(ctx context.Context, subName string) (*SubscriptionConfig, string, error) + listProjectSubscriptions(ctx context.Context, projName, pageTok string) (*stringsPage, error) + deleteSubscription(ctx context.Context, name string) error + subscriptionExists(ctx context.Context, name string) (bool, error) + modifyPushConfig(ctx context.Context, subName string, conf *PushConfig) error + + createTopic(ctx context.Context, name string) error + deleteTopic(ctx context.Context, name string) error + topicExists(ctx context.Context, name string) (bool, error) + listProjectTopics(ctx context.Context, projName, pageTok string) (*stringsPage, error) + listTopicSubscriptions(ctx context.Context, topicName, pageTok string) (*stringsPage, error) + + modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error + fetchMessages(ctx context.Context, subName string, maxMessages int64) ([]*Message, error) + publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error) + + // splitAckIDs divides ackIDs into + // * a batch of a size which is suitable for passing to acknowledge or + // modifyAckDeadline, and + // * the rest. + splitAckIDs(ackIDs []string) ([]string, []string) + + // acknowledge ACKs the IDs in ackIDs. 
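+	// The caller is expected to chunk large ID sets with splitAckIDs
+	// first, in the same way keepAlive.extendDeadlines does for
+	// modifyAckDeadline:
+	//
+	//	head, tail := s.splitAckIDs(ids)
+	//	for len(head) > 0 {
+	//		// issue the RPC for head, then:
+	//		head, tail = s.splitAckIDs(tail)
+	//	}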
+ acknowledge(ctx context.Context, subName string, ackIDs []string) error +} + +type apiService struct { + s *raw.Service +} + +func newPubSubService(client *http.Client, endpoint string) (*apiService, error) { + s, err := raw.New(client) + if err != nil { + return nil, err + } + s.BasePath = endpoint + + return &apiService{s: s}, nil +} + +func (s *apiService) createSubscription(ctx context.Context, topicName, subName string, ackDeadline time.Duration, pushConfig *PushConfig) error { + var rawPushConfig *raw.PushConfig + if pushConfig != nil { + rawPushConfig = &raw.PushConfig{ + Attributes: pushConfig.Attributes, + PushEndpoint: pushConfig.Endpoint, + } + } + rawSub := &raw.Subscription{ + AckDeadlineSeconds: int64(ackDeadline.Seconds()), + PushConfig: rawPushConfig, + Topic: topicName, + } + _, err := s.s.Projects.Subscriptions.Create(subName, rawSub).Context(ctx).Do() + return err +} + +func (s *apiService) getSubscriptionConfig(ctx context.Context, subName string) (*SubscriptionConfig, string, error) { + rawSub, err := s.s.Projects.Subscriptions.Get(subName).Context(ctx).Do() + if err != nil { + return nil, "", err + } + sub := &SubscriptionConfig{ + AckDeadline: time.Second * time.Duration(rawSub.AckDeadlineSeconds), + PushConfig: PushConfig{ + Endpoint: rawSub.PushConfig.PushEndpoint, + Attributes: rawSub.PushConfig.Attributes, + }, + } + return sub, rawSub.Topic, err +} + +// stringsPage contains a list of strings and a token for fetching the next page. +type stringsPage struct { + strings []string + tok string +} + +func (s *apiService) listProjectSubscriptions(ctx context.Context, projName, pageTok string) (*stringsPage, error) { + resp, err := s.s.Projects.Subscriptions.List(projName).PageToken(pageTok).Context(ctx).Do() + if err != nil { + return nil, err + } + subs := []string{} + for _, sub := range resp.Subscriptions { + subs = append(subs, sub.Name) + } + return &stringsPage{subs, resp.NextPageToken}, nil +} + +func (s *apiService) deleteSubscription(ctx context.Context, name string) error { + _, err := s.s.Projects.Subscriptions.Delete(name).Context(ctx).Do() + return err +} + +func (s *apiService) subscriptionExists(ctx context.Context, name string) (bool, error) { + _, err := s.s.Projects.Subscriptions.Get(name).Context(ctx).Do() + if err == nil { + return true, nil + } + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return false, nil + } + return false, err +} + +func (s *apiService) createTopic(ctx context.Context, name string) error { + // Note: The raw API expects a Topic body, but ignores it. + _, err := s.s.Projects.Topics.Create(name, &raw.Topic{}). + Context(ctx). 
+ Do() + return err +} + +func (s *apiService) listProjectTopics(ctx context.Context, projName, pageTok string) (*stringsPage, error) { + resp, err := s.s.Projects.Topics.List(projName).PageToken(pageTok).Context(ctx).Do() + if err != nil { + return nil, err + } + topics := []string{} + for _, topic := range resp.Topics { + topics = append(topics, topic.Name) + } + return &stringsPage{topics, resp.NextPageToken}, nil +} + +func (s *apiService) deleteTopic(ctx context.Context, name string) error { + _, err := s.s.Projects.Topics.Delete(name).Context(ctx).Do() + return err +} + +func (s *apiService) topicExists(ctx context.Context, name string) (bool, error) { + _, err := s.s.Projects.Topics.Get(name).Context(ctx).Do() + if err == nil { + return true, nil + } + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return false, nil + } + return false, err +} + +func (s *apiService) listTopicSubscriptions(ctx context.Context, topicName, pageTok string) (*stringsPage, error) { + resp, err := s.s.Projects.Topics.Subscriptions.List(topicName).PageToken(pageTok).Context(ctx).Do() + if err != nil { + return nil, err + } + subs := []string{} + for _, sub := range resp.Subscriptions { + subs = append(subs, sub) + } + return &stringsPage{subs, resp.NextPageToken}, nil +} + +func (s *apiService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error { + req := &raw.ModifyAckDeadlineRequest{ + AckDeadlineSeconds: int64(deadline.Seconds()), + AckIds: ackIDs, + } + _, err := s.s.Projects.Subscriptions.ModifyAckDeadline(subName, req). + Context(ctx). + Do() + return err +} + +// maxPayload is the maximum number of bytes to devote to actual ids in +// acknowledgement or modifyAckDeadline requests. Note that there is ~1K of +// constant overhead, plus 3 bytes per ID (two quotes and a comma). The total +// payload size may not exceed 512K. +const maxPayload = 500 * 1024 +const overheadPerID = 3 // 3 bytes of JSON + +// splitAckIDs splits ids into two slices, the first of which contains at most maxPayload bytes of ackID data. +func (s *apiService) splitAckIDs(ids []string) ([]string, []string) { + total := 0 + for i, id := range ids { + total += len(id) + overheadPerID + if total > maxPayload { + return ids[:i], ids[i:] + } + } + return ids, nil +} + +func (s *apiService) acknowledge(ctx context.Context, subName string, ackIDs []string) error { + req := &raw.AcknowledgeRequest{ + AckIds: ackIDs, + } + _, err := s.s.Projects.Subscriptions.Acknowledge(subName, req). + Context(ctx). + Do() + return err +} + +func (s *apiService) fetchMessages(ctx context.Context, subName string, maxMessages int64) ([]*Message, error) { + req := &raw.PullRequest{ + MaxMessages: maxMessages, + } + resp, err := s.s.Projects.Subscriptions.Pull(subName, req). + Context(ctx). 
+		Do()
+
+	if err != nil {
+		return nil, err
+	}
+
+	msgs := make([]*Message, 0, len(resp.ReceivedMessages))
+	for i, m := range resp.ReceivedMessages {
+		msg, err := toMessage(m)
+		if err != nil {
+			return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m)
+		}
+		msgs = append(msgs, msg)
+	}
+
+	return msgs, nil
+}
+
+func (s *apiService) publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error) {
+	rawMsgs := make([]*raw.PubsubMessage, len(msgs))
+	for i, msg := range msgs {
+		rawMsgs[i] = &raw.PubsubMessage{
+			Data:       base64.StdEncoding.EncodeToString(msg.Data),
+			Attributes: msg.Attributes,
+		}
+	}
+
+	req := &raw.PublishRequest{Messages: rawMsgs}
+	resp, err := s.s.Projects.Topics.Publish(topicName, req).
+		Context(ctx).
+		Do()
+
+	if err != nil {
+		return nil, err
+	}
+	return resp.MessageIds, nil
+}
+
+func (s *apiService) modifyPushConfig(ctx context.Context, subName string, conf *PushConfig) error {
+	req := &raw.ModifyPushConfigRequest{
+		PushConfig: &raw.PushConfig{
+			Attributes:   conf.Attributes,
+			PushEndpoint: conf.Endpoint,
+		},
+	}
+	_, err := s.s.Projects.Subscriptions.ModifyPushConfig(subName, req).
+		Context(ctx).
+		Do()
+	return err
+}
diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go
new file mode 100644
index 000000000..09931b7ff
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/subscription.go
@@ -0,0 +1,255 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// DefaultMaxExtension is the default period for which to automatically extend Message acknowledgement deadlines.
+const DefaultMaxExtension = 10 * time.Minute
+
+// DefaultMaxPrefetch is the default maximum number of messages that are prefetched from the server.
+const DefaultMaxPrefetch = 100
+
+// Subscription is a reference to a PubSub subscription.
+type Subscription struct {
+	s service
+
+	// The fully qualified identifier for the subscription, in the format "projects/<projid>/subscriptions/<name>"
+	name string
+}
+
+// Subscription creates a reference to a subscription.
+func (c *Client) Subscription(name string) *Subscription {
+	return &Subscription{
+		s:    c.s,
+		name: fmt.Sprintf("projects/%s/subscriptions/%s", c.projectID, name),
+	}
+}
+
+// Name returns the globally unique name for the subscription.
+func (s *Subscription) Name() string {
+	return s.name
+}
+
+// Subscriptions returns an iterator which returns all of the subscriptions for the client's project.
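+//
+// A typical loop over the results (Done is this package's end-of-iteration
+// sentinel; the error handling is a sketch, not prescriptive):
+//
+//	it := client.Subscriptions(ctx)
+//	for {
+//		sub, err := it.Next()
+//		if err == Done {
+//			break
+//		}
+//		if err != nil {
+//			// handle err
+//			break
+//		}
+//		fmt.Println(sub.Name())
+//	}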
+func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator {
+	return &SubscriptionIterator{
+		s: c.s,
+		stringsIterator: stringsIterator{
+			ctx: ctx,
+			fetch: func(ctx context.Context, tok string) (*stringsPage, error) {
+				return c.s.listProjectSubscriptions(ctx, c.fullyQualifiedProjectName(), tok)
+			},
+		},
+	}
+}
+
+// SubscriptionIterator is an iterator that returns a series of subscriptions.
+type SubscriptionIterator struct {
+	s service
+	stringsIterator
+}
+
+// Next returns the next subscription. If there are no more subscriptions, Done will be returned.
+func (subs *SubscriptionIterator) Next() (*Subscription, error) {
+	subName, err := subs.stringsIterator.Next()
+	if err != nil {
+		return nil, err
+	}
+
+	return &Subscription{s: subs.s, name: subName}, nil
+}
+
+// PushConfig contains configuration for subscriptions that operate in push mode.
+type PushConfig struct {
+	// A URL locating the endpoint to which messages should be pushed.
+	Endpoint string
+
+	// Endpoint configuration attributes. See https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions#PushConfig.FIELDS.attributes for more details.
+	Attributes map[string]string
+}
+
+// SubscriptionConfig contains the configuration of a subscription.
+type SubscriptionConfig struct {
+	Topic      *Topic
+	PushConfig PushConfig
+
+	// The default maximum time after a subscriber receives a message
+	// before the subscriber should acknowledge the message. Note:
+	// messages which are obtained via an Iterator need not be acknowledged
+	// within this deadline, as the deadline will be automatically
+	// extended.
+	AckDeadline time.Duration
+}
+
+// Delete deletes the subscription.
+func (s *Subscription) Delete(ctx context.Context) error {
+	return s.s.deleteSubscription(ctx, s.name)
+}
+
+// Exists reports whether the subscription exists on the server.
+func (s *Subscription) Exists(ctx context.Context) (bool, error) {
+	return s.s.subscriptionExists(ctx, s.name)
+}
+
+// Config fetches the current configuration for the subscription.
+func (s *Subscription) Config(ctx context.Context) (*SubscriptionConfig, error) {
+	conf, topicName, err := s.s.getSubscriptionConfig(ctx, s.name)
+	if err != nil {
+		return nil, err
+	}
+	conf.Topic = &Topic{
+		s:    s.s,
+		name: topicName,
+	}
+	return conf, nil
+}
+
+// Pull returns an Iterator that can be used to fetch Messages. The Iterator
+// will automatically extend the ack deadline of all fetched Messages, for the
+// period specified by DefaultMaxExtension. This may be overridden by supplying
+// a MaxExtension pull option.
+//
+// If ctx is cancelled or exceeds its deadline, outstanding acks or deadline
+// extensions will fail.
+//
+// The caller must call Stop on the Iterator once finished with it.
+func (s *Subscription) Pull(ctx context.Context, opts ...PullOption) (*Iterator, error) {
+	config, err := s.Config(ctx)
+	if err != nil {
+		return nil, err
+	}
+	po := processPullOptions(opts)
+	po.ackDeadline = config.AckDeadline
+	return newIterator(ctx, s.s, s.name, po), nil
+}
+
+// ModifyPushConfig updates the endpoint URL and other attributes of a push subscription.
+func (s *Subscription) ModifyPushConfig(ctx context.Context, conf *PushConfig) error {
+	if conf == nil {
+		return errors.New("must supply non-nil PushConfig")
+	}
+
+	return s.s.modifyPushConfig(ctx, s.name, conf)
+}
+
+// A PullOption is an optional argument to Subscription.Pull.
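+//
+// A consumption sketch tying the pieces together ("my-sub" is a placeholder
+// subscription name):
+//
+//	sub := client.Subscription("my-sub")
+//	it, err := sub.Pull(ctx, MaxPrefetch(10), MaxExtension(time.Minute))
+//	if err != nil {
+//		// handle err
+//	}
+//	defer it.Stop()
+//	msg, err := it.Next()
+//	if err == nil {
+//		// process msg, then acknowledge it.
+//		msg.Done(true)
+//	}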
+type PullOption interface {
+	setOptions(o *pullOptions)
+}
+
+type pullOptions struct {
+	// maxExtension is the maximum period for which the iterator should
+	// automatically extend the ack deadline for each message.
+	maxExtension time.Duration
+
+	// maxPrefetch is the maximum number of Messages to have in flight, to
+	// be returned by Iterator.Next.
+	maxPrefetch int
+
+	// ackDeadline is the default ack deadline for the subscription. Not
+	// configurable via a PullOption.
+	ackDeadline time.Duration
+}
+
+func processPullOptions(opts []PullOption) *pullOptions {
+	po := &pullOptions{
+		maxExtension: DefaultMaxExtension,
+		maxPrefetch:  DefaultMaxPrefetch,
+	}
+
+	for _, o := range opts {
+		o.setOptions(po)
+	}
+
+	return po
+}
+
+type maxPrefetch int
+
+func (max maxPrefetch) setOptions(o *pullOptions) {
+	if o.maxPrefetch = int(max); o.maxPrefetch < 1 {
+		o.maxPrefetch = 1
+	}
+}
+
+// MaxPrefetch returns a PullOption that limits Message prefetching.
+//
+// For performance reasons, the pubsub library may prefetch a pool of Messages
+// to be returned serially from Iterator.Next. MaxPrefetch is used to limit the
+// size of this pool.
+//
+// If num is less than 1, it will be treated as if it were 1.
+func MaxPrefetch(num int) PullOption {
+	return maxPrefetch(num)
+}
+
+type maxExtension time.Duration
+
+func (max maxExtension) setOptions(o *pullOptions) {
+	if o.maxExtension = time.Duration(max); o.maxExtension < 0 {
+		o.maxExtension = 0
+	}
+}
+
+// MaxExtension returns a PullOption that limits how long ack deadlines are
+// extended for.
+//
+// An Iterator will automatically extend the ack deadline of all fetched
+// Messages for the duration specified. Automatic deadline extension may be
+// disabled by specifying a duration of 0.
+func MaxExtension(duration time.Duration) PullOption {
+	return maxExtension(duration)
+}
+
+// CreateSubscription creates a new subscription on a topic.
+//
+// name is the name of the subscription to create. It must start with a letter,
+// and contain only letters ([A-Za-z]), numbers ([0-9]), dashes (-),
+// underscores (_), periods (.), tildes (~), plus (+) or percent signs (%). It
+// must be between 3 and 255 characters in length, and must not start with
+// "goog".
+//
+// topic is the topic from which the subscription should receive messages. It
+// need not belong to the same project as the subscription.
+//
+// ackDeadline is the maximum time after a subscriber receives a message before
+// the subscriber should acknowledge the message. It must be between 10 and 600
+// seconds (inclusive), and is rounded down to the nearest second. If the
+// provided ackDeadline is 0, then the default value of 10 seconds is used.
+// Note: messages which are obtained via an Iterator need not be acknowledged
+// within this deadline, as the deadline will be automatically extended.
+//
+// pushConfig may be set to configure this subscription for push delivery.
+//
+// If the subscription already exists, an error will be returned.
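+//
+// For example (names are placeholders; passing 0 selects the 10s default
+// deadline and a nil pushConfig selects pull delivery):
+//
+//	topic := client.Topic("my-topic")
+//	sub, err := client.CreateSubscription(ctx, "my-sub", topic, 0, nil)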
+func (c *Client) CreateSubscription(ctx context.Context, name string, topic *Topic, ackDeadline time.Duration, pushConfig *PushConfig) (*Subscription, error) { + if ackDeadline == 0 { + ackDeadline = 10 * time.Second + } + if d := ackDeadline.Seconds(); d < 10 || d > 600 { + return nil, fmt.Errorf("ack deadline must be between 10 and 600 seconds; got: %v", d) + } + + sub := c.Subscription(name) + err := c.s.createSubscription(ctx, topic.Name(), sub.Name(), ackDeadline, pushConfig) + return sub, err +} diff --git a/vendor/cloud.google.com/go/pubsub/subscription_test.go b/vendor/cloud.google.com/go/pubsub/subscription_test.go new file mode 100644 index 000000000..957141158 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/subscription_test.go @@ -0,0 +1,147 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "errors" + "reflect" + "testing" + + "golang.org/x/net/context" +) + +type subListCall struct { + inTok, outTok string + subs []string + err error +} + +type subListService struct { + service + calls []subListCall + + t *testing.T // for error logging. +} + +func (s *subListService) listSubs(pageTok string) (*stringsPage, error) { + if len(s.calls) == 0 { + s.t.Errorf("unexpected call: pageTok: %q", pageTok) + return nil, errors.New("bang") + } + + call := s.calls[0] + s.calls = s.calls[1:] + if call.inTok != pageTok { + s.t.Errorf("page token: got: %v, want: %v", pageTok, call.inTok) + } + return &stringsPage{call.subs, call.outTok}, call.err +} + +func (s *subListService) listProjectSubscriptions(ctx context.Context, projName, pageTok string) (*stringsPage, error) { + if projName != "projects/projid" { + s.t.Errorf("unexpected call: projName: %q, pageTok: %q", projName, pageTok) + return nil, errors.New("bang") + } + return s.listSubs(pageTok) +} + +func (s *subListService) listTopicSubscriptions(ctx context.Context, topicName, pageTok string) (*stringsPage, error) { + if topicName != "projects/projid/topics/topic" { + s.t.Errorf("unexpected call: topicName: %q, pageTok: %q", topicName, pageTok) + return nil, errors.New("bang") + } + return s.listSubs(pageTok) +} + +// All returns the remaining subscriptions from this iterator. 
+func slurpSubs(it *SubscriptionIterator) ([]*Subscription, error) {
+	var subs []*Subscription
+	for {
+		switch sub, err := it.Next(); err {
+		case nil:
+			subs = append(subs, sub)
+		case Done:
+			return subs, nil
+		default:
+			return nil, err
+		}
+	}
+}
+
+func TestListProjectSubscriptions(t *testing.T) {
+	calls := []subListCall{
+		{
+			subs:   []string{"s1", "s2"},
+			outTok: "a",
+		},
+		{
+			inTok:  "a",
+			subs:   []string{"s3"},
+			outTok: "",
+		},
+	}
+	s := &subListService{calls: calls, t: t}
+	c := &Client{projectID: "projid", s: s}
+	subs, err := slurpSubs(c.Subscriptions(context.Background()))
+	if err != nil {
+		t.Errorf("error listing subscriptions: %v", err)
+	}
+	got := subNames(subs)
+	want := []string{"s1", "s2", "s3"}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("sub list: got: %v, want: %v", got, want)
+	}
+	if len(s.calls) != 0 {
+		t.Errorf("outstanding calls: %v", s.calls)
+	}
+}
+
+func TestListTopicSubscriptions(t *testing.T) {
+	calls := []subListCall{
+		{
+			subs:   []string{"s1", "s2"},
+			outTok: "a",
+		},
+		{
+			inTok:  "a",
+			subs:   []string{"s3"},
+			outTok: "",
+		},
+	}
+	s := &subListService{calls: calls, t: t}
+	c := &Client{projectID: "projid", s: s}
+	subs, err := slurpSubs(c.Topic("topic").Subscriptions(context.Background()))
+	if err != nil {
+		t.Errorf("error listing subscriptions: %v", err)
+	}
+	got := subNames(subs)
+	want := []string{"s1", "s2", "s3"}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("sub list: got: %v, want: %v", got, want)
+	}
+	if len(s.calls) != 0 {
+		t.Errorf("outstanding calls: %v", s.calls)
+	}
+}
+
+func subNames(subs []*Subscription) []string {
+	var names []string
+
+	for _, sub := range subs {
+		names = append(names, sub.name)
+	}
+	return names
+}
diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go
new file mode 100644
index 000000000..c0bc4217a
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/topic.go
@@ -0,0 +1,124 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"fmt"
+
+	"golang.org/x/net/context"
+)
+
+// MaxPublishBatchSize is the maximum number of messages that may be supplied to a single call to Topic.Publish.
+const MaxPublishBatchSize = 1000
+
+// Topic is a reference to a PubSub topic.
+type Topic struct {
+	s service
+
+	// The fully qualified identifier for the topic, in the format "projects/<projid>/topics/<name>"
+	name string
+}
+
+// CreateTopic creates a new topic.
+// The specified topic name must start with a letter, and contain only letters
+// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.),
+// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255
+// characters in length, and must not start with "goog".
+// If the topic already exists, an error will be returned.
+func (c *Client) CreateTopic(ctx context.Context, name string) (*Topic, error) {
+	t := c.Topic(name)
+	err := c.s.createTopic(ctx, t.Name())
+	return t, err
+}
+
+// Topic creates a reference to a topic.
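+//
+// A publish sketch built on such a reference ("my-topic" is a placeholder
+// name; the topic must already exist):
+//
+//	t := client.Topic("my-topic")
+//	ids, err := t.Publish(ctx, &Message{Data: []byte("hello")})
+//	// On success, ids[0] is the server-assigned ID of the published message.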
+func (c *Client) Topic(name string) *Topic { + return &Topic{s: c.s, name: fmt.Sprintf("projects/%s/topics/%s", c.projectID, name)} +} + +// Topics returns an iterator which returns all of the topics for the client's project. +func (c *Client) Topics(ctx context.Context) *TopicIterator { + return &TopicIterator{ + s: c.s, + stringsIterator: stringsIterator{ + ctx: ctx, + fetch: func(ctx context.Context, tok string) (*stringsPage, error) { + return c.s.listProjectTopics(ctx, c.fullyQualifiedProjectName(), tok) + }, + }, + } +} + +// TopicIterator is an iterator that returns a series of topics. +type TopicIterator struct { + s service + stringsIterator +} + +// Next returns the next topic. If there are no more topics, Done will be returned. +func (tps *TopicIterator) Next() (*Topic, error) { + topicName, err := tps.stringsIterator.Next() + if err != nil { + return nil, err + } + return &Topic{s: tps.s, name: topicName}, nil +} + +// Name returns the globally unique name for the topic. +func (t *Topic) Name() string { + return t.name +} + +// Delete deletes the topic. +func (t *Topic) Delete(ctx context.Context) error { + return t.s.deleteTopic(ctx, t.name) +} + +// Exists reports whether the topic exists on the server. +func (t *Topic) Exists(ctx context.Context) (bool, error) { + if t.name == "_deleted-topic_" { + return false, nil + } + + return t.s.topicExists(ctx, t.name) +} + +// Subscriptions returns an iterator which returns the subscriptions for this topic. +func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator { + // NOTE: zero or more Subscriptions that are ultimately returned by this + // Subscriptions iterator may belong to a different project to t. + return &SubscriptionIterator{ + s: t.s, + stringsIterator: stringsIterator{ + ctx: ctx, + fetch: func(ctx context.Context, tok string) (*stringsPage, error) { + + return t.s.listTopicSubscriptions(ctx, t.name, tok) + }, + }, + } +} + +// Publish publishes the supplied Messages to the topic. +// If successful, the server-assigned message IDs are returned in the same order as the supplied Messages. +// At most MaxPublishBatchSize messages may be supplied. +func (t *Topic) Publish(ctx context.Context, msgs ...*Message) ([]string, error) { + if len(msgs) == 0 { + return nil, nil + } + if len(msgs) > MaxPublishBatchSize { + return nil, fmt.Errorf("pubsub: got %d messages, but maximum batch size is %d", len(msgs), MaxPublishBatchSize) + } + return t.s.publishMessages(ctx, t.name, msgs) +} diff --git a/vendor/cloud.google.com/go/pubsub/topic_test.go b/vendor/cloud.google.com/go/pubsub/topic_test.go new file mode 100644 index 000000000..911fc8020 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/topic_test.go @@ -0,0 +1,141 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pubsub + +import ( + "errors" + "reflect" + "testing" + + "golang.org/x/net/context" +) + +type topicListCall struct { + inTok, outTok string + topics []string + err error +} + +type topicListService struct { + service + calls []topicListCall + + t *testing.T // for error logging. +} + +func (s *topicListService) listProjectTopics(ctx context.Context, projName, pageTok string) (*stringsPage, error) { + if len(s.calls) == 0 || projName != "projects/projid" { + s.t.Errorf("unexpected call: projName: %q, pageTok: %q", projName, pageTok) + return nil, errors.New("bang") + } + + call := s.calls[0] + s.calls = s.calls[1:] + if call.inTok != pageTok { + s.t.Errorf("page token: got: %v, want: %v", pageTok, call.inTok) + } + return &stringsPage{call.topics, call.outTok}, call.err +} + +func checkTopicListing(t *testing.T, calls []topicListCall, want []string) { + s := &topicListService{calls: calls, t: t} + c := &Client{projectID: "projid", s: s} + topics, err := slurpTopics(c.Topics(context.Background())) + if err != nil { + t.Errorf("error listing topics: %v", err) + } + got := topicNames(topics) + if !reflect.DeepEqual(got, want) { + t.Errorf("topic list: got: %v, want: %v", got, want) + } + if len(s.calls) != 0 { + t.Errorf("outstanding calls: %v", s.calls) + } +} + +// All returns the remaining topics from this iterator. +func slurpTopics(it *TopicIterator) ([]*Topic, error) { + var topics []*Topic + for { + switch topic, err := it.Next(); err { + case nil: + topics = append(topics, topic) + case Done: + return topics, nil + default: + return nil, err + } + } +} + +func TestListTopics(t *testing.T) { + calls := []topicListCall{ + { + topics: []string{"t1", "t2"}, + outTok: "a", + }, + { + inTok: "a", + topics: []string{"t3"}, + outTok: "b", + }, + { + inTok: "b", + topics: []string{}, + outTok: "c", + }, + { + inTok: "c", + topics: []string{"t4"}, + outTok: "", + }, + } + checkTopicListing(t, calls, []string{"t1", "t2", "t3", "t4"}) +} + +func TestListCompletelyEmptyTopics(t *testing.T) { + calls := []topicListCall{ + { + outTok: "", + }, + } + var want []string + checkTopicListing(t, calls, want) +} + +func TestListFinalEmptyPage(t *testing.T) { + calls := []topicListCall{ + { + topics: []string{"t1", "t2"}, + outTok: "a", + }, + { + inTok: "a", + topics: []string{}, + outTok: "", + }, + } + checkTopicListing(t, calls, []string{"t1", "t2"}) +} + +func topicNames(topics []*Topic) []string { + var names []string + + for _, topic := range topics { + names = append(names, topic.name) + + } + return names +} diff --git a/vendor/cloud.google.com/go/pubsub/utils_test.go b/vendor/cloud.google.com/go/pubsub/utils_test.go new file mode 100644 index 000000000..d0b54202f --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/utils_test.go @@ -0,0 +1,63 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package pubsub
+
+import (
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+type modDeadlineCall struct {
+	subName  string
+	deadline time.Duration
+	ackIDs   []string
+}
+
+type acknowledgeCall struct {
+	subName string
+	ackIDs  []string
+}
+
+type testService struct {
+	service
+
+	// The arguments of each call to modifyAckDeadline are written to this channel.
+	modDeadlineCalled chan modDeadlineCall
+
+	// The arguments of each call to acknowledge are written to this channel.
+	acknowledgeCalled chan acknowledgeCall
+}
+
+func (s *testService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
+	s.modDeadlineCalled <- modDeadlineCall{
+		subName:  subName,
+		deadline: deadline,
+		ackIDs:   ackIDs,
+	}
+	return nil
+}
+
+func (s *testService) acknowledge(ctx context.Context, subName string, ackIDs []string) error {
+	s.acknowledgeCalled <- acknowledgeCall{
+		subName: subName,
+		ackIDs:  ackIDs,
+	}
+	return nil
+}
+
+func (s *testService) splitAckIDs(ids []string) ([]string, []string) {
+	return ids, nil
+}
diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go b/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go
new file mode 100644
index 000000000..1ae362d16
--- /dev/null
+++ b/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go
@@ -0,0 +1,21 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package speech is an experimental, auto-generated package for the
+// speech API.
+//
+// Google Cloud Speech API.
+package speech // import "cloud.google.com/go/speech/apiv1beta1"
diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/speech.go b/vendor/cloud.google.com/go/speech/apiv1beta1/speech.go
new file mode 100644
index 000000000..295934571
--- /dev/null
+++ b/vendor/cloud.google.com/go/speech/apiv1beta1/speech.go
@@ -0,0 +1,21 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package speech
+
+const (
+	gapicNameVersion = "gapic/0.1.0"
+)
diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go
new file mode 100644
index 000000000..503bba220
--- /dev/null
+++ b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go
@@ -0,0 +1,155 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + "fmt" + "runtime" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of this client. +type CallOptions struct { + SyncRecognize []gax.CallOption + AsyncRecognize []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("speech.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + ), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + + return &CallOptions{ + SyncRecognize: retry[[2]string{"default", "idempotent"}], + AsyncRecognize: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Speech. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client speechpb.SpeechClient + + // The call options for this service. + CallOptions *CallOptions + + // The metadata to be sent with each request. + metadata map[string][]string +} + +// NewClient creates a new speech service client. +// +// Service that implements Google Cloud Speech API. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + client: speechpb.NewSpeechClient(conn), + CallOptions: defaultCallOptions(), + } + c.SetGoogleClientInfo("gax", gax.Version) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. 
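+//
+// A minimal call sketch (the name and version values here are illustrative
+// placeholders, not values required by the API):
+//
+//	c.SetGoogleClientInfo("my-app", "v0.1.0")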
+func (c *Client) SetGoogleClientInfo(name, version string) {
+	c.metadata = map[string][]string{
+		"x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())},
+	}
+}
+
+// SyncRecognize performs synchronous speech recognition: results are
+// received after all audio has been sent and processed.
+func (c *Client) SyncRecognize(ctx context.Context, req *speechpb.SyncRecognizeRequest) (*speechpb.SyncRecognizeResponse, error) {
+	ctx = metadata.NewContext(ctx, c.metadata)
+	var resp *speechpb.SyncRecognizeResponse
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = c.client.SyncRecognize(ctx, req)
+		return err
+	}, c.CallOptions.SyncRecognize...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// AsyncRecognize performs asynchronous speech recognition: results are
+// received via the google.longrunning.Operations interface.
+// `Operation.response` returns `AsyncRecognizeResponse`.
+func (c *Client) AsyncRecognize(ctx context.Context, req *speechpb.AsyncRecognizeRequest) (*longrunningpb.Operation, error) {
+	ctx = metadata.NewContext(ctx, c.metadata)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = c.client.AsyncRecognize(ctx, req)
+		return err
+	}, c.CallOptions.AsyncRecognize...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go
new file mode 100644
index 000000000..30d8819ec
--- /dev/null
+++ b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go
@@ -0,0 +1,69 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package speech_test
+
+import (
+	"cloud.google.com/go/speech/apiv1beta1"
+	"golang.org/x/net/context"
+	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1"
+)
+
+func ExampleNewClient() {
+	ctx := context.Background()
+	c, err := speech.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use client.
+	_ = c
+}
+
+func ExampleClient_SyncRecognize() {
+	ctx := context.Background()
+	c, err := speech.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &speechpb.SyncRecognizeRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.SyncRecognize(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
+
+func ExampleClient_AsyncRecognize() {
+	ctx := context.Background()
+	c, err := speech.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &speechpb.AsyncRecognizeRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.AsyncRecognize(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
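+	// (resp here is a google.longrunning.Operation; per the AsyncRecognize
+	// doc comment above, a caller would typically poll it for completion
+	// before extracting the AsyncRecognizeResponse from it.)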
+	_ = resp
+}
diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go
new file mode 100644
index 000000000..e4d968b0a
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/acl.go
@@ -0,0 +1,198 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"fmt"
+
+	"golang.org/x/net/context"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// ACLRole is the level of access to grant.
+type ACLRole string
+
+const (
+	RoleOwner  ACLRole = "OWNER"
+	RoleReader ACLRole = "READER"
+)
+
+// ACLEntity refers to a user or group.
+// They are sometimes referred to as grantees.
+//
+// It could be in the form of:
+// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
+// "domain-<domain>" and "project-team-<projectId>".
+//
+// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
+type ACLEntity string
+
+const (
+	AllUsers              ACLEntity = "allUsers"
+	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
+)
+
+// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
+type ACLRule struct {
+	Entity ACLEntity
+	Role   ACLRole
+}
+
+// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
+type ACLHandle struct {
+	c         *Client
+	bucket    string
+	object    string
+	isDefault bool
+}
+
+// Delete permanently deletes the ACL entry for the given entity.
+func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
+	if a.object != "" {
+		return a.objectDelete(ctx, entity)
+	}
+	if a.isDefault {
+		return a.bucketDefaultDelete(ctx, entity)
+	}
+	return a.bucketDelete(ctx, entity)
+}
+
+// Set sets the permission level for the given entity.
+func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
+	if a.object != "" {
+		return a.objectSet(ctx, entity, role)
+	}
+	if a.isDefault {
+		return a.bucketDefaultSet(ctx, entity, role)
+	}
+	return a.bucketSet(ctx, entity, role)
+}
+
+// List retrieves ACL entries.
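+//
+// A minimal usage sketch from a caller's point of view, assuming a
+// hypothetical client value and bucket name (mirroring ExampleACLHandle_List
+// further down in this change):
+//
+//	rules, err := client.Bucket("my-bucket").DefaultObjectACL().List(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, r := range rules {
+//		fmt.Println(r.Entity, r.Role)
+//	}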
+func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) { + if a.object != "" { + return a.objectList(ctx) + } + if a.isDefault { + return a.bucketDefaultList(ctx) + } + return a.bucketList(ctx) +} + +func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { + acls, err := a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err) + } + return toACLRules(acls.Items), nil +} + +func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.ObjectAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + _, err := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do() + if err != nil { + return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } + return nil +} + +func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { + err := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do() + if err != nil { + return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } + return nil +} + +func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { + acls, err := a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err) + } + r := make([]ACLRule, len(acls.Items)) + for i, v := range acls.Items { + r[i].Entity = ACLEntity(v.Entity) + r[i].Role = ACLRole(v.Role) + } + return r, nil +} + +func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.BucketAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + _, err := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do() + if err != nil { + return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } + return nil +} + +func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { + err := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do() + if err != nil { + return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } + return nil +} + +func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { + acls, err := a.c.raw.ObjectAccessControls.List(a.bucket, a.object).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err) + } + return toACLRules(acls.Items), nil +} + +func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.ObjectAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + _, err := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx).Do() + if err != nil { + return fmt.Errorf("storage: error updating object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err) + } + return nil +} + +func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { + err := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, 
string(entity)).Context(ctx).Do() + if err != nil { + return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err) + } + return nil +} + +func toACLRules(items []interface{}) []ACLRule { + r := make([]ACLRule, 0, len(items)) + for _, v := range items { + if m, ok := v.(map[string]interface{}); ok { + entity, ok1 := m["entity"].(string) + role, ok2 := m["role"].(string) + if ok1 && ok2 { + r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)}) + } + } + } + return r +} diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go new file mode 100644 index 000000000..0fa984c1e --- /dev/null +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -0,0 +1,348 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + raw "google.golang.org/api/storage/v1" +) + +// Create creates the Bucket in the project. +// If attrs is nil the API defaults will be used. +func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error { + var bkt *raw.Bucket + if attrs != nil { + bkt = attrs.toRawBucket() + } else { + bkt = &raw.Bucket{} + } + bkt.Name = b.name + req := b.c.raw.Buckets.Insert(projectID, bkt) + _, err := req.Context(ctx).Do() + return err +} + +// Delete deletes the Bucket. +func (b *BucketHandle) Delete(ctx context.Context) error { + req := b.c.raw.Buckets.Delete(b.name) + return req.Context(ctx).Do() +} + +// ACL returns an ACLHandle, which provides access to the bucket's access control list. +// This controls who can list, create or overwrite the objects in a bucket. +// This call does not perform any network operations. +func (c *BucketHandle) ACL() *ACLHandle { + return c.acl +} + +// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. +// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. +// This call does not perform any network operations. +func (c *BucketHandle) DefaultObjectACL() *ACLHandle { + return c.defaultObjectACL +} + +// Object returns an ObjectHandle, which provides operations on the named object. +// This call does not perform any network operations. +// +// name must consist entirely of valid UTF-8-encoded runes. The full specification +// for valid object names can be found at: +// https://cloud.google.com/storage/docs/bucket-naming +func (b *BucketHandle) Object(name string) *ObjectHandle { + return &ObjectHandle{ + c: b.c, + bucket: b.name, + object: name, + acl: &ACLHandle{ + c: b.c, + bucket: b.name, + object: name, + }, + } +} + +// Attrs returns the metadata for the bucket. 
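+//
+// A short sketch of a typical call (client is a hypothetical *storage.Client;
+// the bucket name is a placeholder):
+//
+//	attrs, err := client.Bucket("my-bucket").Attrs(ctx)
+//	if err == storage.ErrBucketNotExist {
+//		// the bucket does not exist
+//	} else if err != nil {
+//		// handle other errors
+//	}
+//	_ = attrs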
+func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) { + resp, err := b.c.raw.Buckets.Get(b.name).Projection("full").Context(ctx).Do() + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrBucketNotExist + } + if err != nil { + return nil, err + } + return newBucket(resp), nil +} + +// BucketAttrs represents the metadata for a Google Cloud Storage bucket. +type BucketAttrs struct { + // Name is the name of the bucket. + Name string + + // ACL is the list of access control rules on the bucket. + ACL []ACLRule + + // DefaultObjectACL is the list of access controls to + // apply to new objects when no object ACL is provided. + DefaultObjectACL []ACLRule + + // Location is the location of the bucket. It defaults to "US". + Location string + + // MetaGeneration is the metadata generation of the bucket. + MetaGeneration int64 + + // StorageClass is the storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "STANDARD" and + // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD". + StorageClass string + + // Created is the creation time of the bucket. + Created time.Time +} + +func newBucket(b *raw.Bucket) *BucketAttrs { + if b == nil { + return nil + } + bucket := &BucketAttrs{ + Name: b.Name, + Location: b.Location, + MetaGeneration: b.Metageneration, + StorageClass: b.StorageClass, + Created: convertTime(b.TimeCreated), + } + acl := make([]ACLRule, len(b.Acl)) + for i, rule := range b.Acl { + acl[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + bucket.ACL = acl + objACL := make([]ACLRule, len(b.DefaultObjectAcl)) + for i, rule := range b.DefaultObjectAcl { + objACL[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + bucket.DefaultObjectACL = objACL + return bucket +} + +// toRawBucket copies the editable attribute from b to the raw library's Bucket type. +func (b *BucketAttrs) toRawBucket() *raw.Bucket { + var acl []*raw.BucketAccessControl + if len(b.ACL) > 0 { + acl = make([]*raw.BucketAccessControl, len(b.ACL)) + for i, rule := range b.ACL { + acl[i] = &raw.BucketAccessControl{ + Entity: string(rule.Entity), + Role: string(rule.Role), + } + } + } + dACL := toRawObjectACL(b.DefaultObjectACL) + return &raw.Bucket{ + Name: b.Name, + DefaultObjectAcl: dACL, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: acl, + } +} + +// ObjectList represents a list of objects returned from a bucket List call. +type ObjectList struct { + // Results represent a list of object results. + Results []*ObjectAttrs + + // Next is the continuation query to retrieve more + // results with the same filtering criteria. If there + // are no more results to retrieve, it is nil. + Next *Query + + // Prefixes represents prefixes of objects + // matching-but-not-listed up to and including + // the requested delimiter. + Prefixes []string +} + +// List lists objects from the bucket. You can specify a query +// to filter the results. If q is nil, no filtering is applied. +// +// Deprecated. Use BucketHandle.Objects instead. 
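+//
+// A rough equivalent using the Objects iterator (a sketch of the loop shape
+// only; it relies on the package's Done sentinel used elsewhere in this file):
+//
+//	it := b.Objects(ctx, q)
+//	for {
+//		attrs, err := it.Next()
+//		if err == Done {
+//			break
+//		}
+//		if err != nil {
+//			// handle error
+//		}
+//		_ = attrs
+//	}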
+func (b *BucketHandle) List(ctx context.Context, q *Query) (*ObjectList, error) { + it := b.Objects(ctx, q) + nextToken, err := it.fetch(it.pageInfo.MaxSize, it.pageInfo.Token) + if err != nil { + return nil, err + } + list := &ObjectList{} + for _, item := range it.items { + if item.Prefix != "" { + list.Prefixes = append(list.Prefixes, item.Prefix) + } else { + list.Results = append(list.Results, item) + } + } + if nextToken != "" { + it.query.Cursor = nextToken + list.Next = &it.query + } + return list, nil +} + +// Objects returns an iterator over the objects in the bucket that match the Query q. +// If q is nil, no filtering is done. +func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { + it := &ObjectIterator{ + ctx: ctx, + bucket: b, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + if q != nil { + it.query = *q + it.pageInfo.MaxSize = q.MaxResults + it.pageInfo.Token = q.Cursor + } + return it +} + +// An ObjectIterator is an iterator over ObjectAttrs. +type ObjectIterator struct { + ctx context.Context + bucket *BucketHandle + query Query + pageInfo *iterator.PageInfo + nextFunc func() error + items []*ObjectAttrs +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +// +// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will +// have a non-empty Prefix field, and a zero value for all other fields. These +// represent prefixes. +func (it *ObjectIterator) Next() (*ObjectAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { + req := it.bucket.c.raw.Objects.List(it.bucket.name) + req.Projection("full") + req.Delimiter(it.query.Delimiter) + req.Prefix(it.query.Prefix) + req.Versions(it.query.Versions) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + resp, err := req.Context(it.ctx).Do() + if err != nil { + return "", err + } + for _, item := range resp.Items { + it.items = append(it.items, newObject(item)) + } + for _, prefix := range resp.Prefixes { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + return resp.NextPageToken, nil +} + +// TODO(jbd): Add storage.buckets.update. + +// Buckets returns an iterator over the buckets in the project. You may +// optionally set the iterator's Prefix field to restrict the list to buckets +// whose names begin with the prefix. By default, all buckets in the project +// are returned. +func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { + it := &BucketIterator{ + ctx: ctx, + client: c, + projectID: projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + return it +} + +// A BucketIterator is an iterator over BucketAttrs. +type BucketIterator struct { + // Prefix restricts the iterator to buckets whose names begin with it. 
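+	// For example, setting Prefix to "prod-" before the first call to Next
+	// restricts iteration to buckets whose names start with "prod-" (the
+	// value is illustrative).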
+ Prefix string + + ctx context.Context + client *Client + projectID string + buckets []*BucketAttrs + pageInfo *iterator.PageInfo + nextFunc func() error +} + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *BucketIterator) Next() (*BucketAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + b := it.buckets[0] + it.buckets = it.buckets[1:] + return b, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) { + req := it.client.raw.Buckets.List(it.projectID) + req.Projection("full") + req.Prefix(it.Prefix) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + resp, err := req.Context(it.ctx).Do() + if err != nil { + return "", err + } + for _, item := range resp.Items { + it.buckets = append(it.buckets, newBucket(item)) + } + return resp.NextPageToken, nil +} diff --git a/vendor/cloud.google.com/go/storage/example_test.go b/vendor/cloud.google.com/go/storage/example_test.go new file mode 100644 index 000000000..32de1f578 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/example_test.go @@ -0,0 +1,354 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage_test + +import ( + "fmt" + "io/ioutil" + "log" + "time" + + "cloud.google.com/go/storage" + "golang.org/x/net/context" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Use the client. + + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: handle error. + } +} + +func ExampleNewClient_auth() { + ctx := context.Background() + // Use Google Application Default Credentials to authorize and authenticate the client. + // More information about Application Default Credentials and how to enable is at + // https://developers.google.com/identity/protocols/application-default-credentials. + client, err := storage.NewClient(ctx) + if err != nil { + log.Fatal(err) + } + + // Use the client. + + // Close the client when finished. + if err := client.Close(); err != nil { + log.Fatal(err) + } +} + +func ExampleBucketHandle_Create() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + if err := client.Bucket("my-bucket").Create(ctx, "my-project", nil); err != nil { + // TODO: handle error. + } +} + +func ExampleBucketHandle_Delete() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + if err := client.Bucket("my-bucket").Delete(ctx); err != nil { + // TODO: handle error. 
+ } +} + +func ExampleBucketHandle_Attrs() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + attrs, err := client.Bucket("my-bucket").Attrs(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(attrs) +} + +func ExampleBucketHandle_Objects() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + it := client.Bucket("my-bucket").Objects(ctx, nil) + _ = it // TODO: iterate using Next or NextPage. +} + +func ExampleObjectIterator_Next() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + it := client.Bucket("my-bucket").Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err != nil && err != storage.Done { + // TODO: Handle error. + } + if err == storage.Done { + break + } + fmt.Println(objAttrs) + } +} + +func ExampleSignedURL() { + pkey, err := ioutil.ReadFile("my-private-key.pem") + if err != nil { + // TODO: handle error. + } + url, err := storage.SignedURL("my-bucket", "my-object", &storage.SignedURLOptions{ + GoogleAccessID: "xxx@developer.gserviceaccount.com", + PrivateKey: pkey, + Method: "GET", + Expires: time.Now().Add(48 * time.Hour), + }) + if err != nil { + // TODO: handle error. + } + fmt.Println(url) +} + +func ExampleObjectHandle_Attrs() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + objAttrs, err := client.Bucket("my-bucket").Object("my-object").Attrs(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(objAttrs) +} + +func ExampleObjectHandle_Attrs_withConditions() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + // Read the object. + objAttrs1, err := obj.Attrs(ctx) + if err != nil { + // TODO: handle error. + } + // Do something else for a while. + time.Sleep(5 * time.Minute) + // Now read the same contents, even if the object has been written since the last read. + objAttrs2, err := obj.WithConditions(storage.Generation(objAttrs1.Generation)).Attrs(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(objAttrs1, objAttrs2) +} + +func ExampleObjectHandle_Update() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Change only the content type of the object. + objAttrs, err := client.Bucket("my-bucket").Object("my-object").Update(ctx, storage.ObjectAttrs{ + ContentType: "text/html", + }) + if err != nil { + // TODO: handle error. + } + fmt.Println(objAttrs) +} + +func ExampleBucketHandle_List() { + ctx := context.Background() + var client *storage.Client // See Example (Auth) + + var query *storage.Query + for { + // If you are using this package on the App Engine Flex runtime, + // you can init a bucket client with your app's default bucket name. + // See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName. + objects, err := client.Bucket("bucketname").List(ctx, query) + if err != nil { + log.Fatal(err) + } + for _, obj := range objects.Results { + log.Printf("object name: %s, size: %v", obj.Name, obj.Size) + } + // If there are more results, objects.Next will be non-nil. 
+ if objects.Next == nil { + break + } + query = objects.Next + } + + log.Println("paginated through all object items in the bucket you specified.") +} + +func ExampleObjectHandle_NewReader() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + rc, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx) + if err != nil { + // TODO: handle error. + } + slurp, err := ioutil.ReadAll(rc) + rc.Close() + if err != nil { + // TODO: handle error. + } + fmt.Println("file contents:", slurp) +} + +func ExampleObjectHandle_NewRangeReader() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Read only the first 64K. + rc, err := client.Bucket("bucketname").Object("filename1").NewRangeReader(ctx, 0, 64*1024) + if err != nil { + // TODO: handle error. + } + slurp, err := ioutil.ReadAll(rc) + rc.Close() + if err != nil { + // TODO: handle error. + } + fmt.Println("first 64K of file contents:", slurp) +} + +func ExampleObjectHandle_NewWriter() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx) + wc.ContentType = "text/plain" + wc.ACL = []storage.ACLRule{{storage.AllUsers, storage.RoleReader}} + if _, err := wc.Write([]byte("hello world")); err != nil { + // TODO: handle error. + } + if err := wc.Close(); err != nil { + // TODO: handle error. + } + fmt.Println("updated object:", wc.Attrs()) +} + +func ExampleObjectHandle_CopyTo() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + src := client.Bucket("bucketname").Object("file1") + dst := client.Bucket("another-bucketname").Object("file2") + + o, err := src.CopyTo(ctx, dst, nil) + if err != nil { + // TODO: handle error. + } + fmt.Println("copied file:", o) +} + +func ExampleObjectHandle_Delete() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // To delete multiple objects in a bucket, list them with an + // ObjectIterator, then Delete them. + + // If you are using this package on the App Engine Flex runtime, + // you can init a bucket client with your app's default bucket name. + // See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName. + bucket := client.Bucket("my-bucket") + it := bucket.Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err != nil && err != storage.Done { + // TODO: Handle error. + } + if err == storage.Done { + break + } + if err := bucket.Object(objAttrs.Name).Delete(ctx); err != nil { + // TODO: Handle error. + } + } + fmt.Println("deleted all object items in the bucket specified.") +} + +func ExampleACLHandle_Delete() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // No longer grant access to the bucket to everyone on the Internet. + if err := client.Bucket("my-bucket").ACL().Delete(ctx, storage.AllUsers); err != nil { + // TODO: handle error. + } +} + +func ExampleACLHandle_Set() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Let any authenticated user read my-bucket/my-object. 
+ obj := client.Bucket("my-bucket").Object("my-object") + if err := obj.ACL().Set(ctx, storage.AllAuthenticatedUsers, storage.RoleReader); err != nil { + // TODO: handle error. + } +} + +func ExampleACLHandle_List() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // List the default object ACLs for my-bucket. + aclRules, err := client.Bucket("my-bucket").DefaultObjectACL().List(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(aclRules) +} diff --git a/vendor/cloud.google.com/go/storage/integration_test.go b/vendor/cloud.google.com/go/storage/integration_test.go new file mode 100644 index 000000000..e30bef9f8 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/integration_test.go @@ -0,0 +1,848 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "bytes" + "compress/gzip" + "crypto/md5" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "net/http" + "os" + "sort" + "strings" + "testing" + "time" + + "golang.org/x/net/context" + + "cloud.google.com/go/internal/testutil" + "google.golang.org/api/googleapi" + "google.golang.org/api/option" +) + +const testPrefix = "-go-cloud-storage-test" + +// suffix is a timestamp-based suffix which is added to all buckets created by +// tests. This reduces flakiness when the tests are run in parallel and allows +// automatic cleaning up of artifacts left when tests fail. +var suffix = fmt.Sprintf("%s-%d", testPrefix, time.Now().UnixNano()) + +func TestMain(m *testing.M) { + integrationTest := initIntegrationTest() + exit := m.Run() + if integrationTest { + if err := cleanup(); err != nil { + log.Fatalf("Post-test cleanup failed: %v", err) + } + } + os.Exit(exit) +} + +// If integration tests will be run, create a unique bucket for them. +func initIntegrationTest() bool { + flag.Parse() // needed for testing.Short() + ctx := context.Background() + if testing.Short() { + return false + } + client, bucket := config(ctx) + if client == nil { + return false + } + defer client.Close() + if err := client.Bucket(bucket).Create(ctx, testutil.ProjID(), nil); err != nil { + log.Fatalf("creating bucket %q: %v", bucket, err) + } + return true +} + +// testConfig returns the Client used to access GCS and the default bucket +// name to use. testConfig skips the current test if credentials are not +// available or when being run in Short mode. +func testConfig(ctx context.Context, t *testing.T) (*Client, string) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + client, bucket := config(ctx) + if client == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + return client, bucket +} + +// config is like testConfig, but it doesn't need a *testing.T. 
+func config(ctx context.Context) (*Client, string) { + ts := testutil.TokenSource(ctx, ScopeFullControl) + if ts == nil { + return nil, "" + } + p := testutil.ProjID() + if p == "" { + log.Fatal("The project ID must be set. See CONTRIBUTING.md for details") + } + client, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("NewClient: %v", err) + } + return client, p + suffix +} + +func TestBucketMethods(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + projectID := testutil.ProjID() + newBucket := bucket + "-new" + // Test Create and Delete. + if err := client.Bucket(newBucket).Create(ctx, projectID, nil); err != nil { + t.Errorf("Bucket(%v).Create(%v, %v) failed: %v", newBucket, projectID, nil, err) + } + if err := client.Bucket(newBucket).Delete(ctx); err != nil { + t.Errorf("Bucket(%v).Delete failed: %v", newBucket, err) + } + + // Test Create and Delete with attributes. + attrs := BucketAttrs{ + DefaultObjectACL: []ACLRule{{Entity: "domain-google.com", Role: RoleReader}}, + } + if err := client.Bucket(newBucket).Create(ctx, projectID, &attrs); err != nil { + t.Errorf("Bucket(%v).Create(%v, %v) failed: %v", newBucket, projectID, attrs, err) + } + if err := client.Bucket(newBucket).Delete(ctx); err != nil { + t.Errorf("Bucket(%v).Delete failed: %v", newBucket, err) + } +} + +func TestIntegration_ConditionalDelete(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + o := client.Bucket(bucket).Object("conddel") + + wc := o.NewWriter(ctx) + wc.ContentType = "text/plain" + if _, err := wc.Write([]byte("foo")); err != nil { + t.Fatal(err) + } + if err := wc.Close(); err != nil { + t.Fatal(err) + } + + gen := wc.Attrs().Generation + metaGen := wc.Attrs().MetaGeneration + + if err := o.WithConditions(Generation(gen - 1)).Delete(ctx); err == nil { + t.Fatalf("Unexpected successful delete with Generation") + } + if err := o.WithConditions(IfMetaGenerationMatch(metaGen + 1)).Delete(ctx); err == nil { + t.Fatalf("Unexpected successful delete with IfMetaGenerationMatch") + } + if err := o.WithConditions(IfMetaGenerationNotMatch(metaGen)).Delete(ctx); err == nil { + t.Fatalf("Unexpected successful delete with IfMetaGenerationNotMatch") + } + if err := o.WithConditions(Generation(gen)).Delete(ctx); err != nil { + t.Fatalf("final delete failed: %v", err) + } +} + +func TestObjects(t *testing.T) { + // TODO(djd): there are a lot of closely-related tests here which share + // a common setup. Once we can depend on Go 1.7 features, we should refactor + // this test to use the sub-test feature. This will increase the readability + // of this test, and should also reduce the time it takes to execute. + // https://golang.org/pkg/testing/#hdr-Subtests_and_Sub_benchmarks + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + + const defaultType = "text/plain" + + // Populate object names and make a map for their contents. + objects := []string{ + "obj1", + "obj2", + "obj/with/slashes", + } + contents := make(map[string][]byte) + + // Test Writer. 
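+	// (A Writer commits nothing until Close returns; a nil error from Close
+	// means the object is fully written, which is why the loop below checks
+	// both Write and Close.)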
+ for _, obj := range objects { + t.Logf("Writing %q", obj) + wc := bkt.Object(obj).NewWriter(ctx) + wc.ContentType = defaultType + c := randomContents() + if _, err := wc.Write(c); err != nil { + t.Errorf("Write for %v failed with %v", obj, err) + } + if err := wc.Close(); err != nil { + t.Errorf("Close for %v failed with %v", obj, err) + } + contents[obj] = c + } + + testBucketList(t, bkt, objects) + testObjectIterator(t, bkt, objects) + + // Test Reader. + for _, obj := range objects { + t.Logf("Creating a reader to read %v", obj) + rc, err := bkt.Object(obj).NewReader(ctx) + if err != nil { + t.Errorf("Can't create a reader for %v, errored with %v", obj, err) + continue + } + slurp, err := ioutil.ReadAll(rc) + if err != nil { + t.Errorf("Can't ReadAll object %v, errored with %v", obj, err) + } + if got, want := slurp, contents[obj]; !bytes.Equal(got, want) { + t.Errorf("Contents (%q) = %q; want %q", obj, got, want) + } + if got, want := rc.Size(), len(contents[obj]); got != int64(want) { + t.Errorf("Size (%q) = %d; want %d", obj, got, want) + } + if got, want := rc.ContentType(), "text/plain"; got != want { + t.Errorf("ContentType (%q) = %q; want %q", obj, got, want) + } + rc.Close() + + // Test SignedURL + opts := &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("rsa"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: time.Date(2020, time.October, 2, 10, 0, 0, 0, time.UTC), + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + } + u, err := SignedURL(bucket, obj, opts) + if err != nil { + t.Fatalf("SignedURL(%q, %q) errored with %v", bucket, obj, err) + } + res, err := client.hc.Get(u) + if err != nil { + t.Fatalf("Can't get URL %q: %v", u, err) + } + slurp, err = ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("Can't ReadAll signed object %v, errored with %v", obj, err) + } + if got, want := slurp, contents[obj]; !bytes.Equal(got, want) { + t.Errorf("Contents (%v) = %q; want %q", obj, got, want) + } + res.Body.Close() + } + + obj := objects[0] + objlen := int64(len(contents[obj])) + // Test Range Reader. 
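+	// (NewRangeReader takes a byte offset and a length; a negative length
+	// means "read to the end of the object", which is what the
+	// {objlen / 2, -1, objlen / 2} case in the table below relies on.)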
+ for i, r := range []struct { + offset, length, want int64 + }{ + {0, objlen, objlen}, + {0, objlen / 2, objlen / 2}, + {objlen / 2, objlen, objlen / 2}, + {0, 0, 0}, + {objlen / 2, 0, 0}, + {objlen / 2, -1, objlen / 2}, + {0, objlen * 2, objlen}, + } { + t.Logf("%d: bkt.Object(%v).NewRangeReader(ctx, %d, %d)", i, obj, r.offset, r.length) + rc, err := bkt.Object(obj).NewRangeReader(ctx, r.offset, r.length) + if err != nil { + t.Errorf("%d: Can't create a range reader for %v, errored with %v", i, obj, err) + continue + } + if rc.Size() != objlen { + t.Errorf("%d: Reader has a content-size of %d, want %d", i, rc.Size(), objlen) + } + if rc.Remain() != r.want { + t.Errorf("%d: Reader's available bytes reported as %d, want %d", i, rc.Remain(), r.want) + } + slurp, err := ioutil.ReadAll(rc) + if err != nil { + t.Errorf("%d:Can't ReadAll object %v, errored with %v", i, obj, err) + continue + } + if len(slurp) != int(r.want) { + t.Errorf("%d:RangeReader (%d, %d): Read %d bytes, wanted %d bytes", i, r.offset, r.length, len(slurp), r.want) + continue + } + if got, want := slurp, contents[obj][r.offset:r.offset+r.want]; !bytes.Equal(got, want) { + t.Errorf("RangeReader (%d, %d) = %q; want %q", r.offset, r.length, got, want) + } + rc.Close() + } + + // Test content encoding + const zeroCount = 20 << 20 + w := bkt.Object("gzip-test").NewWriter(ctx) + w.ContentEncoding = "gzip" + gw := gzip.NewWriter(w) + if _, err := io.Copy(gw, io.LimitReader(zeros{}, zeroCount)); err != nil { + t.Fatalf("io.Copy, upload: %v", err) + } + if err := gw.Close(); err != nil { + t.Errorf("gzip.Close(): %v", err) + } + if err := w.Close(); err != nil { + t.Errorf("w.Close(): %v", err) + } + r, err := bkt.Object("gzip-test").NewReader(ctx) + if err != nil { + t.Fatalf("NewReader(gzip-test): %v", err) + } + n, err := io.Copy(ioutil.Discard, r) + if err != nil { + t.Errorf("io.Copy, download: %v", err) + } + if n != zeroCount { + t.Errorf("downloaded bad data: got %d bytes, want %d", n, zeroCount) + } + + // Test NotFound. + _, err = bkt.Object("obj-not-exists").NewReader(ctx) + if err != ErrObjectNotExist { + t.Errorf("Object should not exist, err found to be %v", err) + } + + objName := objects[0] + + // Test NewReader googleapi.Error. + // Since a 429 or 5xx is hard to cause, we trigger a 416. + realLen := len(contents[objName]) + _, err = bkt.Object(objName).NewRangeReader(ctx, int64(realLen*2), 10) + if err, ok := err.(*googleapi.Error); !ok { + t.Error("NewRangeReader did not return a googleapi.Error") + } else { + if err.Code != 416 { + t.Errorf("Code = %d; want %d", err.Code, 416) + } + if len(err.Header) == 0 { + t.Error("Missing googleapi.Error.Header") + } + if len(err.Body) == 0 { + t.Error("Missing googleapi.Error.Body") + } + } + + // Test StatObject. + o, err := bkt.Object(objName).Attrs(ctx) + if err != nil { + t.Error(err) + } + if got, want := o.Name, objName; got != want { + t.Errorf("Name (%v) = %q; want %q", objName, got, want) + } + if got, want := o.ContentType, defaultType; got != want { + t.Errorf("ContentType (%v) = %q; want %q", objName, got, want) + } + created := o.Created + // Check that the object is newer than its containing bucket. + bAttrs, err := bkt.Attrs(ctx) + if err != nil { + t.Error(err) + } + if o.Created.Before(bAttrs.Created) { + t.Errorf("Object %v is older than its containing bucket, %v", o, bAttrs) + } + + // Test object copy. 
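+	// (CopyTo performs a server-side copy; the object bytes do not pass
+	// through the client, so the assertions below inspect only the returned
+	// attributes.)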
+ copyName := "copy-" + objName + copyObj, err := bkt.Object(objName).CopyTo(ctx, bkt.Object(copyName), nil) + if err != nil { + t.Errorf("CopyTo failed with %v", err) + } + if copyObj.Name != copyName { + t.Errorf("Copy object's name = %q; want %q", copyObj.Name, copyName) + } + if copyObj.Bucket != bucket { + t.Errorf("Copy object's bucket = %q; want %q", copyObj.Bucket, bucket) + } + + // Test UpdateAttrs. + updated, err := bkt.Object(objName).Update(ctx, ObjectAttrs{ + ContentType: "text/html", + ACL: []ACLRule{{Entity: "domain-google.com", Role: RoleReader}}, + }) + if err != nil { + t.Errorf("UpdateAttrs failed with %v", err) + } + if want := "text/html"; updated.ContentType != want { + t.Errorf("updated.ContentType == %q; want %q", updated.ContentType, want) + } + if want := created; updated.Created != want { + t.Errorf("updated.Created == %q; want %q", updated.Created, want) + } + if !updated.Created.Before(updated.Updated) { + t.Errorf("updated.Updated should be newer than update.Created") + } + + // Test checksums. + checksumCases := []struct { + name string + contents [][]byte + size int64 + md5 string + crc32c uint32 + }{ + { + name: "checksum-object", + contents: [][]byte{[]byte("hello"), []byte("world")}, + size: 10, + md5: "fc5e038d38a57032085441e7fe7010b0", + crc32c: 1456190592, + }, + { + name: "zero-object", + contents: [][]byte{}, + size: 0, + md5: "d41d8cd98f00b204e9800998ecf8427e", + crc32c: 0, + }, + } + for _, c := range checksumCases { + wc := bkt.Object(c.name).NewWriter(ctx) + for _, data := range c.contents { + if _, err := wc.Write(data); err != nil { + t.Errorf("Write(%q) failed with %q", data, err) + } + } + if err = wc.Close(); err != nil { + t.Errorf("%q: close failed with %q", c.name, err) + } + obj := wc.Attrs() + if got, want := obj.Size, c.size; got != want { + t.Errorf("Object (%q) Size = %v; want %v", c.name, got, want) + } + if got, want := fmt.Sprintf("%x", obj.MD5), c.md5; got != want { + t.Errorf("Object (%q) MD5 = %q; want %q", c.name, got, want) + } + if got, want := obj.CRC32C, c.crc32c; got != want { + t.Errorf("Object (%q) CRC32C = %v; want %v", c.name, got, want) + } + } + + // Test public ACL. + publicObj := objects[0] + if err = bkt.Object(publicObj).ACL().Set(ctx, AllUsers, RoleReader); err != nil { + t.Errorf("PutACLEntry failed with %v", err) + } + publicClient, err := NewClient(ctx, option.WithHTTPClient(http.DefaultClient)) + if err != nil { + t.Fatal(err) + } + rc, err := publicClient.Bucket(bucket).Object(publicObj).NewReader(ctx) + if err != nil { + t.Error(err) + } + slurp, err := ioutil.ReadAll(rc) + if err != nil { + t.Errorf("ReadAll failed with %v", err) + } + if !bytes.Equal(slurp, contents[publicObj]) { + t.Errorf("Public object's content: got %q, want %q", slurp, contents[publicObj]) + } + rc.Close() + + // Test writer error handling. + wc := publicClient.Bucket(bucket).Object(publicObj).NewWriter(ctx) + if _, err := wc.Write([]byte("hello")); err != nil { + t.Errorf("Write unexpectedly failed with %v", err) + } + if err = wc.Close(); err == nil { + t.Error("Close expected an error, found none") + } + + // Test deleting the copy object. + if err := bkt.Object(copyName).Delete(ctx); err != nil { + t.Errorf("Deletion of %v failed with %v", copyName, err) + } + // Deleting it a second time should return ErrObjectNotExist. 
+ if err := bkt.Object(copyName).Delete(ctx); err != ErrObjectNotExist { + t.Errorf("second deletion of %v = %v; want ErrObjectNotExist", copyName, err) + } + _, err = bkt.Object(copyName).Attrs(ctx) + if err != ErrObjectNotExist { + t.Errorf("Copy is expected to be deleted, stat errored with %v", err) + } + + // Test object composition. + compDst := bkt.Object("composed") + var compSrcs []*ObjectHandle + var wantContents []byte + for _, obj := range objects { + compSrcs = append(compSrcs, bkt.Object(obj)) + wantContents = append(wantContents, contents[obj]...) + } + if _, err := compDst.ComposeFrom(ctx, compSrcs, &ObjectAttrs{ + ContentType: "text/json", + }); err != nil { + t.Fatalf("ComposeFrom error: %v", err) + } + rc, err = compDst.NewReader(ctx) + if err != nil { + t.Fatalf("compDst.NewReader: %v", err) + } + slurp, err = ioutil.ReadAll(rc) + if err != nil { + t.Fatalf("compDst ioutil.ReadAll: %v", err) + } + defer rc.Close() + if !bytes.Equal(slurp, wantContents) { + t.Errorf("Composed object contents\ngot: %q\nwant: %q", slurp, wantContents) + } + if got, want := rc.ContentType(), "text/json"; got != want { + t.Errorf("Composed object content-type = %q, want %q", got, want) + } +} + +func testBucketList(t *testing.T, bkt *BucketHandle, objects []string) { + ctx := context.Background() + q := &Query{Prefix: "obj"} + missing := map[string]bool{} + for _, o := range objects { + missing[o] = true + } + for { + objs, err := bkt.List(ctx, q) + if err != nil { + t.Errorf("List: unexpected error: %v", err) + break + } + for _, oa := range objs.Results { + delete(missing, oa.Name) + } + if objs.Next == nil { + break + } + q = objs.Next + } + if len(missing) > 0 { + t.Errorf("bucket.List: missing %v", missing) + } +} + +func testObjectIterator(t *testing.T, bkt *BucketHandle, objects []string) { + ctx := context.Background() + // Collect the list of items we expect: ObjectAttrs in lexical order by name. + names := make([]string, len(objects)) + copy(names, objects) + sort.Strings(names) + var attrs []*ObjectAttrs + for _, name := range names { + attr, err := bkt.Object(name).Attrs(ctx) + if err != nil { + t.Errorf("Object(%q).Attrs: %v", name, err) + return + } + attrs = append(attrs, attr) + } + + it := bkt.Objects(ctx, &Query{Prefix: "obj"}) + msg, ok := testutil.TestIteratorNext(attrs, Done, func() (interface{}, error) { return it.Next() }) + if !ok { + t.Errorf("ObjectIterator.Next: %s", msg) + } + + // TODO(jba): test pagination. 
+ // TODO(jba): test query.Delimiter != "" +} + +func TestACL(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + + entity := ACLEntity("domain-google.com") + rule := ACLRule{Entity: entity, Role: RoleReader} + if err := bkt.DefaultObjectACL().Set(ctx, entity, RoleReader); err != nil { + t.Errorf("Can't put default ACL rule for the bucket, errored with %v", err) + } + acl, err := bkt.DefaultObjectACL().List(ctx) + if err != nil { + t.Errorf("DefaultObjectACL.List for bucket %q: %v", bucket, err) + } else if !hasRule(acl, rule) { + t.Errorf("default ACL missing %#v", rule) + } + aclObjects := []string{"acl1", "acl2"} + for _, obj := range aclObjects { + t.Logf("Writing %v", obj) + wc := bkt.Object(obj).NewWriter(ctx) + c := randomContents() + if _, err := wc.Write(c); err != nil { + t.Errorf("Write for %v failed with %v", obj, err) + } + if err := wc.Close(); err != nil { + t.Errorf("Close for %v failed with %v", obj, err) + } + } + name := aclObjects[0] + o := bkt.Object(name) + acl, err = o.ACL().List(ctx) + if err != nil { + t.Errorf("Can't retrieve ACL of %v", name) + } else if !hasRule(acl, rule) { + t.Errorf("object ACL missing %+v", rule) + } + if err := o.ACL().Delete(ctx, entity); err != nil { + t.Errorf("object ACL: could not delete entity %s", entity) + } + // Delete the default ACL rule. We can't move this code earlier in the + // test, because the test depends on the fact that the object ACL inherits + // it. + if err := bkt.DefaultObjectACL().Delete(ctx, entity); err != nil { + t.Errorf("default ACL: could not delete entity %s", entity) + } + + entity2 := ACLEntity("user-jbd@google.com") + rule2 := ACLRule{Entity: entity2, Role: RoleReader} + if err := bkt.ACL().Set(ctx, entity2, RoleReader); err != nil { + t.Errorf("Error while putting bucket ACL rule: %v", err) + } + bACL, err := bkt.ACL().List(ctx) + if err != nil { + t.Errorf("Error while getting the ACL of the bucket: %v", err) + } else if !hasRule(bACL, rule2) { + t.Errorf("bucket ACL missing %+v", rule2) + } + if err := bkt.ACL().Delete(ctx, entity2); err != nil { + t.Errorf("Error while deleting bucket ACL rule: %v", err) + } + +} + +func hasRule(acl []ACLRule, rule ACLRule) bool { + for _, r := range acl { + if r == rule { + return true + } + } + return false +} + +func TestValidObjectNames(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + + validNames := []string{ + "gopher", + "Гоферови", + "a", + strings.Repeat("a", 1024), + } + for _, name := range validNames { + w := bkt.Object(name).NewWriter(ctx) + if _, err := w.Write([]byte("data")); err != nil { + t.Errorf("Object %q write failed: %v. Want success", name, err) + continue + } + if err := w.Close(); err != nil { + t.Errorf("Object %q close failed: %v. Want success", name, err) + continue + } + defer bkt.Object(name).Delete(ctx) + } + + invalidNames := []string{ + "", // Too short. + strings.Repeat("a", 1025), // Too long. + "new\nlines", + "bad\xffunicode", + } + for _, name := range invalidNames { + w := bkt.Object(name).NewWriter(ctx) + // Invalid object names will either cause failure during Write or Close. + if _, err := w.Write([]byte("data")); err != nil { + continue + } + if err := w.Close(); err != nil { + continue + } + defer bkt.Object(name).Delete(ctx) + t.Errorf("%q should have failed. 
+	}
+}
+
+func TestWriterContentType(t *testing.T) {
+	ctx := context.Background()
+	client, bucket := testConfig(ctx, t)
+	defer client.Close()
+
+	obj := client.Bucket(bucket).Object("content")
+	testCases := []struct {
+		content           string
+		setType, wantType string
+	}{
+		{
+			content:  "It was the best of times, it was the worst of times.",
+			wantType: "text/plain; charset=utf-8",
+		},
+		{
+			content:  "<html><head><title>My first page</title></head></html>",
+			wantType: "text/html; charset=utf-8",
+		},
+		{
+			content:  "<html><head><title>My first page</title></head></html>",
+			setType:  "text/html",
+			wantType: "text/html",
+		},
+		{
+			content:  "<html><head><title>My first page</title></head></html>",
+			setType:  "image/jpeg",
+			wantType: "image/jpeg",
+		},
+	}
+	for _, tt := range testCases {
+		w := obj.NewWriter(ctx)
+		w.ContentType = tt.setType
+		if _, err := w.Write([]byte(tt.content)); err != nil {
+			t.Errorf("w.Write: %v", err)
+		}
+		if err := w.Close(); err != nil {
+			t.Errorf("w.Close: %v", err)
+		}
+		attrs, err := obj.Attrs(ctx)
+		if err != nil {
+			t.Errorf("obj.Attrs: %v", err)
+			continue
+		}
+		if got := attrs.ContentType; got != tt.wantType {
+			t.Errorf("Content-Type = %q; want %q\nContent: %q\nSet Content-Type: %q", got, tt.wantType, tt.content, tt.setType)
+		}
+	}
+}
+
+func TestZeroSizedObject(t *testing.T) {
+	ctx := context.Background()
+	client, bucket := testConfig(ctx, t)
+	defer client.Close()
+
+	obj := client.Bucket(bucket).Object("zero")
+
+	// Check writing it works as expected.
+	w := obj.NewWriter(ctx)
+	if err := w.Close(); err != nil {
+		t.Fatalf("Writer.Close: %v", err)
+	}
+	defer obj.Delete(ctx)
+
+	// Check we can read it too.
+	r, err := obj.NewReader(ctx)
+	if err != nil {
+		t.Fatalf("NewReader: %v", err)
+	}
+	defer r.Close()
+	body, err := ioutil.ReadAll(r)
+	if err != nil {
+		t.Fatalf("ioutil.ReadAll: %v", err)
+	}
+	if len(body) != 0 {
+		t.Errorf("Body is %v, want empty []byte{}", body)
+	}
+}
+
+// cleanup deletes the bucket used for testing, as well as old
+// testing buckets that weren't cleaned previously.
+func cleanup() error {
+	if testing.Short() {
+		return nil // Don't clean up in short mode.
+	}
+	ctx := context.Background()
+	client, bucket := config(ctx)
+	if client == nil {
+		return nil // Don't cleanup if we're not configured correctly.
+	}
+	defer client.Close()
+	if err := killBucket(ctx, client, bucket); err != nil {
+		return err
+	}
+
+	// Delete buckets whose name begins with our test prefix, and which were
+	// created a while ago. (Unfortunately GCS doesn't provide last-modified
+	// time, which would be a better way to check for staleness.)
+	const expireAge = 24 * time.Hour
+	projectID := testutil.ProjID()
+	it := client.Buckets(ctx, projectID)
+	it.Prefix = projectID + testPrefix
+	for {
+		bktAttrs, err := it.Next()
+		if err == Done {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		if time.Since(bktAttrs.Created) > expireAge {
+			log.Printf("deleting bucket %q, which is more than %s old", bktAttrs.Name, expireAge)
+			if err := killBucket(ctx, client, bktAttrs.Name); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// killBucket deletes a bucket and all its objects.
+func killBucket(ctx context.Context, client *Client, bucketName string) error {
+	bkt := client.Bucket(bucketName)
+	// Bucket must be empty to delete.
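+	// There is no bulk-delete call in this client, so the objects are
+	// removed one at a time before the bucket itself can be deleted.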
+ it := bkt.Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err == Done { + break + } + if err != nil { + return err + } + if err := bkt.Object(objAttrs.Name).Delete(ctx); err != nil { + return fmt.Errorf("deleting %q: %v", bucketName+"/"+objAttrs.Name, err) + } + } + // GCS is eventually consistent, so this delete may fail because the + // replica still sees an object in the bucket. We log the error and expect + // a later test run to delete the bucket. + if err := bkt.Delete(ctx); err != nil { + log.Printf("deleting %q: %v", bucketName, err) + } + return nil +} + +func randomContents() []byte { + h := md5.New() + io.WriteString(h, fmt.Sprintf("hello world%d", rand.Intn(100000))) + return h.Sum(nil) +} + +type zeros struct{} + +func (zeros) Read(p []byte) (int, error) { return len(p), nil } diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go new file mode 100644 index 000000000..9e21648e5 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -0,0 +1,55 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "io" +) + +// Reader reads a Cloud Storage object. +type Reader struct { + body io.ReadCloser + remain, size int64 + contentType string +} + +func (r *Reader) Close() error { + return r.body.Close() +} + +func (r *Reader) Read(p []byte) (int, error) { + n, err := r.body.Read(p) + if r.remain != -1 { + r.remain -= int64(n) + } + return n, err +} + +// Size returns the size of the object in bytes. +// The returned value is always the same and is not affected by +// calls to Read or Close. +func (r *Reader) Size() int64 { + return r.size +} + +// Remain returns the number of bytes left to read, or -1 if unknown. +func (r *Reader) Remain() int64 { + return r.remain +} + +// ContentType returns the content type of the object. +func (r *Reader) ContentType() string { + return r.contentType +} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go new file mode 100644 index 000000000..095ab263e --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -0,0 +1,952 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package storage contains a Google Cloud Storage client. +// +// This package is experimental and may make backwards-incompatible changes. 
+package storage // import "cloud.google.com/go/storage" + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +var ( + ErrBucketNotExist = errors.New("storage: bucket doesn't exist") + ErrObjectNotExist = errors.New("storage: object doesn't exist") + + // Done is returned by iterators in this package when they have no more items. + Done = iterator.Done +) + +const userAgent = "gcloud-golang-storage/20151204" + +const ( + // ScopeFullControl grants permissions to manage your + // data and permissions in Google Cloud Storage. + ScopeFullControl = raw.DevstorageFullControlScope + + // ScopeReadOnly grants permissions to + // view your data in Google Cloud Storage. + ScopeReadOnly = raw.DevstorageReadOnlyScope + + // ScopeReadWrite grants permissions to manage your + // data in Google Cloud Storage. + ScopeReadWrite = raw.DevstorageReadWriteScope +) + +// AdminClient is a client type for performing admin operations on a project's +// buckets. +// +// Deprecated: Client has all of AdminClient's methods. +type AdminClient struct { + c *Client + projectID string +} + +// NewAdminClient creates a new AdminClient for a given project. +// +// Deprecated: use NewClient instead. +func NewAdminClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*AdminClient, error) { + c, err := NewClient(ctx, opts...) + if err != nil { + return nil, err + } + return &AdminClient{ + c: c, + projectID: projectID, + }, nil +} + +// Close closes the AdminClient. +func (c *AdminClient) Close() error { + return c.c.Close() +} + +// Create creates a Bucket in the project. +// If attrs is nil the API defaults will be used. +// +// Deprecated: use BucketHandle.Create instead. +func (c *AdminClient) CreateBucket(ctx context.Context, bucketName string, attrs *BucketAttrs) error { + return c.c.Bucket(bucketName).Create(ctx, c.projectID, attrs) +} + +// Delete deletes a Bucket in the project. +// +// Deprecated: use BucketHandle.Delete instead. +func (c *AdminClient) DeleteBucket(ctx context.Context, bucketName string) error { + return c.c.Bucket(bucketName).Delete(ctx) +} + +// Client is a client for interacting with Google Cloud Storage. +type Client struct { + hc *http.Client + raw *raw.Service +} + +// NewClient creates a new Google Cloud Storage client. +// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(ScopeFullControl), + option.WithUserAgent(userAgent), + } + opts = append(o, opts...) + hc, _, err := transport.NewHTTPClient(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + rawService, err := raw.New(hc) + if err != nil { + return nil, fmt.Errorf("storage client: %v", err) + } + return &Client{ + hc: hc, + raw: rawService, + }, nil +} + +// Close closes the Client. +func (c *Client) Close() error { + c.hc = nil + return nil +} + +// BucketHandle provides operations on a Google Cloud Storage bucket. +// Use Client.Bucket to get a handle. 
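+//
+// A minimal usage sketch (the bucket and object names are hypothetical):
+//
+//	client, err := NewClient(ctx)
+//	if err != nil {
+//		// TODO: handle error.
+//	}
+//	defer client.Close()
+//	bkt := client.Bucket("my-bucket")
+//	attrs, err := bkt.Object("my-object").Attrs(ctx)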
+type BucketHandle struct {
+	acl              *ACLHandle
+	defaultObjectACL *ACLHandle
+
+	c    *Client
+	name string
+}
+
+// Bucket returns a BucketHandle, which provides operations on the named bucket.
+// This call does not perform any network operations.
+//
+// name must contain only lowercase letters, numbers, dashes, underscores, and
+// dots. The full specification for valid bucket names can be found at:
+//   https://cloud.google.com/storage/docs/bucket-naming
+func (c *Client) Bucket(name string) *BucketHandle {
+	return &BucketHandle{
+		c:    c,
+		name: name,
+		acl: &ACLHandle{
+			c:      c,
+			bucket: name,
+		},
+		defaultObjectACL: &ACLHandle{
+			c:         c,
+			bucket:    name,
+			isDefault: true,
+		},
+	}
+}
+
+// SignedURLOptions allows you to restrict access to the signed URL.
+type SignedURLOptions struct {
+	// GoogleAccessID represents the authorizer of the signed URL generation.
+	// It is typically the Google service account client email address from
+	// the Google Developers Console in the form of "xxx@developer.gserviceaccount.com".
+	// Required.
+	GoogleAccessID string
+
+	// PrivateKey is the Google service account private key. It is obtainable
+	// from the Google Developers Console.
+	// At https://console.developers.google.com/project/<your-project-id>/apiui/credential,
+	// create a service account client ID or reuse one of your existing service account
+	// credentials. Click on the "Generate new P12 key" to generate and download
+	// a new private key. Once you download the P12 file, use the following command
+	// to convert it into a PEM file.
+	//
+	//    $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
+	//
+	// Provide the contents of the PEM file as a byte slice.
+	// Exactly one of PrivateKey or SignBytes must be non-nil.
+	PrivateKey []byte
+
+	// SignBytes is a function for implementing custom signing.
+	// If your application is running on Google App Engine, you can use appengine's internal signing function:
+	//     ctx := appengine.NewContext(request)
+	//     acc, _ := appengine.ServiceAccount(ctx)
+	//     url, err := SignedURL("bucket", "object", &SignedURLOptions{
+	//     	GoogleAccessID: acc,
+	//     	SignBytes: func(b []byte) ([]byte, error) {
+	//     		_, signedBytes, err := appengine.SignBytes(ctx, b)
+	//     		return signedBytes, err
+	//     	},
+	//     	// etc.
+	//     })
+	//
+	// Exactly one of PrivateKey or SignBytes must be non-nil.
+	SignBytes func([]byte) ([]byte, error)
+
+	// Method is the HTTP method to be used with the signed URL.
+	// Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.
+	// Required.
+	Method string
+
+	// Expires is the expiration time on the signed URL. It must be
+	// a datetime in the future.
+	// Required.
+	Expires time.Time
+
+	// ContentType is the content type header the client must provide
+	// to use the generated signed URL.
+	// Optional.
+	ContentType string
+
+	// Headers is a list of extension headers the client must provide
+	// in order to use the generated signed URL.
+	// Optional.
+	Headers []string
+
+	// MD5 is the base64 encoded MD5 checksum of the file.
+	// If provided, the client should provide the exact value on the request
+	// header in order to use the signed URL.
+	// Optional.
+	MD5 []byte
+}
+
+// SignedURL returns a URL for the specified object. Signed URLs allow
+// a user access to a restricted resource for a limited time without having a
+// Google account or signing in. For more information about signed
+// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
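+//
+// A minimal sketch, assuming a PEM- or DER-encoded service account key is
+// already loaded into keyBytes (the names here are hypothetical):
+//
+//	url, err := SignedURL("my-bucket", "report.pdf", &SignedURLOptions{
+//		GoogleAccessID: "xxx@developer.gserviceaccount.com",
+//		PrivateKey:     keyBytes,
+//		Method:         "GET",
+//		Expires:        time.Now().Add(15 * time.Minute),
+//	})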
+func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { + if opts == nil { + return "", errors.New("storage: missing required SignedURLOptions") + } + if opts.GoogleAccessID == "" { + return "", errors.New("storage: missing required GoogleAccessID") + } + if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { + return "", errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + if opts.Method == "" { + return "", errors.New("storage: missing required method option") + } + if opts.Expires.IsZero() { + return "", errors.New("storage: missing required expires option") + } + + signBytes := opts.SignBytes + if opts.PrivateKey != nil { + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + signBytes = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + sum[:], + ) + } + } else { + signBytes = opts.SignBytes + } + + u := &url.URL{ + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%s\n", opts.Method) + fmt.Fprintf(buf, "%s\n", opts.MD5) + fmt.Fprintf(buf, "%s\n", opts.ContentType) + fmt.Fprintf(buf, "%d\n", opts.Expires.Unix()) + fmt.Fprintf(buf, "%s", strings.Join(opts.Headers, "\n")) + fmt.Fprintf(buf, "%s", u.String()) + + b, err := signBytes(buf.Bytes()) + if err != nil { + return "", err + } + encoded := base64.StdEncoding.EncodeToString(b) + u.Scheme = "https" + u.Host = "storage.googleapis.com" + q := u.Query() + q.Set("GoogleAccessId", opts.GoogleAccessID) + q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) + q.Set("Signature", string(encoded)) + u.RawQuery = q.Encode() + return u.String(), nil +} + +// ObjectHandle provides operations on an object in a Google Cloud Storage bucket. +// Use BucketHandle.Object to get a handle. +type ObjectHandle struct { + c *Client + bucket string + object string + + acl *ACLHandle + conds []Condition +} + +// ACL provides access to the object's access control list. +// This controls who can read and write this object. +// This call does not perform any network operations. +func (o *ObjectHandle) ACL() *ACLHandle { + return o.acl +} + +// WithConditions returns a copy of o using the provided conditions. +func (o *ObjectHandle) WithConditions(conds ...Condition) *ObjectHandle { + o2 := *o + o2.conds = conds + return &o2 +} + +// Attrs returns meta information about the object. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) { + if !utf8.ValidString(o.object) { + return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) + } + call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) + if err := applyConds("Attrs", o.conds, call); err != nil { + return nil, err + } + obj, err := call.Do() + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// Update updates an object with the provided attributes. +// All zero-value attributes are ignored. +// ErrObjectNotExist will be returned if the object is not found. 
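+//
+// For example, the following sketch changes only the content type and
+// leaves every other attribute untouched, since zero-value fields are
+// ignored:
+//
+//	attrs, err := obj.Update(ctx, ObjectAttrs{ContentType: "text/html"})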
+func (o *ObjectHandle) Update(ctx context.Context, attrs ObjectAttrs) (*ObjectAttrs, error) { + if !utf8.ValidString(o.object) { + return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) + } + call := o.c.raw.Objects.Patch(o.bucket, o.object, attrs.toRawObject(o.bucket)).Projection("full").Context(ctx) + if err := applyConds("Update", o.conds, call); err != nil { + return nil, err + } + obj, err := call.Do() + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// Delete deletes the single specified object. +func (o *ObjectHandle) Delete(ctx context.Context) error { + if !utf8.ValidString(o.object) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) + } + call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx) + if err := applyConds("Delete", o.conds, call); err != nil { + return err + } + err := call.Do() + switch e := err.(type) { + case nil: + return nil + case *googleapi.Error: + if e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + } + return err +} + +// CopyTo copies the object to the given dst. +// The copied object's attributes are overwritten by attrs if non-nil. +func (o *ObjectHandle) CopyTo(ctx context.Context, dst *ObjectHandle, attrs *ObjectAttrs) (*ObjectAttrs, error) { + // TODO(djd): move bucket/object name validation to a single helper func. + if o.bucket == "" || dst.bucket == "" { + return nil, errors.New("storage: the source and destination bucket names must both be non-empty") + } + if o.object == "" || dst.object == "" { + return nil, errors.New("storage: the source and destination object names must both be non-empty") + } + if !utf8.ValidString(o.object) { + return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) + } + if !utf8.ValidString(dst.object) { + return nil, fmt.Errorf("storage: dst name %q is not valid UTF-8", dst.object) + } + var rawObject *raw.Object + if attrs != nil { + attrs.Name = dst.object + if attrs.ContentType == "" { + return nil, errors.New("storage: attrs.ContentType must be non-empty") + } + rawObject = attrs.toRawObject(dst.bucket) + } + call := o.c.raw.Objects.Copy(o.bucket, o.object, dst.bucket, dst.object, rawObject).Projection("full").Context(ctx) + if err := applyConds("CopyTo destination", dst.conds, call); err != nil { + return nil, err + } + if err := applyConds("CopyTo source", toSourceConds(o.conds), call); err != nil { + return nil, err + } + obj, err := call.Do() + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// ComposeFrom concatenates the provided slice of source objects into a new +// object whose destination is the receiver. The provided attrs, if not nil, +// are used to set the attributes on the newly-created object. All source +// objects must reside within the same bucket as the destination. 
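+//
+// An illustrative sketch (the object names are hypothetical):
+//
+//	dst := bkt.Object("combined")
+//	srcs := []*ObjectHandle{bkt.Object("part1"), bkt.Object("part2")}
+//	attrs, err := dst.ComposeFrom(ctx, srcs, &ObjectAttrs{ContentType: "text/plain"})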
+func (o *ObjectHandle) ComposeFrom(ctx context.Context, srcs []*ObjectHandle, attrs *ObjectAttrs) (*ObjectAttrs, error) {
+	if o.bucket == "" || o.object == "" {
+		return nil, errors.New("storage: the destination bucket and object names must be non-empty")
+	}
+	if len(srcs) == 0 {
+		return nil, errors.New("storage: at least one source object must be specified")
+	}
+
+	req := &raw.ComposeRequest{}
+	if attrs != nil {
+		req.Destination = attrs.toRawObject(o.bucket)
+		req.Destination.Name = o.object
+	}
+
+	for _, src := range srcs {
+		if src.bucket != o.bucket {
+			return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", o.bucket, src.bucket)
+		}
+		if src.object == "" {
+			return nil, errors.New("storage: all source object names must be non-empty")
+		}
+		srcObj := &raw.ComposeRequestSourceObjects{
+			Name: src.object,
+		}
+		if err := applyConds("ComposeFrom source", src.conds, composeSourceObj{srcObj}); err != nil {
+			return nil, err
+		}
+		req.SourceObjects = append(req.SourceObjects, srcObj)
+	}
+
+	call := o.c.raw.Objects.Compose(o.bucket, o.object, req).Context(ctx)
+	if err := applyConds("ComposeFrom destination", o.conds, call); err != nil {
+		return nil, err
+	}
+
+	obj, err := call.Do()
+	if err != nil {
+		return nil, err
+	}
+	return newObject(obj), nil
+}
+
+// NewReader creates a new Reader to read the contents of the object.
+// ErrObjectNotExist will be returned if the object is not found.
+func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
+	return o.NewRangeReader(ctx, 0, -1)
+}
+
+// NewRangeReader reads part of an object, reading at most length bytes
+// starting at the given offset. If length is negative, the object is read
+// until the end.
+func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) {
+	if !utf8.ValidString(o.object) {
+		return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+	}
+	if offset < 0 {
+		return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
+	}
+	u := &url.URL{
+		Scheme: "https",
+		Host:   "storage.googleapis.com",
+		Path:   fmt.Sprintf("/%s/%s", o.bucket, o.object),
+	}
+	verb := "GET"
+	if length == 0 {
+		verb = "HEAD"
+	}
+	req, err := http.NewRequest(verb, u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	if err := applyConds("NewReader", o.conds, objectsGetCall{req}); err != nil {
+		return nil, err
+	}
+	if length < 0 && offset > 0 {
+		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
+	} else if length > 0 {
+		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
+	}
+	res, err := o.c.hc.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if res.StatusCode == http.StatusNotFound {
+		res.Body.Close()
+		return nil, ErrObjectNotExist
+	}
+	if res.StatusCode < 200 || res.StatusCode > 299 {
+		body, _ := ioutil.ReadAll(res.Body)
+		res.Body.Close()
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+			Body:   string(body),
+		}
+	}
+	if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
+		res.Body.Close()
+		return nil, errors.New("storage: partial request not satisfied")
+	}
+	clHeader := res.Header.Get("X-Goog-Stored-Content-Length")
+	cl, err := strconv.ParseInt(clHeader, 10, 64)
+	if err != nil {
+		// Close the body on this error path too, so the connection isn't leaked.
+		res.Body.Close()
+		return nil, fmt.Errorf("storage: can't parse content length %q: %v", clHeader, err)
+	}
+	remain := res.ContentLength
+	body := res.Body
+	if length == 0 {
+		remain = 0
+		body.Close()
+		body = emptyBody
+	}
+	return &Reader{
+		body:        body,
+		size:        cl,
+		remain:      remain,
+		contentType: res.Header.Get("Content-Type"),
+	}, nil
+}
+
+var emptyBody = ioutil.NopCloser(strings.NewReader(""))
+
+// NewWriter returns a storage Writer that writes to the GCS object
+// associated with this ObjectHandle.
+//
+// A new object will be created unless an object with this name already
+// exists. Otherwise any previous object with the same name will be replaced.
+// The object will not be available (and any previous object will remain)
+// until Close has been called.
+//
+// Attributes can be set on the object by modifying the returned Writer's
+// ObjectAttrs field before the first call to Write. If no ContentType
+// attribute is specified, the content type will be automatically sniffed
+// using net/http.DetectContentType.
+//
+// It is the caller's responsibility to call Close when writing is done.
+func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
+	return &Writer{
+		ctx:         ctx,
+		o:           o,
+		donec:       make(chan struct{}),
+		ObjectAttrs: ObjectAttrs{Name: o.object},
+	}
+}
+
+// parseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func parseKey(key []byte) (*rsa.PrivateKey, error) {
+	if block, _ := pem.Decode(key); block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, err
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("storage: private key is invalid")
+	}
+	return parsed, nil
+}
+
+func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
+	var acl []*raw.ObjectAccessControl
+	if len(oldACL) > 0 {
+		acl = make([]*raw.ObjectAccessControl, len(oldACL))
+		for i, rule := range oldACL {
+			acl[i] = &raw.ObjectAccessControl{
+				Entity: string(rule.Entity),
+				Role:   string(rule.Role),
+			}
+		}
+	}
+	return acl
+}
+
+// toRawObject copies the editable attributes from o to the raw library's Object type.
+func (o ObjectAttrs) toRawObject(bucket string) *raw.Object {
+	acl := toRawObjectACL(o.ACL)
+	return &raw.Object{
+		Bucket:             bucket,
+		Name:               o.Name,
+		ContentType:        o.ContentType,
+		ContentEncoding:    o.ContentEncoding,
+		ContentLanguage:    o.ContentLanguage,
+		CacheControl:       o.CacheControl,
+		ContentDisposition: o.ContentDisposition,
+		Acl:                acl,
+		Metadata:           o.Metadata,
+	}
+}
+
+// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
+type ObjectAttrs struct {
+	// Bucket is the name of the bucket containing this GCS object.
+	// This field is read-only.
+	Bucket string
+
+	// Name is the name of the object within the bucket.
+	// This field is read-only.
+	Name string
+
+	// ContentType is the MIME type of the object's content.
+	ContentType string
+
+	// ContentLanguage is the content language of the object's content.
+	ContentLanguage string
+
+	// CacheControl is the Cache-Control header to be sent in the response
+	// headers when serving the object data.
+	CacheControl string
+
+	// ACL is the list of access control rules for the object.
+	ACL []ACLRule
+
+	// Owner is the owner of the object. This field is read-only.
+	//
+	// If non-zero, it is in the form of "user-<userId>".
+	Owner string
+
+	// Size is the length of the object's content. This field is read-only.
+ Size int64 + + // ContentEncoding is the encoding of the object's content. + ContentEncoding string + + // ContentDisposition is the optional Content-Disposition header of the object + // sent in the response headers. + ContentDisposition string + + // MD5 is the MD5 hash of the object's content. This field is read-only. + MD5 []byte + + // CRC32C is the CRC32 checksum of the object's content using + // the Castagnoli93 polynomial. This field is read-only. + CRC32C uint32 + + // MediaLink is an URL to the object's content. This field is read-only. + MediaLink string + + // Metadata represents user-provided metadata, in key/value pairs. + // It can be nil if no metadata is provided. + Metadata map[string]string + + // Generation is the generation number of the object's content. + // This field is read-only. + Generation int64 + + // MetaGeneration is the version of the metadata for this + // object at this generation. This field is used for preconditions + // and for detecting changes in metadata. A metageneration number + // is only meaningful in the context of a particular generation + // of a particular object. This field is read-only. + MetaGeneration int64 + + // StorageClass is the storage class of the bucket. + // This value defines how objects in the bucket are stored and + // determines the SLA and the cost of storage. Typical values are + // "STANDARD" and "DURABLE_REDUCED_AVAILABILITY". + // It defaults to "STANDARD". This field is read-only. + StorageClass string + + // Created is the time the object was created. This field is read-only. + Created time.Time + + // Deleted is the time the object was deleted. + // If not deleted, it is the zero value. This field is read-only. + Deleted time.Time + + // Updated is the creation or modification time of the object. + // For buckets with versioning enabled, changing an object's + // metadata does not change this property. This field is read-only. + Updated time.Time + + // Prefix is set only for ObjectAttrs which represent synthetic "directory + // entries" when iterating over buckets using Query.Delimiter. See + // ObjectIterator.Next. When set, no other fields in ObjectAttrs will be + // populated. + Prefix string +} + +// convertTime converts a time in RFC3339 format to time.Time. +// If any error occurs in parsing, the zero-value time.Time is silently returned. 
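+//
+// For example, convertTime("2016-01-02T15:04:05Z") returns the
+// corresponding time.Time, while both convertTime("") and
+// convertTime("not-a-time") return the zero time.Time.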
+func convertTime(t string) time.Time { + var r time.Time + if t != "" { + r, _ = time.Parse(time.RFC3339, t) + } + return r +} + +func newObject(o *raw.Object) *ObjectAttrs { + if o == nil { + return nil + } + acl := make([]ACLRule, len(o.Acl)) + for i, rule := range o.Acl { + acl[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + owner := "" + if o.Owner != nil { + owner = o.Owner.Entity + } + md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) + var crc32c uint32 + d, err := base64.StdEncoding.DecodeString(o.Crc32c) + if err == nil && len(d) == 4 { + crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]) + } + return &ObjectAttrs{ + Bucket: o.Bucket, + Name: o.Name, + ContentType: o.ContentType, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ACL: acl, + Owner: owner, + ContentEncoding: o.ContentEncoding, + Size: int64(o.Size), + MD5: md5, + CRC32C: crc32c, + MediaLink: o.MediaLink, + Metadata: o.Metadata, + Generation: o.Generation, + MetaGeneration: o.Metageneration, + StorageClass: o.StorageClass, + Created: convertTime(o.TimeCreated), + Deleted: convertTime(o.TimeDeleted), + Updated: convertTime(o.Updated), + } +} + +// Query represents a query to filter objects from a bucket. +type Query struct { + // Delimiter returns results in a directory-like fashion. + // Results will contain only objects whose names, aside from the + // prefix, do not contain delimiter. Objects whose names, + // aside from the prefix, contain delimiter will have their name, + // truncated after the delimiter, returned in prefixes. + // Duplicate prefixes are omitted. + // Optional. + Delimiter string + + // Prefix is the prefix filter to query objects + // whose names begin with this prefix. + // Optional. + Prefix string + + // Versions indicates whether multiple versions of the same + // object will be included in the results. + Versions bool + + // Cursor is a previously-returned page token + // representing part of the larger set of results to view. + // Optional. + // + // Deprecated: Use ObjectIterator.PageInfo().Token instead. + Cursor string + + // MaxResults is the maximum number of items plus prefixes + // to return. As duplicate prefixes are omitted, + // fewer total results may be returned than requested. + // The default page limit is used if it is negative or zero. + // + // Deprecated: Use ObjectIterator.PageInfo().MaxSize instead. + MaxResults int +} + +// contentTyper implements ContentTyper to enable an +// io.ReadCloser to specify its MIME type. +type contentTyper struct { + io.Reader + t string +} + +func (c *contentTyper) ContentType() string { + return c.t +} + +// A Condition constrains methods to act on specific generations of +// resources. +// +// Not all conditions or combinations of conditions are applicable to +// all methods. +type Condition interface { + // method is the high-level ObjectHandle method name, for + // error messages. call is the call object to modify. + modifyCall(method string, call interface{}) error +} + +// applyConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. +func applyConds(method string, conds []Condition, call interface{}) error { + for _, cond := range conds { + if err := cond.modifyCall(method, call); err != nil { + return err + } + } + return nil +} + +// toSourceConds returns a slice of Conditions derived from Conds that instead +// function on the equivalent Source methods of a call. 
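+//
+// For example, IfGenerationMatch on a copy source is translated to the
+// IfSourceGenerationMatch parameter of the underlying call, and Generation
+// to SourceGeneration.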
+func toSourceConds(conds []Condition) []Condition { + out := make([]Condition, 0, len(conds)) + for _, c := range conds { + switch c := c.(type) { + case genCond: + var m string + if strings.HasPrefix(c.method, "If") { + m = "IfSource" + c.method[2:] + } else { + m = "Source" + c.method + } + out = append(out, genCond{method: m, val: c.val}) + default: + // NOTE(djd): If the message from unsupportedCond becomes + // confusing, we'll need to find a way for Conditions to + // identify themselves. + out = append(out, unsupportedCond{}) + } + } + return out +} + +func Generation(gen int64) Condition { return genCond{"Generation", gen} } +func IfGenerationMatch(gen int64) Condition { return genCond{"IfGenerationMatch", gen} } +func IfGenerationNotMatch(gen int64) Condition { return genCond{"IfGenerationNotMatch", gen} } +func IfMetaGenerationMatch(gen int64) Condition { return genCond{"IfMetagenerationMatch", gen} } +func IfMetaGenerationNotMatch(gen int64) Condition { return genCond{"IfMetagenerationNotMatch", gen} } + +type genCond struct { + method string + val int64 +} + +func (g genCond) modifyCall(srcMethod string, call interface{}) error { + rv := reflect.ValueOf(call) + meth := rv.MethodByName(g.method) + if !meth.IsValid() { + return fmt.Errorf("%s: condition %s not supported", srcMethod, g.method) + } + meth.Call([]reflect.Value{reflect.ValueOf(g.val)}) + return nil +} + +type unsupportedCond struct{} + +func (unsupportedCond) modifyCall(srcMethod string, call interface{}) error { + return fmt.Errorf("%s: condition not supported", srcMethod) +} + +func appendParam(req *http.Request, k, v string) { + sep := "" + if req.URL.RawQuery != "" { + sep = "&" + } + req.URL.RawQuery += sep + url.QueryEscape(k) + "=" + url.QueryEscape(v) +} + +// objectsGetCall wraps an *http.Request for an object fetch call, but adds the methods +// that modifyCall searches for by name. (the same names as the raw, auto-generated API) +type objectsGetCall struct{ req *http.Request } + +func (c objectsGetCall) Generation(gen int64) { + appendParam(c.req, "generation", fmt.Sprint(gen)) +} +func (c objectsGetCall) IfGenerationMatch(gen int64) { + appendParam(c.req, "ifGenerationMatch", fmt.Sprint(gen)) +} +func (c objectsGetCall) IfGenerationNotMatch(gen int64) { + appendParam(c.req, "ifGenerationNotMatch", fmt.Sprint(gen)) +} +func (c objectsGetCall) IfMetagenerationMatch(gen int64) { + appendParam(c.req, "ifMetagenerationMatch", fmt.Sprint(gen)) +} +func (c objectsGetCall) IfMetagenerationNotMatch(gen int64) { + appendParam(c.req, "ifMetagenerationNotMatch", fmt.Sprint(gen)) +} + +// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods +// that modifyCall searches for by name. +type composeSourceObj struct { + src *raw.ComposeRequestSourceObjects +} + +func (c composeSourceObj) Generation(gen int64) { + c.src.Generation = gen +} + +func (c composeSourceObj) IfGenerationMatch(gen int64) { + // It's safe to overwrite ObjectPreconditions, since its only field is + // IfGenerationMatch. + c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{ + IfGenerationMatch: gen, + } +} + +// TODO(jbd): Add storage.objects.watch. diff --git a/vendor/cloud.google.com/go/storage/storage_test.go b/vendor/cloud.google.com/go/storage/storage_test.go new file mode 100644 index 000000000..9c256fc45 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage_test.go @@ -0,0 +1,654 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/api/option" + raw "google.golang.org/api/storage/v1" +) + +func TestSignedURL(t *testing.T) { + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("rsa"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object-name?" + + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "ITqNWQHr7ayIj%2B0Ds5%2FzUT2cWMQQouuFmu6L11Zd3kfNKvm3sjyGIzO" + + "gZsSUoter1SxP7BcrCzgqIZ9fQmgQnuIpqqLL4kcGmTbKsQS6hTknpJM%2F" + + "2lS4NY6UH1VXBgm2Tce28kz8rnmqG6svcGvtWuOgJsETeSIl1R9nAEIDCEq" + + "ZJzoOiru%2BODkHHkpoFjHWAwHugFHX%2B9EX4SxaytiN3oEy48HpYGWV0I" + + "h8NvU1hmeWzcLr41GnTADeCn7Eg%2Fb5H2GCNO70Cz%2Bw2fn%2BofLCUeR" + + "YQd%2FhES8oocv5kpHZkstc8s8uz3aKMsMauzZ9MOmGy%2F6VULBgIVvi6a" + + "AwEBIYOw%3D%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_PEMPrivateKey(t *testing.T) { + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("pem"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object-name?" + + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "B7XkS4dfmVDoe%2FoDeXZkWlYmg8u2kI0SizTrzL5%2B9RmKnb5j7Kf34DZ" + + "JL8Hcjr1MdPFLNg2QV4lEH86Gqgqt%2Fv3jFOTRl4wlzcRU%2FvV5c5HU8M" + + "qW0FZ0IDbqod2RdsMONLEO6yQWV2HWFrMLKl2yMFlWCJ47et%2BFaHe6v4Z" + + "EBc0%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_SignBytes(t *testing.T) { + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + SignBytes: func(b []byte) ([]byte, error) { + return []byte("signed"), nil + }, + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object-name?" 
+ + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "c2lnbmVk" // base64('signed') == 'c2lnbmVk' + if url != want { + t.Fatalf("Unexpected signed URL\ngot: %q\nwant: %q", url, want) + } +} + +func TestSignedURL_URLUnsafeObjectName(t *testing.T) { + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object name界", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("pem"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object%20nam" + + "e%E7%95%8C?Expires=1033570800&GoogleAccessId=xxx%40clientid" + + "&Signature=bxORkrAm73INEMHktrE7VoUZQzVPvL5NFZ7noAI5zK%2BGSm" + + "%2BWFvsK%2FVnRGtYK9BK89jz%2BX4ZQd87nkMEJw1OsqmGNiepyzB%2B3o" + + "sUYrHyV7UnKs9bkQpBkqPFlfgK1o7oX4NJjA1oKjuHP%2Fj5%2FC15OPa3c" + + "vHV619BEb7vf30nAwQM%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_MissingOptions(t *testing.T) { + pk := dummyKey("rsa") + var tests = []struct { + opts *SignedURLOptions + errMsg string + }{ + { + &SignedURLOptions{}, + "missing required GoogleAccessID", + }, + { + &SignedURLOptions{GoogleAccessID: "access_id"}, + "exactly one of PrivateKey or SignedBytes must be set", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + SignBytes: func(b []byte) ([]byte, error) { return b, nil }, + PrivateKey: pk, + }, + "exactly one of PrivateKey or SignedBytes must be set", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + PrivateKey: pk, + }, + "missing required method", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + SignBytes: func(b []byte) ([]byte, error) { return b, nil }, + }, + "missing required method", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + PrivateKey: pk, + Method: "PUT", + }, + "missing required expires", + }, + } + for _, test := range tests { + _, err := SignedURL("bucket", "name", test.opts) + if !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected err: %v, found: %v", test.errMsg, err) + } + } +} + +func dummyKey(kind string) []byte { + slurp, err := ioutil.ReadFile(fmt.Sprintf("./testdata/dummy_%s", kind)) + if err != nil { + log.Fatal(err) + } + return slurp +} + +func TestCopyToMissingFields(t *testing.T) { + var tests = []struct { + srcBucket, srcName, destBucket, destName string + errMsg string + }{ + { + "mybucket", "", "mybucket", "destname", + "the source and destination object names must both be non-empty", + }, + { + "mybucket", "srcname", "mybucket", "", + "the source and destination object names must both be non-empty", + }, + { + "", "srcfile", "mybucket", "destname", + "the source and destination bucket names must both be non-empty", + }, + { + "mybucket", "srcfile", "", "destname", + "the source and destination bucket names must both be non-empty", + }, + } + ctx := context.Background() + client, err := NewClient(ctx, option.WithHTTPClient(&http.Client{Transport: &fakeTransport{}})) + if err != nil { + panic(err) + } + for i, test := range tests { + src := client.Bucket(test.srcBucket).Object(test.srcName) + dst := client.Bucket(test.destBucket).Object(test.destName) + _, err := src.CopyTo(ctx, dst, nil) + if !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("CopyTo test #%v:\ngot err %q\nwant err %q", i, err, test.errMsg) + } + } 
+} + +func TestObjectNames(t *testing.T) { + // Naming requirements: https://cloud.google.com/storage/docs/bucket-naming + const maxLegalLength = 1024 + + type testT struct { + name, want string + } + tests := []testT{ + // Embedded characters important in URLs. + {"foo % bar", "foo%20%25%20bar"}, + {"foo ? bar", "foo%20%3F%20bar"}, + {"foo / bar", "foo%20/%20bar"}, + {"foo %?/ bar", "foo%20%25%3F/%20bar"}, + + // Non-Roman scripts + {"타코", "%ED%83%80%EC%BD%94"}, + {"世界", "%E4%B8%96%E7%95%8C"}, + + // Longest legal name + {strings.Repeat("a", maxLegalLength), strings.Repeat("a", maxLegalLength)}, + + // Line terminators besides CR and LF: https://en.wikipedia.org/wiki/Newline#Unicode + {"foo \u000b bar", "foo%20%0B%20bar"}, + {"foo \u000c bar", "foo%20%0C%20bar"}, + {"foo \u0085 bar", "foo%20%C2%85%20bar"}, + {"foo \u2028 bar", "foo%20%E2%80%A8%20bar"}, + {"foo \u2029 bar", "foo%20%E2%80%A9%20bar"}, + + // Null byte. + {"foo \u0000 bar", "foo%20%00%20bar"}, + + // Non-control characters that are discouraged, but not forbidden, according to the documentation. + {"foo # bar", "foo%20%23%20bar"}, + {"foo []*? bar", "foo%20%5B%5D%2A%3F%20bar"}, + + // Angstrom symbol singleton and normalized forms: http://unicode.org/reports/tr15/ + {"foo \u212b bar", "foo%20%E2%84%AB%20bar"}, + {"foo \u0041\u030a bar", "foo%20A%CC%8A%20bar"}, + {"foo \u00c5 bar", "foo%20%C3%85%20bar"}, + + // Hangul separating jamo: http://www.unicode.org/versions/Unicode7.0.0/ch18.pdf (Table 18-10) + {"foo \u3131\u314f bar", "foo%20%E3%84%B1%E3%85%8F%20bar"}, + {"foo \u1100\u1161 bar", "foo%20%E1%84%80%E1%85%A1%20bar"}, + {"foo \uac00 bar", "foo%20%EA%B0%80%20bar"}, + } + + // C0 control characters not forbidden by the docs. + var runes []rune + for r := rune(0x01); r <= rune(0x1f); r++ { + if r != '\u000a' && r != '\u000d' { + runes = append(runes, r) + } + } + tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%01%02%03%04%05%06%07%08%09%0B%0C%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20bar"}) + + // C1 control characters, plus DEL. 
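+	// (DEL, 0x7f, is a single byte and escapes to %7F; U+0080..U+009F each
+	// encode as two UTF-8 bytes, hence the %C2%XX escapes expected below.)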
+ runes = nil + for r := rune(0x7f); r <= rune(0x9f); r++ { + runes = append(runes, r) + } + tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%20bar"}) + + opts := &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("rsa"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: time.Date(2002, time.October, 2, 10, 0, 0, 0, time.UTC), + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + } + + for _, test := range tests { + g, err := SignedURL("bucket-name", test.name, opts) + if err != nil { + t.Errorf("SignedURL(%q) err=%v, want nil", test.name, err) + } + if w := "/bucket-name/" + test.want; !strings.Contains(g, w) { + t.Errorf("SignedURL(%q)=%q, want substring %q", test.name, g, w) + } + } +} + +func TestCondition(t *testing.T) { + gotReq := make(chan *http.Request, 1) + hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + gotReq <- r + if r.Method == "POST" { + w.WriteHeader(200) + } else { + w.WriteHeader(500) + } + }) + defer close() + ctx := context.Background() + c, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + + obj := c.Bucket("buck").Object("obj") + dst := c.Bucket("dstbuck").Object("dst") + tests := []struct { + fn func() + want string + }{ + { + func() { obj.WithConditions(Generation(1234)).NewReader(ctx) }, + "GET /buck/obj?generation=1234", + }, + { + func() { obj.WithConditions(IfGenerationMatch(1234)).NewReader(ctx) }, + "GET /buck/obj?ifGenerationMatch=1234", + }, + { + func() { obj.WithConditions(IfGenerationNotMatch(1234)).NewReader(ctx) }, + "GET /buck/obj?ifGenerationNotMatch=1234", + }, + { + func() { obj.WithConditions(IfMetaGenerationMatch(1234)).NewReader(ctx) }, + "GET /buck/obj?ifMetagenerationMatch=1234", + }, + { + func() { obj.WithConditions(IfMetaGenerationNotMatch(1234)).NewReader(ctx) }, + "GET /buck/obj?ifMetagenerationNotMatch=1234", + }, + { + func() { obj.WithConditions(IfMetaGenerationNotMatch(1234)).Attrs(ctx) }, + "GET /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationNotMatch=1234&projection=full", + }, + { + func() { obj.WithConditions(IfMetaGenerationMatch(1234)).Update(ctx, ObjectAttrs{}) }, + "PATCH /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationMatch=1234&projection=full", + }, + { + func() { obj.WithConditions(Generation(1234)).Delete(ctx) }, + "DELETE /storage/v1/b/buck/o/obj?alt=json&generation=1234", + }, + { + func() { + w := obj.WithConditions(IfGenerationMatch(1234)).NewWriter(ctx) + w.ContentType = "text/plain" + w.Close() + }, + "POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=1234&projection=full&uploadType=multipart", + }, + { + func() { + obj.WithConditions(IfGenerationMatch(1234)).CopyTo(ctx, dst.WithConditions(IfMetaGenerationMatch(5678)), nil) + }, + "POST /storage/v1/b/buck/o/obj/copyTo/b/dstbuck/o/dst?alt=json&ifMetagenerationMatch=5678&ifSourceGenerationMatch=1234&projection=full", + }, + } + + for i, tt := range tests { + tt.fn() + select { + case r := <-gotReq: + got := r.Method + " " + r.RequestURI + if got != tt.want { + t.Errorf("%d. RequestURI = %q; want %q", i, got, tt.want) + } + case <-time.After(5 * time.Second): + t.Fatalf("%d. 
timeout", i) + } + if err != nil { + t.Fatal(err) + } + } + + // Test an error, too: + err = obj.WithConditions(Generation(1234)).NewWriter(ctx).Close() + if err == nil || !strings.Contains(err.Error(), "NewWriter: condition Generation not supported") { + t.Errorf("want error about unsupported condition; got %v", err) + } +} + +// Test object compose. +func TestObjectCompose(t *testing.T) { + gotURL := make(chan string, 1) + gotBody := make(chan []byte, 1) + hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + body, _ := ioutil.ReadAll(r.Body) + gotURL <- r.URL.String() + gotBody <- body + w.Write([]byte("{}")) + }) + defer close() + ctx := context.Background() + c, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + desc string + dst *ObjectHandle + srcs []*ObjectHandle + attrs *ObjectAttrs + wantReq raw.ComposeRequest + wantURL string + wantErr bool + }{ + { + desc: "basic case", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + c.Bucket("foo").Object("quux"), + }, + wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json", + wantReq: raw.ComposeRequest{ + SourceObjects: []*raw.ComposeRequestSourceObjects{ + {Name: "baz"}, + {Name: "quux"}, + }, + }, + }, + { + desc: "with object attrs", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + c.Bucket("foo").Object("quux"), + }, + attrs: &ObjectAttrs{ + Name: "not-bar", + ContentType: "application/json", + }, + wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json", + wantReq: raw.ComposeRequest{ + Destination: &raw.Object{ + Bucket: "foo", + Name: "bar", + ContentType: "application/json", + }, + SourceObjects: []*raw.ComposeRequestSourceObjects{ + {Name: "baz"}, + {Name: "quux"}, + }, + }, + }, + { + desc: "with conditions", + dst: c.Bucket("foo").Object("bar").WithConditions(IfGenerationMatch(12), IfMetaGenerationMatch(34)), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz").WithConditions(Generation(56)), + c.Bucket("foo").Object("quux").WithConditions(IfGenerationMatch(78)), + }, + wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json&ifGenerationMatch=12&ifMetagenerationMatch=34", + wantReq: raw.ComposeRequest{ + SourceObjects: []*raw.ComposeRequestSourceObjects{ + { + Name: "baz", + Generation: 56, + }, + { + Name: "quux", + ObjectPreconditions: &raw.ComposeRequestSourceObjectsObjectPreconditions{ + IfGenerationMatch: 78, + }, + }, + }, + }, + }, + { + desc: "no sources", + dst: c.Bucket("foo").Object("bar"), + wantErr: true, + }, + { + desc: "destination, no bucket", + dst: c.Bucket("").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + }, + wantErr: true, + }, + { + desc: "destination, no object", + dst: c.Bucket("foo").Object(""), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + }, + wantErr: true, + }, + { + desc: "source, different bucket", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("otherbucket").Object("baz"), + }, + wantErr: true, + }, + { + desc: "source, no object", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object(""), + }, + wantErr: true, + }, + { + desc: "destination, bad condition", + dst: c.Bucket("foo").Object("bar").WithConditions(Generation(12)), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + }, + wantErr: true, + }, + { + desc: "source, bad condition", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + 
c.Bucket("foo").Object("baz").WithConditions(IfMetaGenerationMatch(12)), + }, + wantErr: true, + }, + } + + for _, tt := range testCases { + _, err := tt.dst.ComposeFrom(ctx, tt.srcs, tt.attrs) + if gotErr := err != nil; gotErr != tt.wantErr { + t.Errorf("%s: got error %v; want err %t", tt.desc, err, tt.wantErr) + continue + } + if tt.wantErr { + continue + } + url, body := <-gotURL, <-gotBody + if url != tt.wantURL { + t.Errorf("%s: request URL\ngot %q\nwant %q", tt.desc, url, tt.wantURL) + } + var req raw.ComposeRequest + if err := json.Unmarshal(body, &req); err != nil { + t.Errorf("%s: json.Unmarshal %v (body %s)", tt.desc, err, body) + } + if !reflect.DeepEqual(req, tt.wantReq) { + // Print to JSON. + wantReq, _ := json.Marshal(tt.wantReq) + t.Errorf("%s: request body\ngot %s\nwant %s", tt.desc, body, wantReq) + } + } +} + +// Test that ObjectIterator's Next and NextPage methods correctly terminate +// if there is nothing to iterate over. +func TestEmptyObjectIterator(t *testing.T) { + hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + fmt.Fprintf(w, "{}") + }) + defer close() + ctx := context.Background() + client, err := NewClient(ctx, option.WithHTTPClient(hClient)) + if err != nil { + t.Fatal(err) + } + it := client.Bucket("b").Objects(ctx, nil) + c := make(chan error, 1) + go func() { + _, err := it.Next() + c <- err + }() + select { + case err := <-c: + if err != Done { + t.Errorf("got %v, want Done", err) + } + case <-time.After(50 * time.Millisecond): + t.Error("timed out") + } +} + +// Test that BucketIterator's Next method correctly terminates if there is +// nothing to iterate over. +func TestEmptyBucketIterator(t *testing.T) { + hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + fmt.Fprintf(w, "{}") + }) + defer close() + ctx := context.Background() + client, err := NewClient(ctx, option.WithHTTPClient(hClient)) + if err != nil { + t.Fatal(err) + } + it := client.Buckets(ctx, "project") + c := make(chan error, 1) + go func() { + _, err := it.Next() + c <- err + }() + select { + case err := <-c: + if err != Done { + t.Errorf("got %v, want Done", err) + } + case <-time.After(50 * time.Millisecond): + t.Error("timed out") + } +} + +func newTestServer(handler func(w http.ResponseWriter, r *http.Request)) (*http.Client, func()) { + ts := httptest.NewTLSServer(http.HandlerFunc(handler)) + tlsConf := &tls.Config{InsecureSkipVerify: true} + tr := &http.Transport{ + TLSClientConfig: tlsConf, + DialTLS: func(netw, addr string) (net.Conn, error) { + return tls.Dial("tcp", ts.Listener.Addr().String(), tlsConf) + }, + } + return &http.Client{Transport: tr}, func() { + tr.CloseIdleConnections() + ts.Close() + } +} diff --git a/vendor/cloud.google.com/go/storage/testdata/dummy_pem b/vendor/cloud.google.com/go/storage/testdata/dummy_pem new file mode 100644 index 000000000..3428d4497 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/testdata/dummy_pem @@ -0,0 +1,39 @@ +Bag Attributes + friendlyName: privatekey + localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 +Key Attributes: +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQCtCWMoJ2Bok2QoGFyU7A6IlGprO9QfUTT0jNrLkIbM5OWNIuDx +64+PEaTS5g5m+2Hz/lmd5jJKanAH4dY9LZzsaYAPq1K17Gcmg1hEisYeKsgOcjYY +kwRkV+natCTsC+tfWmS0voRh0jA1rI1J4MikceoHtgWdEuoHrrptRVpWKwIDAQAB +AoGAKp3uQvx3vSnX+BwP6Um+RpsvHpwMoW3xue1bEdnVqW8SrlERz+NxZw40ZxDs +KSbuuBZD4iTI7BUM5JQVnNm4FQY1YrPlWZLyI73Bj8RKTXrPdJheM/0r7xjiIXbQ 
+7w4cUSM9rVugnI/rxF2kPIQTGYI+EG/6+P+k6VvgPmC0T/ECQQDUPskiS18WaY+i +Koalbrb3GakaBoHrC1b4ln4CAv7fq7H4WvFvqi/2rxLhHYq31iwxYy8s7J7Sba1+ +5vwJ2TxZAkEA0LVfs3Q2VWZ+cM3bv0aYTalMXg6wT+LoNvk9HnOb0zQYajF3qm4G +ZFdfEqvOkje0zQ4fcihARKyda/VY84UGIwJBAIZa0FvjNmgrnn7bSKzEbxHwrnkJ +EYjGfuGR8mY3mzvfpiM+/oLfSslvfhX+62cALq18yco4ZzlxsFgaxAU//NECQDcS +NN94YcHlGqYPW9W7/gI4EwOaoqFhwV6II71+SfbP/0U+KlJZV+xwNZEKrqZcdqPI +/zkzL8ovNha/laokRrsCQQCyoPHGcBWj+VFbNoyQnX4tghc6rOY7n4pmpgQvU825 +TAM9vnYtSkKK/V56kEDNBO5LwiRsir95IUNclqqMKR1C +-----END RSA PRIVATE KEY----- +Bag Attributes + friendlyName: privatekey + localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 +subject=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com +issuer=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com +-----BEGIN CERTIFICATE----- +MIICXTCCAcagAwIBAgIIHxTMQUVJRZ0wDQYJKoZIhvcNAQEFBQAwVDFSMFAGA1UE +AxNJMTA3OTQzMjM1MDY1OS1udm9nMHZtbjlzNnBxcjNrcjR2MmF2YmM3bmtob2Ex +MS5hcHBzLmdvb2dsZXVzZXJjb250ZW50LmNvbTAeFw0xNDExMjQxODAwMDRaFw0y +NDExMjExODAwMDRaMFQxUjBQBgNVBAMTSTEwNzk0MzIzNTA2NTktbnZvZzB2bW45 +czZwcXIza3I0djJhdmJjN25raG9hMTEuYXBwcy5nb29nbGV1c2VyY29udGVudC5j +b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAK0JYygnYGiTZCgYXJTsDoiU +ams71B9RNPSM2suQhszk5Y0i4PHrj48RpNLmDmb7YfP+WZ3mMkpqcAfh1j0tnOxp +gA+rUrXsZyaDWESKxh4qyA5yNhiTBGRX6dq0JOwL619aZLS+hGHSMDWsjUngyKRx +6ge2BZ0S6geuum1FWlYrAgMBAAGjODA2MAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/ +BAQDAgeAMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMCMA0GCSqGSIb3DQEBBQUAA4GB +ACVvKkZkomHq3uffOQwdZ4VJYuxrvDGnZu/ExW9WngO2teEsjxABL41TNnRYHN5T +lMC19poFA2tR/DySDLJ2XNs/hSvyQUL6HHCncVdR4Srpie88j48peY1MZSMP51Jv +qagbbP5K5DSEu02/zZaV0kaCvLEN0KAtj/noDuOOnQU2 +-----END CERTIFICATE----- diff --git a/vendor/cloud.google.com/go/storage/testdata/dummy_rsa b/vendor/cloud.google.com/go/storage/testdata/dummy_rsa new file mode 100644 index 000000000..4ce6678db --- /dev/null +++ b/vendor/cloud.google.com/go/storage/testdata/dummy_rsa @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE +DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY +fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK +1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr +k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9 +/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt +3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn +2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3 +nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK +6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf +5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e +DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1 +M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g +z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y +1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK +J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U +f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx +QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA +cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr +Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw +5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg +KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84 +OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd 
+mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ
+5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg==
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go
new file mode 100644
index 000000000..60937c071
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/writer.go
@@ -0,0 +1,129 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"fmt"
+	"io"
+	"unicode/utf8"
+
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// A Writer writes a Cloud Storage object.
+type Writer struct {
+	// ObjectAttrs are optional attributes to set on the object. Any attributes
+	// must be initialized before the first Write call. Nil or zero-valued
+	// attributes are ignored.
+	ObjectAttrs
+
+	ctx context.Context
+	o   *ObjectHandle
+
+	opened bool
+	pw     *io.PipeWriter
+
+	donec chan struct{} // closed after err and obj are set.
+	err   error
+	obj   *ObjectAttrs
+}
+
+func (w *Writer) open() error {
+	attrs := w.ObjectAttrs
+	// Check the developer didn't change the object Name (this is unfortunate, but
+	// we don't want to store an object under the wrong name).
+	if attrs.Name != w.o.object {
+		return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
+	}
+	if !utf8.ValidString(attrs.Name) {
+		return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
+	}
+	pr, pw := io.Pipe()
+	w.pw = pw
+	w.opened = true
+
+	var mediaOpts []googleapi.MediaOption
+	if c := attrs.ContentType; c != "" {
+		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
+	}
+
+	go func() {
+		defer close(w.donec)
+
+		call := w.o.c.raw.Objects.Insert(w.o.bucket, attrs.toRawObject(w.o.bucket)).
+			Media(pr, mediaOpts...).
+			Projection("full").
+			Context(w.ctx)
+
+		var resp *raw.Object
+		err := applyConds("NewWriter", w.o.conds, call)
+		if err == nil {
+			resp, err = call.Do()
+		}
+		if err != nil {
+			w.err = err
+			pr.CloseWithError(w.err)
+			return
+		}
+		w.obj = newObject(resp)
+	}()
+	return nil
+}
+
+// Write appends to w.
+func (w *Writer) Write(p []byte) (n int, err error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	if !w.opened {
+		if err := w.open(); err != nil {
+			return 0, err
+		}
+	}
+	return w.pw.Write(p)
+}
+
+// Close completes the write operation and flushes any buffered data.
+// If Close doesn't return an error, metadata about the written object
+// can be retrieved by calling Attrs.
+func (w *Writer) Close() error {
+	if !w.opened {
+		if err := w.open(); err != nil {
+			return err
+		}
+	}
+	if err := w.pw.Close(); err != nil {
+		return err
+	}
+	<-w.donec
+	return w.err
+}
+
+// CloseWithError aborts the write operation with the provided error.
+// CloseWithError always returns nil.
+func (w *Writer) CloseWithError(err error) error {
+	if !w.opened {
+		return nil
+	}
+	return w.pw.CloseWithError(err)
+}
+
+// Attrs returns metadata about a successfully-written object.
+// It's only valid to call it after Close returns nil.
+func (w *Writer) Attrs() *ObjectAttrs {
+	return w.obj
+}
diff --git a/vendor/cloud.google.com/go/storage/writer_test.go b/vendor/cloud.google.com/go/storage/writer_test.go
new file mode 100644
index 000000000..5c2537500
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/writer_test.go
@@ -0,0 +1,52 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/api/option"
+)
+
+type fakeTransport struct{}
+
+func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	return nil, fmt.Errorf("error handling request")
+}
+
+func TestErrorOnObjectsInsertCall(t *testing.T) {
+	ctx := context.Background()
+	hc := &http.Client{Transport: &fakeTransport{}}
+	client, err := NewClient(ctx, option.WithHTTPClient(hc))
+	if err != nil {
+		t.Errorf("error when creating client: %v", err)
+	}
+	wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
+	wc.ContentType = "text/plain"
+
+	// We can't check that the Write fails, since it depends on the write to the
+	// underlying fakeTransport failing, which is racy.
+	wc.Write([]byte("hello world"))
+
+	// Close must always return an error, though, since it waits for the transport to
+	// have closed.
+	if err := wc.Close(); err == nil {
+		t.Errorf("expected error on close, got nil")
+	}
+}
diff --git a/vendor/cloud.google.com/go/trace/sampling.go b/vendor/cloud.google.com/go/trace/sampling.go
new file mode 100644
index 000000000..33d4eae89
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/sampling.go
@@ -0,0 +1,90 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	crand "crypto/rand"
+	"encoding/binary"
+	"fmt"
+	"math/rand"
+	"sync"
+	"time"
+
+	"golang.org/x/time/rate"
+)
+
+type SamplingPolicy interface {
+	// Sample determines whether to sample the next request. If so, it also
+	// returns a string and rate describing the reason the request was chosen.
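+	//
+	// For instance, the limited sampler below reports ("fraction", f) when a
+	// request is chosen because it fell within the sampled fraction f, and
+	// ("qps", limit) when the rate limiter alone decides (i.e. when the
+	// fraction is 1).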
+	Sample() (sample bool, policy string, rate float64)
+}
+
+type sampler struct {
+	fraction float64
+	*rate.Limiter
+	*rand.Rand
+	sync.Mutex
+}
+
+func (s *sampler) Sample() (sample bool, reason string, rate float64) {
+	s.Lock()
+	x := s.Float64()
+	s.Unlock()
+	return s.sample(time.Now(), x)
+}
+
+// sample contains the deterministic, time-independent logic of Sample.
+func (s *sampler) sample(now time.Time, x float64) (bool, string, float64) {
+	if x >= s.fraction || !s.AllowN(now, 1) {
+		return false, "", 0.0
+	}
+	if s.fraction < 1.0 {
+		return true, "fraction", s.fraction
+	}
+	return true, "qps", float64(s.Limit())
+}
+
+// NewLimitedSampler returns a sampling policy that traces a given fraction of
+// requests, and enforces a limit on the number of traces per second.
+// Returns a nil SamplingPolicy if either fraction or maxqps is zero.
+func NewLimitedSampler(fraction, maxqps float64) (SamplingPolicy, error) {
+	if !(fraction >= 0) {
+		return nil, fmt.Errorf("invalid fraction %f", fraction)
+	}
+	if !(maxqps >= 0) {
+		return nil, fmt.Errorf("invalid maxqps %f", maxqps)
+	}
+	if fraction == 0 || maxqps == 0 {
+		return nil, nil
+	}
+	// Set a limit on the number of accumulated "tokens", to limit bursts of
+	// traced requests. Use one more than a second's worth of tokens, or 100,
+	// whichever is smaller.
+	// See https://godoc.org/golang.org/x/time/rate#NewLimiter.
+	maxTokens := 100
+	if maxqps < 99.0 {
+		maxTokens = 1 + int(maxqps)
+	}
+	var seed int64
+	if err := binary.Read(crand.Reader, binary.LittleEndian, &seed); err != nil {
+		seed = time.Now().UnixNano()
+	}
+	s := sampler{
+		fraction: fraction,
+		Limiter:  rate.NewLimiter(rate.Limit(maxqps), maxTokens),
+		Rand:     rand.New(rand.NewSource(seed)),
+	}
+	return &s, nil
+}
diff --git a/vendor/cloud.google.com/go/trace/trace.go b/vendor/cloud.google.com/go/trace/trace.go
new file mode 100644
index 000000000..9d2e963b8
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/trace.go
@@ -0,0 +1,616 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package trace is a Google Stackdriver Trace library.
+//
+// This package is still experimental and subject to change.
+//
+// See https://cloud.google.com/trace/api/#data_model for a discussion of traces
+// and spans.
+//
+// To initialize a client that connects to the Stackdriver Trace server, use the
+// NewClient function. Generally you will want to do this on program
+// initialization.
+//
+//	import "cloud.google.com/go/trace"
+//	...
+//	traceClient, err = trace.NewClient(ctx, projectID)
+//
+// Incoming requests can contain a header which indicates that a trace is being
+// performed for the corresponding operation. The SpanFromRequest function will
+// look for this header in an *http.Request. If it is present, SpanFromRequest
+// creates a span for the request; otherwise it returns nil.
+// +// func handler(w http.ResponseWriter, r *http.Request) { +// span := traceClient.SpanFromRequest(r) +// defer span.Finish() +// ... +// } +// +// You can create a new span as a child of an existing span with NewChild. +// +// childSpan := span.NewChild(name) +// ... +// childSpan.Finish() +// +// When sending an HTTP request to another server, NewRemoteChild will create +// a span to represent the time the current program waits for the request to +// complete, and attach a header to the outgoing request so that the trace will +// be propagated to the destination server. +// +// childSpan := span.NewRemoteChild(&httpRequest) +// ... +// childSpan.Finish() +// +// Spans can contain a map from keys to values that have useful information +// about the span. The elements of this map are called labels. Some labels, +// whose keys all begin with the string "trace.cloud.google.com/", are set +// automatically in the following ways: +// - SpanFromRequest sets some labels to data about the incoming request. +// - NewRemoteChild sets some labels to data about the outgoing request. +// - Finish sets a label to a stack trace, if the stack trace option is enabled +// in the incoming trace header. +// - The WithResponse option sets some labels to data about a response. +// You can also set labels using SetLabel. If a label is given a value +// automatically and by SetLabel, the automatically-set value is used. +// +// span.SetLabel(key, value) +// +// The WithResponse option can be used when Finish is called. +// +// childSpan := span.NewRemoteChild(outgoingReq) +// resp, err := http.DefaultClient.Do(outgoingReq) +// ... +// childSpan.Finish(trace.WithResponse(resp)) +// +// When a span created by SpanFromRequest is finished, the finished spans in the +// corresponding trace -- the span itself and its descendants -- are uploaded +// to the Stackdriver Trace server using the *Client that created the span. +// Finish returns immediately, and uploading occurs asynchronously. You can use +// the FinishWait function instead to wait until uploading has finished. +// +// err := span.FinishWait() +// +// Using contexts to pass *trace.Span objects through your program will often +// be a better approach than passing them around explicitly. This allows trace +// spans, and other request-scoped or part-of-request-scoped values, to be +// easily passed through API boundaries. Various Google Cloud libraries will +// retrieve trace spans from contexts and automatically create child spans for +// API requests. +// See https://blog.golang.org/context for more discussion of contexts. +// A derived context containing a trace span can be created using NewContext. +// +// span := traceClient.SpanFromRequest(r) +// ctx = trace.NewContext(ctx, span) +// +// The span can be retrieved from a context elsewhere in the program using +// FromContext. +// +// func foo(ctx context.Context) { +// newSpan := trace.FromContext(ctx).NewChild("in foo") +// defer newSpan.Finish() +// ... +// } +// +// Client.SpanFromRequest returns a nil *Span if there is no trace header in +// the request. All of the exported functions on *Span do nothing when the +// *Span is nil, so you do not need to guard against nil receivers yourself. +// +// SpanFromRequest also returns nil if the *Client is nil, so you can disable +// tracing by not initializing your *Client variable. 
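+//
+// By default, a span is created only for incoming requests that already carry
+// the trace header. To have this process initiate traces as well, set a
+// SamplingPolicy; for example (illustrative values):
+//
+//	p, err := trace.NewLimitedSampler(0.1, 10) // trace ~10% of requests, at most 10 per second
+//	...
+//	traceClient.SetSamplingPolicy(p)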
+package trace // import "cloud.google.com/go/trace" + +import ( + "crypto/rand" + "encoding/binary" + "encoding/json" + "fmt" + "log" + "net/http" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + api "google.golang.org/api/cloudtrace/v1" + "google.golang.org/api/gensupport" + "google.golang.org/api/option" + "google.golang.org/api/transport" +) + +const ( + httpHeader = `X-Cloud-Trace-Context` + userAgent = `gcloud-golang-trace/20160501` + cloudPlatformScope = `https://www.googleapis.com/auth/cloud-platform` + spanKindClient = `RPC_CLIENT` + spanKindServer = `RPC_SERVER` + spanKindUnspecified = `SPAN_KIND_UNSPECIFIED` + maxStackFrames = 20 + labelHost = `trace.cloud.google.com/http/host` + labelMethod = `trace.cloud.google.com/http/method` + labelStackTrace = `trace.cloud.google.com/stacktrace` + labelStatusCode = `trace.cloud.google.com/http/status_code` + labelURL = `trace.cloud.google.com/http/url` + labelSamplingPolicy = `trace.cloud.google.com/sampling_policy` + labelSamplingRate = `trace.cloud.google.com/sampling_rate` +) + +type contextKey struct{} + +type stackLabelValue struct { + Frames []stackFrame `json:"stack_frame"` +} + +type stackFrame struct { + Class string `json:"class_name,omitempty"` + Method string `json:"method_name"` + Filename string `json:"file_name"` + Line int64 `json:"line_number"` +} + +var ( + spanIDCounter uint64 + spanIDIncrement uint64 +) + +func init() { + // Set spanIDCounter and spanIDIncrement to random values. nextSpanID will + // return an arithmetic progression using these values, skipping zero. We set + // the LSB of spanIDIncrement to 1, so that the cycle length is 2^64. + binary.Read(rand.Reader, binary.LittleEndian, &spanIDCounter) + binary.Read(rand.Reader, binary.LittleEndian, &spanIDIncrement) + spanIDIncrement |= 1 + // Attach hook for autogenerated Google API calls. This will automatically + // create trace spans for API calls if there is a trace in the context. + gensupport.RegisterHook(requestHook) +} + +func requestHook(ctx context.Context, req *http.Request) func(resp *http.Response) { + span := FromContext(ctx) + if span == nil || req == nil { + return nil + } + span = span.NewRemoteChild(req) + return func(resp *http.Response) { + if resp != nil { + span.Finish(WithResponse(resp)) + } else { + span.Finish() + } + } +} + +// nextSpanID returns a new span ID. It will never return zero. +func nextSpanID() uint64 { + var id uint64 + for id == 0 { + id = atomic.AddUint64(&spanIDCounter, spanIDIncrement) + } + return id +} + +// nextTraceID returns a new trace ID. +func nextTraceID() string { + id1 := nextSpanID() + id2 := nextSpanID() + return fmt.Sprintf("%016x%016x", id1, id2) +} + +// Client is a client for uploading traces to the Google Stackdriver Trace server. +type Client struct { + service *api.Service + projectID string + policy SamplingPolicy +} + +// NewClient creates a new Google Stackdriver Trace client. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(cloudPlatformScope), + option.WithUserAgent(userAgent), + } + o = append(o, opts...) + hc, basePath, err := transport.NewHTTPClient(ctx, o...) 
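+	// (basePath is non-empty only when a client option overrides the default
+	// API endpoint; it is applied to the API service below.)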
+ if err != nil { + return nil, fmt.Errorf("creating HTTP client for Google Stackdriver Trace API: %v", err) + } + apiService, err := api.New(hc) + if err != nil { + return nil, fmt.Errorf("creating Google Stackdriver Trace API client: %v", err) + } + if basePath != "" { + // An option set a basepath, so override api.New's default. + apiService.BasePath = basePath + } + return &Client{ + service: apiService, + projectID: projectID, + }, nil +} + +// SetSamplingPolicy sets the SamplingPolicy that determines how often traces +// are initiated by this client. +func (c *Client) SetSamplingPolicy(p SamplingPolicy) { + if c != nil { + c.policy = p + } +} + +// SpanFromRequest returns a new trace span. If the incoming HTTP request's +// headers don't specify that the request should be traced, and the sampling +// policy doesn't determine the request should be traced, the returned span +// will be nil. +// It also returns nil if the client is nil. +// When Finish is called on the returned span, the span and its descendants are +// uploaded to the Google Stackdriver Trace server. +func (client *Client) SpanFromRequest(r *http.Request) *Span { + if client == nil { + return nil + } + span := traceInfoFromRequest(r) + var ( + sample bool + reason string + rate float64 + ) + if client.policy != nil { + sample, reason, rate = client.policy.Sample() + } + if sample { + if span == nil { + t := &trace{ + traceID: nextTraceID(), + options: optionTrace, + client: client, + } + span = startNewChildWithRequest(r, t, 0 /* parentSpanID */) + span.span.Kind = spanKindServer + span.rootSpan = true + } + span.SetLabel(labelSamplingPolicy, reason) + span.SetLabel(labelSamplingRate, fmt.Sprint(rate)) + } + if span == nil { + return nil + } + span.trace.client = client + return span +} + +// NewContext returns a derived context containing the span. +func NewContext(ctx context.Context, s *Span) context.Context { + if s == nil { + return ctx + } + return context.WithValue(ctx, contextKey{}, s) +} + +// FromContext returns the span contained in the context, or nil. +func FromContext(ctx context.Context) *Span { + s, _ := ctx.Value(contextKey{}).(*Span) + return s +} + +func traceInfoFromRequest(r *http.Request) *Span { + // See https://cloud.google.com/trace/docs/faq for the header format. + h := r.Header.Get(httpHeader) + // Return if the header is empty or missing, or if the header is unreasonably + // large, to avoid making unnecessary copies of a large string. + if h == "" || len(h) > 200 { + return nil + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return nil + } + traceID, h := h[:slash], h[slash+1:] + + // Parse the span id field. + semicolon := strings.Index(h, `;`) + if semicolon == -1 { + return nil + } + spanstr, h := h[:semicolon], h[semicolon+1:] + spanID, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return nil + } + + // Parse the options field. 
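+	// What remains of h has the form "o=<flags>": bit 0 (optionTrace) must be
+	// set for the request to be traced, and bit 1 (optionStack) additionally
+	// requests a stack-trace label on finished spans.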
+ if !strings.HasPrefix(h, "o=") { + return nil + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return nil + } + options := optionFlags(o) + if options&optionTrace == 0 { + return nil + } + + t := &trace{ + traceID: traceID, + options: options, + } + rootSpan := startNewChildWithRequest(r, t, spanID) + rootSpan.span.Kind = spanKindServer + rootSpan.rootSpan = true + return rootSpan +} + +type optionFlags uint32 + +const ( + optionTrace optionFlags = 1 << iota + optionStack +) + +type trace struct { + mu sync.Mutex + client *Client + traceID string + options optionFlags + spans []*Span // finished spans for this trace. +} + +// finish appends s to t.spans. If s is the root span, uploads the trace to the +// server. +func (t *trace) finish(s *Span, wait bool, opts ...FinishOption) error { + for _, o := range opts { + o.modifySpan(s) + } + s.end = time.Now() + t.mu.Lock() + t.spans = append(t.spans, s) + spans := t.spans + t.mu.Unlock() + if s.rootSpan { + if wait { + return t.upload(spans) + } + go func() { + err := t.upload(spans) + if err != nil { + log.Println("error uploading trace:", err) + } + }() + } + return nil +} + +func (t *trace) upload(spans []*Span) error { + apiSpans := make([]*api.TraceSpan, len(spans)) + for i, sp := range spans { + sp.span.StartTime = sp.start.In(time.UTC).Format(time.RFC3339Nano) + sp.span.EndTime = sp.end.In(time.UTC).Format(time.RFC3339Nano) + if t.options&optionStack != 0 { + sp.setStackLabel() + } + sp.SetLabel(labelHost, sp.host) + sp.SetLabel(labelURL, sp.url) + sp.SetLabel(labelMethod, sp.method) + if sp.statusCode != 0 { + sp.SetLabel(labelStatusCode, strconv.Itoa(sp.statusCode)) + } + apiSpans[i] = &sp.span + } + + traces := &api.Traces{ + Traces: []*api.Trace{ + { + ProjectId: t.client.projectID, + TraceId: t.traceID, + Spans: apiSpans, + }, + }, + } + _, err := t.client.service.Projects.PatchTraces(t.client.projectID, traces).Do() + return err +} + +// Span contains information about one span of a trace. +type Span struct { + trace *trace + span api.TraceSpan + start time.Time + end time.Time + rootSpan bool + stack [maxStackFrames]uintptr + host string + method string + url string + statusCode int +} + +// NewChild creates a new span with the given name as a child of s. +// If s is nil, does nothing and returns nil. +func (s *Span) NewChild(name string) *Span { + if s == nil { + return nil + } + return startNewChild(name, s.trace, s.span.SpanId) +} + +// NewRemoteChild creates a new span as a child of s. +// Span details are set from an outgoing *http.Request r. +// A header is set in r so that the trace context is propagated to the destination. +// If s is nil, does nothing and returns nil. 
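+// The header written to r has the same "traceID/spanID;o=options" form that
+// SpanFromRequest parses on the receiving side; see spanHeader.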
+func (s *Span) NewRemoteChild(r *http.Request) *Span { + if s == nil { + return nil + } + newSpan := startNewChildWithRequest(r, s.trace, s.span.SpanId) + r.Header[httpHeader] = []string{newSpan.spanHeader()} + return newSpan +} + +func startNewChildWithRequest(r *http.Request, trace *trace, parentSpanId uint64) *Span { + newSpan := startNewChild(r.URL.Path, trace, parentSpanId) + if r.Host == "" { + newSpan.host = r.URL.Host + } else { + newSpan.host = r.Host + } + newSpan.method = r.Method + newSpan.url = r.URL.String() + return newSpan +} + +func startNewChild(name string, trace *trace, parentSpanId uint64) *Span { + newSpan := &Span{ + trace: trace, + span: api.TraceSpan{ + Kind: spanKindClient, + Name: name, + ParentSpanId: parentSpanId, + SpanId: nextSpanID(), + }, + start: time.Now(), + } + if trace.options&optionStack != 0 { + _ = runtime.Callers(1, newSpan.stack[:]) + } + return newSpan +} + +// TraceID returns the ID of the trace to which s belongs. +func (s *Span) TraceID() string { + if s == nil { + return "" + } + return s.trace.traceID +} + +// SetLabel sets the label for the given key to the given value. +// If the value is empty, the label for that key is deleted. +// If a label is given a value automatically and by SetLabel, the +// automatically-set value is used. +// If s is nil, does nothing. +func (s *Span) SetLabel(key, value string) { + if s == nil { + return + } + if value == "" { + if s.span.Labels != nil { + delete(s.span.Labels, key) + } + return + } + if s.span.Labels == nil { + s.span.Labels = make(map[string]string) + } + s.span.Labels[key] = value +} + +type FinishOption interface { + modifySpan(s *Span) +} + +type withResponse struct { + *http.Response +} + +// WithResponse returns an option that can be passed to Finish that indicates +// that some labels for the span should be set using the given *http.Response. +func WithResponse(resp *http.Response) FinishOption { + return withResponse{resp} +} +func (u withResponse) modifySpan(s *Span) { + if u.Response != nil { + s.statusCode = u.StatusCode + } +} + +// Finish declares that the span has finished. +// +// If s is nil, Finish does nothing and returns nil. +// +// If the option trace.WithResponse(resp) is passed, then some labels are set +// for s using information in the given *http.Response. This is useful when the +// span is for an outgoing http request; s will typically have been created by +// NewRemoteChild in this case. +// +// If s is a root span (one created by SpanFromRequest) then s, and all its +// descendant spans that have finished, are uploaded to the Google Stackdriver +// Trace server asynchronously. +func (s *Span) Finish(opts ...FinishOption) { + if s == nil { + return + } + s.trace.finish(s, false, opts...) +} + +// FinishWait is like Finish, but if s is a root span, it waits until uploading +// is finished, then returns an error if one occurred. +func (s *Span) FinishWait(opts ...FinishOption) error { + if s == nil { + return nil + } + return s.trace.finish(s, true, opts...) +} + +func (s *Span) spanHeader() string { + // See https://cloud.google.com/trace/docs/faq for the header format. 
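+	// The result looks like "0123456789abcdef0123456789abcdef/42;o=1": 32 hex
+	// digits of trace ID, a decimal span ID, and the decimal option flags.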
+ return fmt.Sprintf("%s/%d;o=%d", s.trace.traceID, s.span.SpanId, s.trace.options) +} + +func (s *Span) setStackLabel() { + var stack stackLabelValue + lastSigPanic, inTraceLibrary := false, true + for _, pc := range s.stack { + if pc == 0 { + break + } + if !lastSigPanic { + pc-- + } + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + // Name has one of the following forms: + // path/to/package.Foo + // path/to/package.(Type).Foo + // For the first form, we store the whole name in the Method field of the + // stack frame. For the second form, we set the Method field to "Foo" and + // the Class field to "path/to/package.(Type)". + name := fn.Name() + if inTraceLibrary && !strings.HasPrefix(name, "cloud.google.com/go/trace.") { + inTraceLibrary = false + } + var class string + if i := strings.Index(name, ")."); i != -1 { + class, name = name[:i+1], name[i+2:] + } + frame := stackFrame{ + Class: class, + Method: name, + Filename: file, + Line: int64(line), + } + if inTraceLibrary && len(stack.Frames) == 1 { + stack.Frames[0] = frame + } else { + stack.Frames = append(stack.Frames, frame) + } + lastSigPanic = fn.Name() == "runtime.sigpanic" + } + if label, err := json.Marshal(stack); err == nil { + s.SetLabel(labelStackTrace, string(label)) + } +} diff --git a/vendor/cloud.google.com/go/trace/trace_test.go b/vendor/cloud.google.com/go/trace/trace_test.go new file mode 100644 index 000000000..6fb1169b0 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/trace_test.go @@ -0,0 +1,420 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "reflect" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/storage" + "golang.org/x/net/context" + api "google.golang.org/api/cloudtrace/v1" + compute "google.golang.org/api/compute/v1" + "google.golang.org/api/option" +) + +const testProjectID = "testproject" + +type fakeRoundTripper struct { + reqc chan *http.Request +} + +func newFakeRoundTripper() *fakeRoundTripper { + return &fakeRoundTripper{reqc: make(chan *http.Request)} +} + +func (rt *fakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + rt.reqc <- r + resp := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader("{}")), + } + return resp, nil +} + +func newTestClient(rt http.RoundTripper) *Client { + t, err := NewClient(context.Background(), testProjectID, option.WithHTTPClient(&http.Client{Transport: rt})) + if err != nil { + panic(err) + } + return t +} + +// makeRequests makes some requests. +// req is an incoming request used to construct the trace. traceClient is the +// client used to upload the trace. rt is the trace client's http client's +// transport. This is used to retrieve the trace uploaded by the client, if +// any. If expectTrace is true, we expect a trace will be uploaded. 
If +// synchronous is true, the call to Finish is expected not to return before the +// client has uploaded any traces. +func makeRequests(t *testing.T, req *http.Request, traceClient *Client, rt *fakeRoundTripper, synchronous bool, expectTrace bool) *http.Request { + span := traceClient.SpanFromRequest(req) + ctx := NewContext(context.Background(), span) + + // An HTTP request. + { + req2, err := http.NewRequest("GET", "http://example.com/bar", nil) + if err != nil { + t.Fatal(err) + } + resp := &http.Response{StatusCode: 200} + s := span.NewRemoteChild(req2) + s.Finish(WithResponse(resp)) + } + + // An autogenerated API call. + { + rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)} + hc := &http.Client{Transport: rt} + computeClient, err := compute.New(hc) + if err != nil { + t.Fatal(err) + } + _, err = computeClient.Zones.List(testProjectID).Context(ctx).Do() + if err != nil { + t.Fatal(err) + } + } + + // A cloud library call that uses the autogenerated API. + { + rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)} + hc := &http.Client{Transport: rt} + storageClient, err := storage.NewClient(context.Background(), option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + var objAttrsList []*storage.ObjectAttrs + it := storageClient.Bucket("testbucket").Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err != nil && err != storage.Done { + t.Fatal(err) + } + if err == storage.Done { + break + } + objAttrsList = append(objAttrsList, objAttrs) + } + } + + done := make(chan struct{}) + go func() { + if synchronous { + err := span.FinishWait() + if err != nil { + t.Errorf("Unexpected error from span.FinishWait: %v", err) + } + } else { + span.Finish() + } + done <- struct{}{} + }() + if !expectTrace { + <-done + select { + case <-rt.reqc: + t.Errorf("Got a trace, expected none.") + case <-time.After(5 * time.Millisecond): + } + return nil + } else if !synchronous { + <-done + return <-rt.reqc + } else { + select { + case <-done: + t.Errorf("Synchronous Finish didn't wait for trace upload.") + return <-rt.reqc + case <-time.After(5 * time.Millisecond): + r := <-rt.reqc + <-done + return r + } + } +} + +func TestTrace(t *testing.T) { + testTrace(t, false) +} + +func TestTraceWithWait(t *testing.T) { + testTrace(t, true) +} + +func testTrace(t *testing.T, synchronous bool) { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + req.Header["X-Cloud-Trace-Context"] = []string{`0123456789ABCDEF0123456789ABCDEF/42;o=3`} + + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + + uploaded := makeRequests(t, req, traceClient, rt, synchronous, true) + + if uploaded == nil { + t.Fatalf("No trace uploaded, expected one.") + } + + expected := api.Traces{ + Traces: []*api.Trace{ + { + ProjectId: testProjectID, + Spans: []*api.TraceSpan{ + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "example.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "http://example.com/bar", + }, + Name: "/bar", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/compute/v1/projects/testproject/zones", + }, + Name: "/compute/v1/projects/testproject/zones", + }, + { + Kind: 
"RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/storage/v1/b/testbucket/o", + }, + Name: "/storage/v1/b/testbucket/o", + }, + { + Kind: "RPC_SERVER", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "example.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/url": "http://example.com/foo", + }, + Name: "/foo", + }, + }, + TraceId: "0123456789ABCDEF0123456789ABCDEF", + }, + }, + } + + body, err := ioutil.ReadAll(uploaded.Body) + if err != nil { + t.Fatal(err) + } + var patch api.Traces + err = json.Unmarshal(body, &patch) + if err != nil { + t.Fatal(err) + } + + if len(patch.Traces) != len(expected.Traces) || len(patch.Traces[0].Spans) != len(expected.Traces[0].Spans) { + got, _ := json.Marshal(patch) + want, _ := json.Marshal(expected) + t.Fatalf("PatchTraces request: got %s want %s", got, want) + } + + n := len(patch.Traces[0].Spans) + rootSpan := patch.Traces[0].Spans[n-1] + for i, s := range patch.Traces[0].Spans { + if a, b := s.StartTime, s.EndTime; a > b { + t.Errorf("span %d start time is later than its end time (%q, %q)", i, a, b) + } + if a, b := rootSpan.StartTime, s.StartTime; a > b { + t.Errorf("trace start time is later than span %d start time (%q, %q)", i, a, b) + } + if a, b := s.EndTime, rootSpan.EndTime; a > b { + t.Errorf("span %d end time is later than trace end time (%q, %q)", i, a, b) + } + if i > 1 && i < n-1 { + if a, b := patch.Traces[0].Spans[i-1].EndTime, s.StartTime; a > b { + t.Errorf("span %d end time is later than span %d start time (%q, %q)", i-1, i, a, b) + } + } + } + + if x := rootSpan.ParentSpanId; x != 42 { + t.Errorf("Incorrect ParentSpanId: got %d want %d", x, 42) + } + for i, s := range patch.Traces[0].Spans { + if x, y := rootSpan.SpanId, s.ParentSpanId; i < n-1 && x != y { + t.Errorf("Incorrect ParentSpanId in span %d: got %d want %d", i, y, x) + } + } + for i, s := range patch.Traces[0].Spans { + s.EndTime = "" + labels := &expected.Traces[0].Spans[i].Labels + for key, value := range *labels { + if v, ok := s.Labels[key]; !ok { + t.Errorf("Span %d is missing Label %q:%q", i, key, value) + } else if key == "trace.cloud.google.com/http/url" { + if !strings.HasPrefix(v, value) { + t.Errorf("Span %d Label %q: got value %q want prefix %q", i, key, v, value) + } + } else if v != value { + t.Errorf("Span %d Label %q: got value %q want %q", i, key, v, value) + } + } + for key := range s.Labels { + if _, ok := (*labels)[key]; key != "trace.cloud.google.com/stacktrace" && !ok { + t.Errorf("Span %d: unexpected label %q", i, key) + } + } + *labels = nil + s.Labels = nil + s.ParentSpanId = 0 + if s.SpanId == 0 { + t.Errorf("Incorrect SpanId: got 0 want nonzero") + } + s.SpanId = 0 + s.StartTime = "" + } + if !reflect.DeepEqual(patch, expected) { + got, _ := json.Marshal(patch) + want, _ := json.Marshal(expected) + t.Errorf("PatchTraces request: got %s want %s", got, want) + } +} + +func TestNoTrace(t *testing.T) { + testNoTrace(t, false) +} + +func TestNoTraceWithWait(t *testing.T) { + testNoTrace(t, true) +} + +func testNoTrace(t *testing.T, synchronous bool) { + for _, header := range []string{ + `0123456789ABCDEF0123456789ABCDEF/42;o=2`, + `0123456789ABCDEF0123456789ABCDEF/42;o=0`, + `0123456789ABCDEF0123456789ABCDEF/42`, + `0123456789ABCDEF0123456789ABCDEF`, + ``, + } { + req, err := 
http.NewRequest("GET", "http://example.com/foo", nil) + if header != "" { + req.Header["X-Cloud-Trace-Context"] = []string{header} + } + if err != nil { + t.Fatal(err) + } + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + uploaded := makeRequests(t, req, traceClient, rt, synchronous, false) + if uploaded != nil { + t.Errorf("Got a trace, expected none.") + } + } +} + +func TestSample(t *testing.T) { + // A deterministic test of the sampler logic. + type testCase struct { + rate float64 + maxqps float64 + want int + } + const delta = 25 * time.Millisecond + for _, test := range []testCase{ + // qps won't matter, so we will sample half of the 79 calls + {0.50, 100, 40}, + // with 1 qps and a burst of 2, we will sample twice in second #1, once in the partial second #2 + {0.50, 1, 3}, + } { + sp, err := NewLimitedSampler(test.rate, test.maxqps) + if err != nil { + t.Fatal(err) + } + s := sp.(*sampler) + sampled := 0 + tm := time.Now() + for i := 0; i < 80; i++ { + if ok, _, _ := s.sample(tm, float64(i%2)); ok { + sampled++ + } + tm = tm.Add(delta) + } + if sampled != test.want { + t.Errorf("rate=%f, maxqps=%f: got %d samples, want %d", test.rate, test.maxqps, sampled, test.want) + } + } +} + +func TestSampling(t *testing.T) { + // This scope tests sampling in a larger context, with real time and randomness. + wg := sync.WaitGroup{} + type testCase struct { + rate float64 + maxqps float64 + expectedRange [2]int + } + for _, test := range []testCase{ + {0, 5, [2]int{0, 0}}, + {5, 0, [2]int{0, 0}}, + {0.50, 100, [2]int{20, 60}}, + {0.50, 1, [2]int{3, 4}}, // Windows, with its less precise clock, sometimes gives 4. + } { + wg.Add(1) + go func(test testCase) { + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + p, err := NewLimitedSampler(test.rate, test.maxqps) + if err != nil { + t.Fatalf("NewLimitedSampler: %v", err) + } + traceClient.SetSamplingPolicy(p) + ticker := time.NewTicker(25 * time.Millisecond) + sampled := 0 + for i := 0; i < 79; i++ { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + span := traceClient.SpanFromRequest(req) + span.Finish() + select { + case <-rt.reqc: + <-ticker.C + sampled++ + case <-ticker.C: + } + } + ticker.Stop() + if test.expectedRange[0] > sampled || sampled > test.expectedRange[1] { + t.Errorf("rate=%f, maxqps=%f: got %d samples want ∈ %v", test.rate, test.maxqps, sampled, test.expectedRange) + } + wg.Done() + }(test) + } + wg.Wait() +} diff --git a/vendor/cloud.google.com/go/vision/annotations.go b/vendor/cloud.google.com/go/vision/annotations.go new file mode 100644 index 000000000..5de7de05b --- /dev/null +++ b/vendor/cloud.google.com/go/vision/annotations.go @@ -0,0 +1,259 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vision + +import ( + "image" + + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// Annotations contains all the annotations performed by the API on a single image. +// A nil field indicates either that the corresponding feature was not requested, +// or that annotation failed for that feature. +type Annotations struct { + // Faces holds the results of face detection. + Faces []*FaceAnnotation + // Landmarks holds the results of landmark detection. + Landmarks []*EntityAnnotation + // Logos holds the results of logo detection. + Logos []*EntityAnnotation + // Labels holds the results of label detection. + Labels []*EntityAnnotation + // Texts holds the results of text detection. + Texts []*EntityAnnotation + // SafeSearch holds the results of safe-search detection. + SafeSearch *SafeSearchAnnotation + // ImageProps contains properties of the annotated image. + ImageProps *ImageProps + + // If non-nil, then one or more of the attempted annotations failed. + // Non-nil annotations are guaranteed to be correct, even if Error is + // non-nil. + Error error +} + +func annotationsFromProto(res *pb.AnnotateImageResponse) *Annotations { + as := &Annotations{} + for _, a := range res.FaceAnnotations { + as.Faces = append(as.Faces, faceAnnotationFromProto(a)) + } + for _, a := range res.LandmarkAnnotations { + as.Landmarks = append(as.Landmarks, entityAnnotationFromProto(a)) + } + for _, a := range res.LogoAnnotations { + as.Logos = append(as.Logos, entityAnnotationFromProto(a)) + } + for _, a := range res.LabelAnnotations { + as.Labels = append(as.Labels, entityAnnotationFromProto(a)) + } + for _, a := range res.TextAnnotations { + as.Texts = append(as.Texts, entityAnnotationFromProto(a)) + } + as.SafeSearch = safeSearchAnnotationFromProto(res.SafeSearchAnnotation) + as.ImageProps = imagePropertiesFromProto(res.ImagePropertiesAnnotation) + if res.Error != nil { + // res.Error is a google.rpc.Status. Convert to a Go error. Use a gRPC + // error because it preserves the code as a separate field. + // TODO(jba): preserve the details field. + as.Error = grpc.Errorf(codes.Code(res.Error.Code), "%s", res.Error.Message) + } + return as +} + +// A FaceAnnotation describes the results of face detection on an image. +type FaceAnnotation struct { + // BoundingPoly is the bounding polygon around the face. The coordinates of + // the bounding box are in the original image's scale, as returned in + // ImageParams. The bounding box is computed to "frame" the face in + // accordance with human expectations. It is based on the landmarker + // results. Note that one or more x and/or y coordinates may not be + // generated in the BoundingPoly (the polygon will be unbounded) if only a + // partial face appears in the image to be annotated. + BoundingPoly []image.Point + + // FDBoundingPoly is tighter than BoundingPoly, and + // encloses only the skin part of the face. Typically, it is used to + // eliminate the face from any image analysis that detects the "amount of + // skin" visible in an image. It is not based on the landmarker results, only + // on the initial face detection, hence the fd (face detection) prefix. + FDBoundingPoly []image.Point + + // Landmarks are detected face landmarks. + Face FaceLandmarks + + // RollAngle indicates the amount of clockwise/anti-clockwise rotation of + // the face relative to the image vertical, about the axis perpendicular to + // the face. Range [-180,180]. 
+ RollAngle float32 + + // PanAngle is the yaw angle: the leftward/rightward angle that the face is + // pointing, relative to the vertical plane perpendicular to the image. Range + // [-180,180]. + PanAngle float32 + + // TiltAngle is the pitch angle: the upwards/downwards angle that the face is + // pointing relative to the image's horizontal plane. Range [-180,180]. + TiltAngle float32 + + // DetectionConfidence is the detection confidence. The range is [0, 1]. + DetectionConfidence float32 + + // LandmarkingConfidence is the face landmarking confidence. The range is [0, 1]. + LandmarkingConfidence float32 + + // Likelihoods expresses the likelihood of various aspects of the face. + Likelihoods *FaceLikelihoods +} + +func faceAnnotationFromProto(pfa *pb.FaceAnnotation) *FaceAnnotation { + fa := &FaceAnnotation{ + BoundingPoly: boundingPolyFromProto(pfa.BoundingPoly), + FDBoundingPoly: boundingPolyFromProto(pfa.FdBoundingPoly), + RollAngle: pfa.RollAngle, + PanAngle: pfa.PanAngle, + TiltAngle: pfa.TiltAngle, + DetectionConfidence: pfa.DetectionConfidence, + LandmarkingConfidence: pfa.LandmarkingConfidence, + Likelihoods: &FaceLikelihoods{ + Joy: Likelihood(pfa.JoyLikelihood), + Sorrow: Likelihood(pfa.SorrowLikelihood), + Anger: Likelihood(pfa.AngerLikelihood), + Surprise: Likelihood(pfa.SurpriseLikelihood), + UnderExposed: Likelihood(pfa.UnderExposedLikelihood), + Blurred: Likelihood(pfa.BlurredLikelihood), + Headwear: Likelihood(pfa.HeadwearLikelihood), + }, + } + populateFaceLandmarks(pfa.Landmarks, &fa.Face) + return fa +} + +// An EntityAnnotation describes the results of a landmark, label, logo or text +// detection on an image. +type EntityAnnotation struct { + // ID is an opaque entity ID. Some IDs might be available in Knowledge Graph(KG). + // For more details on KG please see: + // https://developers.google.com/knowledge-graph/ + ID string + + // Locale is the language code for the locale in which the entity textual + // description (next field) is expressed. + Locale string + + // Description is the entity textual description, expressed in the language of Locale. + Description string + + // Score is the overall score of the result. Range [0, 1]. + Score float32 + + // Confidence is the accuracy of the entity detection in an image. + // For example, for an image containing the Eiffel Tower, this field represents + // the confidence that there is a tower in the query image. Range [0, 1]. + Confidence float32 + + // Topicality is the relevancy of the ICA (Image Content Annotation) label to the + // image. For example, the relevancy of 'tower' to an image containing + // 'Eiffel Tower' is likely higher than an image containing a distant towering + // building, though the confidence that there is a tower may be the same. + // Range [0, 1]. + Topicality float32 + + // BoundingPoly is the image region to which this entity belongs. Not filled currently + // for label detection. For text detection, BoundingPolys + // are produced for the entire text detected in an image region, followed by + // BoundingPolys for each word within the detected text. + BoundingPoly []image.Point + + // Locations contains the location information for the detected entity. + // Multiple LatLng structs can be present since one location may indicate the + // location of the scene in the query image, and another the location of the + // place where the query image was taken. Location information is usually + // present for landmarks. 
+ Locations []LatLng + + // Properties are additional optional Property fields. + // For example a different kind of score or string that qualifies the entity. + Properties []Property +} + +func entityAnnotationFromProto(e *pb.EntityAnnotation) *EntityAnnotation { + var locs []LatLng + for _, li := range e.Locations { + locs = append(locs, latLngFromProto(li.LatLng)) + } + var props []Property + for _, p := range e.Properties { + props = append(props, propertyFromProto(p)) + } + return &EntityAnnotation{ + ID: e.Mid, + Locale: e.Locale, + Description: e.Description, + Score: e.Score, + Confidence: e.Confidence, + Topicality: e.Topicality, + BoundingPoly: boundingPolyFromProto(e.BoundingPoly), + Locations: locs, + Properties: props, + } +} + +// SafeSearchAnnotation describes the results of a SafeSearch detection on an image. +type SafeSearchAnnotation struct { + // Adult is the likelihood that the image contains adult content. + Adult Likelihood + + // Spoof is the likelihood that an obvious modification was made to the + // image's canonical version to make it appear funny or offensive. + Spoof Likelihood + + // Medical is the likelihood that this is a medical image. + Medical Likelihood + + // Violence is the likelihood that this image represents violence. + Violence Likelihood +} + +func safeSearchAnnotationFromProto(s *pb.SafeSearchAnnotation) *SafeSearchAnnotation { + if s == nil { + return nil + } + return &SafeSearchAnnotation{ + Adult: Likelihood(s.Adult), + Spoof: Likelihood(s.Spoof), + Medical: Likelihood(s.Medical), + Violence: Likelihood(s.Violence), + } +} + +// ImageProps describes properties of the image itself, like the dominant colors. +type ImageProps struct { + // DominantColors describes the dominant colors of the image. + DominantColors []*ColorInfo +} + +func imagePropertiesFromProto(ip *pb.ImageProperties) *ImageProps { + if ip == nil || ip.DominantColors == nil { + return nil + } + var cinfos []*ColorInfo + for _, ci := range ip.DominantColors.Colors { + cinfos = append(cinfos, colorInfoFromProto(ci)) + } + return &ImageProps{DominantColors: cinfos} +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/doc.go b/vendor/cloud.google.com/go/vision/apiv1/doc.go new file mode 100644 index 000000000..a96cffcdc --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/doc.go @@ -0,0 +1,23 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package vision is an experimental, auto-generated package for the +// vision API. +// +// Integrates Google Vision features, including image labeling, face, logo, +// and landmark detection, optical character recognition (OCR), and detection +// of explicit content, into applications. 
+package vision // import "cloud.google.com/go/vision/apiv1" diff --git a/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go new file mode 100644 index 000000000..cb259d57c --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go @@ -0,0 +1,136 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + "fmt" + "runtime" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of this client. +type CallOptions struct { + BatchAnnotateImages []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("vision.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + ), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + + return &CallOptions{ + BatchAnnotateImages: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with ImageAnnotator. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client visionpb.ImageAnnotatorClient + + // The call options for this service. + CallOptions *CallOptions + + // The metadata to be sent with each request. + metadata map[string][]string +} + +// NewClient creates a new image_annotator service client. +// +// Service that performs Google Cloud Vision API detection tasks, such as face, +// landmark, logo, label, and text detection, over client images, and returns +// detected entities from the images. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + client: visionpb.NewImageAnnotatorClient(conn), + CallOptions: defaultCallOptions(), + } + c.SetGoogleClientInfo("gax", gax.Version) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
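+// Close simply closes the underlying gRPC connection (see Connection); the
+// client is not usable afterwards.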
+func (c *Client) Close() error {
+	return c.conn.Close()
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *Client) SetGoogleClientInfo(name, version string) {
+	c.metadata = map[string][]string{
+		"x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())},
+	}
+}
+
+// BatchAnnotateImages runs image detection and annotation for a batch of images.
+func (c *Client) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest) (*visionpb.BatchAnnotateImagesResponse, error) {
+	ctx = metadata.NewContext(ctx, c.metadata)
+	var resp *visionpb.BatchAnnotateImagesResponse
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		resp, err = c.client.BatchAnnotateImages(ctx, req)
+		return err
+	}, c.CallOptions.BatchAnnotateImages...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go
new file mode 100644
index 000000000..fbd9bdef9
--- /dev/null
+++ b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go
@@ -0,0 +1,51 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package vision_test
+
+import (
+	"cloud.google.com/go/vision/apiv1"
+	"golang.org/x/net/context"
+	visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1"
+)
+
+func ExampleNewClient() {
+	ctx := context.Background()
+	c, err := vision.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use client.
+	_ = c
+}
+
+func ExampleClient_BatchAnnotateImages() {
+	ctx := context.Background()
+	c, err := vision.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	req := &visionpb.BatchAnnotateImagesRequest{
+		// TODO: Fill request struct fields.
+	}
+	resp, err := c.BatchAnnotateImages(ctx, req)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// TODO: Use resp.
+	_ = resp
+}
diff --git a/vendor/cloud.google.com/go/vision/apiv1/vision.go b/vendor/cloud.google.com/go/vision/apiv1/vision.go
new file mode 100644
index 000000000..d72d1f2f1
--- /dev/null
+++ b/vendor/cloud.google.com/go/vision/apiv1/vision.go
@@ -0,0 +1,21 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package vision
+
+const (
+	gapicNameVersion = "gapic/0.1.0"
+)
diff --git a/vendor/cloud.google.com/go/vision/doc.go b/vendor/cloud.google.com/go/vision/doc.go
new file mode 100644
index 000000000..7fe6e1521
--- /dev/null
+++ b/vendor/cloud.google.com/go/vision/doc.go
@@ -0,0 +1,96 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package vision provides a client for the Google Cloud Vision API.
+
+Google Cloud Vision allows easy integration of vision detection features
+into developer applications, including image labeling, face and landmark
+detection, optical character recognition (OCR), and tagging of explicit
+content. For more information about Cloud Vision, read the Google Cloud Vision API
+Documentation at https://cloud.google.com/vision/docs.
+
+Creating Images
+
+The Cloud Vision API supports a variety of image file formats, including JPEG,
+PNG8, PNG24, Animated GIF (first frame only), and RAW. See
+https://cloud.google.com/vision/docs/image-best-practices#image_types for the
+complete list of formats. Be aware that Cloud Vision sets upper limits on file
+size as well as on the total combined size of all images in a request. Reducing
+your file size can significantly improve throughput; however, be careful not to
+reduce image quality in the process. See
+https://cloud.google.com/vision/docs/image-best-practices#image_sizing for
+current file size limits.
+
+Creating an Image instance does not perform an API request.
+
+Use NewImageFromReader to obtain an image from any io.Reader, such as an open file:
+
+	f, err := os.Open("path/to/image.jpg")
+	if err != nil { ... }
+	defer f.Close()
+	img, err := vision.NewImageFromReader(f)
+	if err != nil { ... }
+
+Use NewImageFromGCS to refer to an image in Google Cloud Storage:
+
+	img := vision.NewImageFromGCS("gs://my-bucket/my-image.png")
+
+Annotating Images
+
+Client.Annotate is the most general method in the package. It can run multiple
+detections on multiple images with a single API call.
+
+To describe the detections you want to perform on an image, create an
+AnnotateRequest and specify the maximum number of results to return for each
+detection of interest. The exceptions are safe search and image properties,
+where a boolean is used instead.
+
+	resultSlice, err := client.Annotate(ctx, &vision.AnnotateRequest{
+		Image:      img,
+		MaxLogos:   5,
+		MaxTexts:   100,
+		SafeSearch: true,
+	})
+	if err != nil { ... }
+
+You can pass as many AnnotateRequests as desired to client.Annotate. The return
+value is a slice of Annotations values. Each Annotations value may contain an Error
+along with one or more successful results. The failed detections will have a nil annotation.
+
+	result := resultSlice[0]
+	if result.Error != nil { ... } // some detections failed
+	for _, logo := range result.Logos { ... }
+	for _, text := range result.Texts { ... }
+	if result.SafeSearch != nil { ... }
+
+Other methods on Client run a single detection on a single image. For instance,
+Client.DetectFaces will run face detection on the provided Image. These methods
+return a single annotation of the appropriate type (for example, DetectFaces
+returns a FaceAnnotation). The error return value incorporates both API call
+errors and the detection errors stored in Annotations.Error, simplifying your
+logic.
+
+	faces, err := client.DetectFaces(ctx, img, 10) // maximum of 10 faces
+	if err != nil { ... }
+
+Here faces is a slice of FaceAnnotations. The Face field of each FaceAnnotation
+provides easy access to the positions of facial features:
+
+	fmt.Println(faces[0].Face.Nose.Tip)
+	fmt.Println(faces[0].Face.Eyes.Left.Pupil)
+
+This package is experimental and subject to API changes.
+*/
+package vision // import "cloud.google.com/go/vision"
diff --git a/vendor/cloud.google.com/go/vision/examples_test.go b/vendor/cloud.google.com/go/vision/examples_test.go
new file mode 100644
index 000000000..cfdcb4f55
--- /dev/null
+++ b/vendor/cloud.google.com/go/vision/examples_test.go
@@ -0,0 +1,99 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vision_test
+
+import (
+	"fmt"
+	"os"
+
+	"cloud.google.com/go/vision"
+	"golang.org/x/net/context"
+)
+
+func ExampleNewClient() {
+	ctx := context.Background()
+	client, err := vision.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// Use the client.
+
+	// Close the client when finished.
+	if err := client.Close(); err != nil {
+		// TODO: handle error.
+	}
+}
+
+func Example_NewImageFromReader() {
+	f, err := os.Open("path/to/image.jpg")
+	if err != nil {
+		// TODO: handle error.
+	}
+	img, err := vision.NewImageFromReader(f)
+	if err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println(img)
+}
+
+func Example_NewImageFromGCS() {
+	img := vision.NewImageFromGCS("gs://my-bucket/my-image.png")
+	fmt.Println(img)
+}
+
+func ExampleClient_Annotate_oneImage() {
+	ctx := context.Background()
+	client, err := vision.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	annsSlice, err := client.Annotate(ctx, &vision.AnnotateRequest{
+		Image:      vision.NewImageFromGCS("gs://my-bucket/my-image.png"),
+		MaxLogos:   100,
+		MaxTexts:   100,
+		SafeSearch: true,
+	})
+	if err != nil {
+		// TODO: handle error.
+	}
+	anns := annsSlice[0]
+	if anns.Logos != nil {
+		fmt.Println(anns.Logos)
+	}
+	if anns.Texts != nil {
+		fmt.Println(anns.Texts)
+	}
+	if anns.SafeSearch != nil {
+		fmt.Println(anns.SafeSearch)
+	}
+	if anns.Error != nil {
+		fmt.Printf("at least one of the features failed: %v", anns.Error)
+	}
+}
+
+func ExampleClient_DetectFaces() {
+	ctx := context.Background()
+	client, err := vision.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+ } + img := vision.NewImageFromGCS("gs://my-bucket/my-image.png") + faces, err := client.DetectFaces(ctx, img, 10) + if err != nil { + // TODO: handle error. + } + fmt.Println(faces[0].Face.Nose.Tip) + fmt.Println(faces[0].Face.Eyes.Left.Pupil) +} diff --git a/vendor/cloud.google.com/go/vision/face.go b/vendor/cloud.google.com/go/vision/face.go new file mode 100644 index 000000000..a7de3e5c6 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/face.go @@ -0,0 +1,172 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "log" + + "github.com/golang/geo/r3" + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +// FaceLandmarks contains the positions of facial features detected by the service. +// TODO(jba): write doc for all +type FaceLandmarks struct { + Eyebrows Eyebrows + Eyes Eyes + Ears Ears + Nose Nose + Mouth Mouth + Chin Chin + Forehead *r3.Vector +} + +type Eyebrows struct { + Left, Right Eyebrow +} + +type Eyebrow struct { + Top, Left, Right *r3.Vector +} + +type Eyes struct { + Left, Right Eye +} + +type Eye struct { + Left, Right, Top, Bottom, Center, Pupil *r3.Vector +} + +type Ears struct { + Left, Right *r3.Vector +} + +type Nose struct { + Left, Right, Top, Bottom, Tip *r3.Vector +} + +type Mouth struct { + Left, Center, Right, UpperLip, LowerLip *r3.Vector +} + +type Chin struct { + Left, Center, Right *r3.Vector +} + +// FaceLikelihoods expresses the likelihood of various aspects of a face. +type FaceLikelihoods struct { + // Joy is the likelihood that the face expresses joy. + Joy Likelihood + + // Sorrow is the likelihood that the face expresses sorrow. + Sorrow Likelihood + + // Anger is the likelihood that the face expresses anger. + Anger Likelihood + + // Surprise is the likelihood that the face expresses surprise. + Surprise Likelihood + + // UnderExposed is the likelihood that the face is under-exposed. + UnderExposed Likelihood + + // Blurred is the likelihood that the face is blurred. + Blurred Likelihood + + // Headwear is the likelihood that the face has headwear. 
+ Headwear Likelihood +} + +func populateFaceLandmarks(landmarks []*pb.FaceAnnotation_Landmark, face *FaceLandmarks) { + for _, lm := range landmarks { + pos := &r3.Vector{ + X: float64(lm.Position.X), + Y: float64(lm.Position.Y), + Z: float64(lm.Position.Z), + } + switch lm.Type { + case pb.FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW: + face.Eyebrows.Left.Left = pos + case pb.FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW: + face.Eyebrows.Left.Right = pos + case pb.FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW: + face.Eyebrows.Right.Left = pos + case pb.FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW: + face.Eyebrows.Right.Right = pos + case pb.FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT: + face.Eyebrows.Left.Top = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT: + face.Eyebrows.Right.Top = pos + case pb.FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES: + face.Nose.Top = pos + case pb.FaceAnnotation_Landmark_NOSE_TIP: + face.Nose.Tip = pos + case pb.FaceAnnotation_Landmark_UPPER_LIP: + face.Mouth.UpperLip = pos + case pb.FaceAnnotation_Landmark_LOWER_LIP: + face.Mouth.LowerLip = pos + case pb.FaceAnnotation_Landmark_MOUTH_LEFT: + face.Mouth.Left = pos + case pb.FaceAnnotation_Landmark_MOUTH_RIGHT: + face.Mouth.Right = pos + case pb.FaceAnnotation_Landmark_MOUTH_CENTER: + face.Mouth.Center = pos + case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT: + face.Nose.Right = pos + case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT: + face.Nose.Left = pos + case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER: + face.Nose.Bottom = pos + case pb.FaceAnnotation_Landmark_LEFT_EYE: + face.Eyes.Left.Center = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYE: + face.Eyes.Right.Center = pos + case pb.FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY: + face.Eyes.Left.Top = pos + case pb.FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER: + face.Eyes.Left.Right = pos + case pb.FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY: + face.Eyes.Left.Bottom = pos + case pb.FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER: + face.Eyes.Left.Left = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY: + face.Eyes.Right.Top = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER: + face.Eyes.Right.Right = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY: + face.Eyes.Right.Bottom = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER: + face.Eyes.Right.Left = pos + case pb.FaceAnnotation_Landmark_LEFT_EYE_PUPIL: + face.Eyes.Left.Pupil = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYE_PUPIL: + face.Eyes.Right.Pupil = pos + case pb.FaceAnnotation_Landmark_LEFT_EAR_TRAGION: + face.Ears.Left = pos + case pb.FaceAnnotation_Landmark_RIGHT_EAR_TRAGION: + face.Ears.Right = pos + case pb.FaceAnnotation_Landmark_FOREHEAD_GLABELLA: + face.Forehead = pos + case pb.FaceAnnotation_Landmark_CHIN_GNATHION: + face.Chin.Center = pos + case pb.FaceAnnotation_Landmark_CHIN_LEFT_GONION: + face.Chin.Left = pos + case pb.FaceAnnotation_Landmark_CHIN_RIGHT_GONION: + face.Chin.Right = pos + default: + log.Printf("vision: ignoring unknown face annotation landmark %s", lm.Type) + } + } +} diff --git a/vendor/cloud.google.com/go/vision/geometry.go b/vendor/cloud.google.com/go/vision/geometry.go new file mode 100644 index 000000000..35f90b8ee --- /dev/null +++ b/vendor/cloud.google.com/go/vision/geometry.go @@ -0,0 +1,36 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vision
+
+import (
+	"image"
+
+	pb "google.golang.org/genproto/googleapis/cloud/vision/v1"
+)
+
+func pointFromProto(v *pb.Vertex) image.Point {
+	return image.Point{X: int(v.X), Y: int(v.Y)}
+}
+
+func boundingPolyFromProto(b *pb.BoundingPoly) []image.Point {
+	if b == nil {
+		return nil
+	}
+	var ps []image.Point
+	for _, v := range b.Vertices {
+		ps = append(ps, pointFromProto(v))
+	}
+	return ps
+}
diff --git a/vendor/cloud.google.com/go/vision/image.go b/vendor/cloud.google.com/go/vision/image.go
new file mode 100644
index 000000000..3bcb2362e
--- /dev/null
+++ b/vendor/cloud.google.com/go/vision/image.go
@@ -0,0 +1,92 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vision
+
+import (
+	"io"
+	"io/ioutil"
+
+	pb "google.golang.org/genproto/googleapis/cloud/vision/v1"
+)
+
+// An Image represents the contents of an image to run detection algorithms on,
+// along with metadata. Images may be described by their raw bytes, or by a
+// reference to a Google Cloud Storage (GCS) object.
+type Image struct {
+	// Exactly one of content and gcsURI will be non-zero.
+	content []byte // raw image bytes
+	gcsURI  string // URI of the form "gs://BUCKET/OBJECT"
+
+	// Rect is a rectangle on the Earth's surface represented by the
+	// image. It is optional.
+	Rect *LatLngRect
+
+	// LanguageHints is a list of languages to use for text detection. In most
+	// cases, leaving this field nil yields the best results since it enables
+	// automatic language detection. For languages based on the Latin alphabet,
+	// setting LanguageHints is not needed. In rare cases, when the language of
+	// the text in the image is known, setting a hint will help get better
+	// results (although it will be a significant hindrance if the hint is
+	// wrong). Text detection returns an error if one or more of the specified
+	// languages is not one of the supported languages (See
+	// https://cloud.google.com/translate/v2/translate-reference#supported_languages).
+	LanguageHints []string
+}
+
+// NewImageFromReader reads the bytes of an image from r, then closes r.
+//
+// You may optionally set Rect and LanguageHints on the returned Image before
+// using it.
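// [Editor's sketch — not part of the vendored upstream file.] A hypothetical
// use of NewImageFromReader (defined next): because it closes its argument,
// an *http.Response body can be passed in directly. Assumes an extra
// net/http import; the URL is made up.
func exampleImageFromHTTP() (*Image, error) {
	resp, err := http.Get("https://example.com/photo.jpg")
	if err != nil {
		return nil, err
	}
	// NewImageFromReader reads all of resp.Body and then closes it.
	return NewImageFromReader(resp.Body)
}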
+func NewImageFromReader(r io.ReadCloser) (*Image, error) { + bytes, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + if err := r.Close(); err != nil { + return nil, err + } + return &Image{content: bytes}, nil +} + +// NewImageFromGCS returns an image that refers to an object in Google Cloud Storage. +// gcsPath must be a valid Google Cloud Storage URI of the form "gs://BUCKET/OBJECT". +// +// You may optionally set Rect and LanguageHints on the returned Image before +// using it. +func NewImageFromGCS(gcsURI string) *Image { + return &Image{gcsURI: gcsURI} +} + +// toProtos converts the Image to the two underlying API protos it represents, +// pb.Image and pb.ImageContext. +func (img *Image) toProtos() (*pb.Image, *pb.ImageContext) { + var pimg *pb.Image + switch { + case img.content != nil: + pimg = &pb.Image{Content: img.content} + case img.gcsURI != "": + pimg = &pb.Image{Source: &pb.ImageSource{GcsImageUri: img.gcsURI}} + } + + var pctx *pb.ImageContext + if img.Rect != nil || len(img.LanguageHints) > 0 { + pctx = &pb.ImageContext{ + LatLongRect: img.Rect.toProto(), + LanguageHints: img.LanguageHints, + } + } + + return pimg, pctx +} diff --git a/vendor/cloud.google.com/go/vision/latlng.go b/vendor/cloud.google.com/go/vision/latlng.go new file mode 100644 index 000000000..d553a3694 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/latlng.go @@ -0,0 +1,58 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" + llpb "google.golang.org/genproto/googleapis/type/latlng" +) + +// A LatLng is a point on the Earth's surface, represented with a latitude and longitude. +type LatLng struct { + // Lat is the latitude in degrees. It must be in the range [-90.0, +90.0]. + Lat float64 + // Lng is the longitude in degrees. It must be in the range [-180.0, +180.0]. + Lng float64 +} + +func (l LatLng) toProto() *llpb.LatLng { + return &llpb.LatLng{ + Latitude: l.Lat, + Longitude: l.Lng, + } +} + +func latLngFromProto(ll *llpb.LatLng) LatLng { + return LatLng{ + Lat: ll.Latitude, + Lng: ll.Longitude, + } +} + +// A LatLngRect is a rectangular area on the Earth's surface, represented by a +// minimum and maximum latitude and longitude. 
+type LatLngRect struct { + Min, Max LatLng +} + +func (r *LatLngRect) toProto() *pb.LatLongRect { + if r == nil { + return nil + } + return &pb.LatLongRect{ + MinLatLng: r.Min.toProto(), + MaxLatLng: r.Max.toProto(), + } +} diff --git a/vendor/cloud.google.com/go/vision/testdata/README.md b/vendor/cloud.google.com/go/vision/testdata/README.md new file mode 100644 index 000000000..fd3ea341d --- /dev/null +++ b/vendor/cloud.google.com/go/vision/testdata/README.md @@ -0,0 +1,16 @@ +The following files were copied from https://github.com/GoogleCloudPlatform/cloud-vision/tree/master/data: +cat.jpg +face.jpg +faulkner.jpg +mountain.jpg +no-text.jpg + +eiffel-tower.jpg is from +https://commons.wikimedia.org/wiki/File:Tour_Eiffel_Wikimedia_Commons_(cropped).jpg. + +google.png is from the Google home page: +https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png. + + + + diff --git a/vendor/cloud.google.com/go/vision/testdata/cat.jpg b/vendor/cloud.google.com/go/vision/testdata/cat.jpg new file mode 100644 index 000000000..76af906f0 Binary files /dev/null and b/vendor/cloud.google.com/go/vision/testdata/cat.jpg differ diff --git a/vendor/cloud.google.com/go/vision/testdata/eiffel-tower.jpg b/vendor/cloud.google.com/go/vision/testdata/eiffel-tower.jpg new file mode 100644 index 000000000..9c32b85af Binary files /dev/null and b/vendor/cloud.google.com/go/vision/testdata/eiffel-tower.jpg differ diff --git a/vendor/cloud.google.com/go/vision/testdata/face.jpg b/vendor/cloud.google.com/go/vision/testdata/face.jpg new file mode 100644 index 000000000..c0ee5580b Binary files /dev/null and b/vendor/cloud.google.com/go/vision/testdata/face.jpg differ diff --git a/vendor/cloud.google.com/go/vision/testdata/faulkner.jpg b/vendor/cloud.google.com/go/vision/testdata/faulkner.jpg new file mode 100644 index 000000000..93b8ac3ad Binary files /dev/null and b/vendor/cloud.google.com/go/vision/testdata/faulkner.jpg differ diff --git a/vendor/cloud.google.com/go/vision/testdata/google.png b/vendor/cloud.google.com/go/vision/testdata/google.png new file mode 100644 index 000000000..333bda937 Binary files /dev/null and b/vendor/cloud.google.com/go/vision/testdata/google.png differ diff --git a/vendor/cloud.google.com/go/vision/testdata/mountain.jpg b/vendor/cloud.google.com/go/vision/testdata/mountain.jpg new file mode 100644 index 000000000..f9505df38 Binary files /dev/null and b/vendor/cloud.google.com/go/vision/testdata/mountain.jpg differ diff --git a/vendor/cloud.google.com/go/vision/testdata/no-text.jpg b/vendor/cloud.google.com/go/vision/testdata/no-text.jpg new file mode 100644 index 000000000..8b77575de Binary files /dev/null and b/vendor/cloud.google.com/go/vision/testdata/no-text.jpg differ diff --git a/vendor/cloud.google.com/go/vision/vision.go b/vendor/cloud.google.com/go/vision/vision.go new file mode 100644 index 000000000..3aa4bbe60 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/vision.go @@ -0,0 +1,290 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vision
+
+import (
+	"image/color"
+	"math"
+
+	vkit "cloud.google.com/go/vision/apiv1"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	pb "google.golang.org/genproto/googleapis/cloud/vision/v1"
+	cpb "google.golang.org/genproto/googleapis/type/color"
+)
+
+// Scope is the OAuth2 scope required by the Google Cloud Vision API.
+const Scope = "https://www.googleapis.com/auth/cloud-platform"
+
+// Client is a Google Cloud Vision API client.
+type Client struct {
+	client *vkit.Client
+}
+
+// NewClient creates a new vision client.
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
+	c, err := vkit.NewClient(ctx, opts...)
+	if err != nil {
+		return nil, err
+	}
+	c.SetGoogleClientInfo("vision", "0.1.0")
+	return &Client{client: c}, nil
+}
+
+// Close closes the client.
+func (c *Client) Close() error {
+	return c.client.Close()
+}
+
+// Annotate annotates multiple images, each with a potentially different set
+// of features.
+func (c *Client) Annotate(ctx context.Context, requests ...*AnnotateRequest) ([]*Annotations, error) {
+	var reqs []*pb.AnnotateImageRequest
+	for _, r := range requests {
+		reqs = append(reqs, r.toProto())
+	}
+	res, err := c.client.BatchAnnotateImages(ctx, &pb.BatchAnnotateImagesRequest{Requests: reqs})
+	if err != nil {
+		return nil, err
+	}
+	var results []*Annotations
+	for _, res := range res.Responses {
+		results = append(results, annotationsFromProto(res))
+	}
+	return results, nil
+}
+
+// An AnnotateRequest specifies an image to annotate and the features to look for in that image.
+type AnnotateRequest struct {
+	// Image is the image to annotate.
+	Image *Image
+	// MaxFaces is the maximum number of faces to detect in the image.
+	// Specifying a number greater than zero enables face detection.
+	MaxFaces int
+	// MaxLandmarks is the maximum number of landmarks to detect in the image.
+	// Specifying a number greater than zero enables landmark detection.
+	MaxLandmarks int
+	// MaxLogos is the maximum number of logos to detect in the image.
+	// Specifying a number greater than zero enables logo detection.
+	MaxLogos int
+	// MaxLabels is the maximum number of labels to detect in the image.
+	// Specifying a number greater than zero enables label detection.
+	MaxLabels int
+	// MaxTexts is the maximum number of separate pieces of text to detect in the
+	// image. Specifying a number greater than zero enables text detection.
+	MaxTexts int
+	// SafeSearch specifies whether a safe-search detection should be run on the image.
+	SafeSearch bool
+	// ImageProps specifies whether image properties should be obtained for the image.
+ ImageProps bool +} + +func (ar *AnnotateRequest) toProto() *pb.AnnotateImageRequest { + img, ictx := ar.Image.toProtos() + var features []*pb.Feature + add := func(typ pb.Feature_Type, max int) { + var mr int32 + if max > math.MaxInt32 { + mr = math.MaxInt32 + } else { + mr = int32(max) + } + features = append(features, &pb.Feature{Type: typ, MaxResults: mr}) + } + if ar.MaxFaces > 0 { + add(pb.Feature_FACE_DETECTION, ar.MaxFaces) + } + if ar.MaxLandmarks > 0 { + add(pb.Feature_LANDMARK_DETECTION, ar.MaxLandmarks) + } + if ar.MaxLogos > 0 { + add(pb.Feature_LOGO_DETECTION, ar.MaxLogos) + } + if ar.MaxLabels > 0 { + add(pb.Feature_LABEL_DETECTION, ar.MaxLabels) + } + if ar.MaxTexts > 0 { + add(pb.Feature_TEXT_DETECTION, ar.MaxTexts) + } + if ar.SafeSearch { + add(pb.Feature_SAFE_SEARCH_DETECTION, 0) + } + if ar.ImageProps { + add(pb.Feature_IMAGE_PROPERTIES, 0) + } + return &pb.AnnotateImageRequest{ + Image: img, + Features: features, + ImageContext: ictx, + } +} + +// Called for a single image and a single feature. +func (c *Client) annotateOne(ctx context.Context, req *AnnotateRequest) (*Annotations, error) { + annsSlice, err := c.Annotate(ctx, req) + if err != nil { + return nil, err + } + anns := annsSlice[0] + // When there is only one image and one feature, the Annotations.Error field is + // unambiguously about that one detection, so we "promote" it to the error return value. + if anns.Error != nil { + return nil, anns.Error + } + return anns, nil +} + +// TODO(jba): add examples for all single-feature functions (below). + +// DetectFaces performs face detection on the image. +// At most maxResults results are returned. +func (c *Client) DetectFaces(ctx context.Context, img *Image, maxResults int) ([]*FaceAnnotation, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxFaces: maxResults}) + if err != nil { + return nil, err + } + return anns.Faces, nil +} + +// DetectLandmarks performs landmark detection on the image. +// At most maxResults results are returned. +func (c *Client) DetectLandmarks(ctx context.Context, img *Image, maxResults int) ([]*EntityAnnotation, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxLandmarks: maxResults}) + if err != nil { + return nil, err + } + return anns.Landmarks, nil +} + +// DetectLogos performs logo detection on the image. +// At most maxResults results are returned. +func (c *Client) DetectLogos(ctx context.Context, img *Image, maxResults int) ([]*EntityAnnotation, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxLogos: maxResults}) + if err != nil { + return nil, err + } + return anns.Logos, nil +} + +// DetectLabels performs label detection on the image. +// At most maxResults results are returned. +func (c *Client) DetectLabels(ctx context.Context, img *Image, maxResults int) ([]*EntityAnnotation, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxLabels: maxResults}) + if err != nil { + return nil, err + } + return anns.Labels, nil +} + +// DetectTexts performs text detection on the image. +// At most maxResults results are returned. +func (c *Client) DetectTexts(ctx context.Context, img *Image, maxResults int) ([]*EntityAnnotation, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxTexts: maxResults}) + if err != nil { + return nil, err + } + return anns.Texts, nil +} + +// DetectSafeSearch performs safe-search detection on the image. 
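// [Editor's sketch — not part of the vendored upstream file; it partially
// addresses the TODO above about examples for the single-feature helpers.]
// A hypothetical use of DetectLabels; the GCS object name is made up and an
// extra fmt import is assumed.
func exampleDetectLabels(ctx context.Context) error {
	c, err := NewClient(ctx)
	if err != nil {
		return err
	}
	defer c.Close()
	img := NewImageFromGCS("gs://my-bucket/my-image.png")
	labels, err := c.DetectLabels(ctx, img, 5) // at most 5 label results
	if err != nil {
		// Includes any detection error promoted by annotateOne above.
		return err
	}
	fmt.Println(len(labels), "labels detected")
	return nil
}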
+func (c *Client) DetectSafeSearch(ctx context.Context, img *Image) (*SafeSearchAnnotation, error) {
+	anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, SafeSearch: true})
+	if err != nil {
+		return nil, err
+	}
+	return anns.SafeSearch, nil
+}
+
+// DetectImageProps computes properties of the image.
+func (c *Client) DetectImageProps(ctx context.Context, img *Image) (*ImageProps, error) {
+	anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, ImageProps: true})
+	if err != nil {
+		return nil, err
+	}
+	return anns.ImageProps, nil
+}
+
+// A Likelihood is an approximate representation of a probability.
+type Likelihood int
+
+const (
+	// LikelihoodUnknown means the likelihood is unknown.
+	LikelihoodUnknown = Likelihood(pb.Likelihood_UNKNOWN)
+
+	// VeryUnlikely means the image is very unlikely to belong to the feature specified.
+	VeryUnlikely = Likelihood(pb.Likelihood_VERY_UNLIKELY)
+
+	// Unlikely means the image is unlikely to belong to the feature specified.
+	Unlikely = Likelihood(pb.Likelihood_UNLIKELY)
+
+	// Possible means the image possibly belongs to the feature specified.
+	Possible = Likelihood(pb.Likelihood_POSSIBLE)
+
+	// Likely means the image is likely to belong to the feature specified.
+	Likely = Likelihood(pb.Likelihood_LIKELY)
+
+	// VeryLikely means the image is very likely to belong to the feature specified.
+	VeryLikely = Likelihood(pb.Likelihood_VERY_LIKELY)
+)
+
+// A Property is an arbitrary name-value pair.
+type Property struct {
+	Name  string
+	Value string
+}
+
+func propertyFromProto(p *pb.Property) Property {
+	return Property{Name: p.Name, Value: p.Value}
+}
+
+// ColorInfo describes a color found in an image: its RGB components, an
+// image-specific score, and the fraction of the image that the color occupies.
+type ColorInfo struct {
+	// RGB components of the color.
+	Color color.NRGBA64
+
+	// Score is the image-specific score for this color, in the range [0, 1].
+	Score float32
+
+	// PixelFraction is the fraction of pixels the color occupies in the image,
+	// in the range [0, 1].
+	PixelFraction float32
+}
+
+func colorInfoFromProto(ci *pb.ColorInfo) *ColorInfo {
+	return &ColorInfo{
+		Color:         colorFromProto(ci.Color),
+		Score:         ci.Score,
+		PixelFraction: ci.PixelFraction,
+	}
+}
+
+// Should this go into protobuf/ptypes? The color proto is in google/types, so
+// not specific to this API.
+func colorFromProto(c *cpb.Color) color.NRGBA64 {
+	// Convert a color component from [0.0, 1.0] to a uint16.
+	cvt := func(f float32) uint16 { return uint16(f*math.MaxUint16 + 0.5) }
+
+	var alpha float32 = 1
+	if c.Alpha != nil {
+		alpha = c.Alpha.Value
+	}
+	return color.NRGBA64{
+		R: cvt(c.Red),
+		G: cvt(c.Green),
+		B: cvt(c.Blue),
+		A: cvt(alpha),
+	}
+}
diff --git a/vendor/cloud.google.com/go/vision/vision_test.go b/vendor/cloud.google.com/go/vision/vision_test.go
new file mode 100644
index 000000000..f53134a1c
--- /dev/null
+++ b/vendor/cloud.google.com/go/vision/vision_test.go
@@ -0,0 +1,251 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "log" + "os" + "testing" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +func TestAnnotate(t *testing.T) { + ctx := context.Background() + client := integrationTestClient(ctx, t) + defer client.Close() + + tests := []struct { + path string // path to image file, relative to testdata + // If one of these is true, we expect that annotation to be non-nil. + faces, landmarks, logos, labels, texts bool + // We always expect safe search and image properties to be present. + }{ + {path: "face.jpg", faces: true, labels: true}, + {path: "cat.jpg", labels: true}, + {path: "faulkner.jpg", labels: true}, + {path: "mountain.jpg", texts: true, labels: true}, + {path: "no-text.jpg", labels: true}, + {path: "eiffel-tower.jpg", landmarks: true, labels: true}, + {path: "google.png", logos: true, labels: true, texts: true}, + } + for _, test := range tests { + annsSlice, err := client.Annotate(ctx, &AnnotateRequest{ + Image: testImage(test.path), + MaxFaces: 1, + MaxLandmarks: 1, + MaxLogos: 1, + MaxLabels: 1, + MaxTexts: 1, + SafeSearch: true, + ImageProps: true, + }) + if err != nil { + t.Fatalf("annotating %s: %v", test.path, err) + } + anns := annsSlice[0] + p := map[bool]string{true: "present", false: "absent"} + if got, want := (anns.Faces != nil), test.faces; got != want { + t.Errorf("%s: faces %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.Landmarks != nil), test.landmarks; got != want { + t.Errorf("%s: landmarks %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.Logos != nil), test.logos; got != want { + t.Errorf("%s: logos %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.Labels != nil), test.labels; got != want { + t.Errorf("%s: labels %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.Texts != nil), test.texts; got != want { + t.Errorf("%s: texts %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.SafeSearch != nil), true; got != want { + t.Errorf("%s: safe search %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.ImageProps != nil), true; got != want { + t.Errorf("%s: image properties %s, want %s", test.path, p[got], p[want]) + } + if anns.Error != nil { + t.Errorf("%s: got Error %v; want nil", test.path, anns.Error) + } + } +} + +func TestDetectMethods(t *testing.T) { + ctx := context.Background() + client := integrationTestClient(ctx, t) + defer client.Close() + + for i, test := range []struct { + path string + call func(*Image) (bool, error) + }{ + {"face.jpg", + func(img *Image) (bool, error) { + as, err := client.DetectFaces(ctx, img, 1) + return as != nil, err + }, + }, + {"eiffel-tower.jpg", + func(img *Image) (bool, error) { + as, err := client.DetectLandmarks(ctx, img, 1) + return as != nil, err + }, + }, + {"google.png", + func(img *Image) (bool, error) { + as, err := client.DetectLogos(ctx, img, 1) + return as != nil, err + }, + }, + {"faulkner.jpg", + func(img *Image) (bool, error) { + as, err := client.DetectLabels(ctx, img, 1) + return as != nil, err + }, + }, + {"mountain.jpg", + func(img *Image) (bool, error) { + as, err := client.DetectTexts(ctx, img, 1) + return as != nil, err + }, + }, + {"cat.jpg", + func(img *Image) (bool, error) { + as, err := client.DetectSafeSearch(ctx, img) + return as != nil, err + }, + }, + {"cat.jpg", + func(img *Image) (bool, error) { 
+				ip, err := client.DetectImageProps(ctx, img)
+				return ip != nil, err
+			},
+		},
+	} {
+		present, err := test.call(testImage(test.path))
+		if err != nil {
+			t.Errorf("%s, #%d: got err %v, want nil", test.path, i, err)
+		}
+		if !present {
+			t.Errorf("%s, #%d: nil annotation, want non-nil", test.path, i)
+		}
+	}
+}
+
+// The DetectXXX methods of client that return EntityAnnotations.
+var entityDetectionMethods = []func(*Client, context.Context, *Image, int) ([]*EntityAnnotation, error){
+	(*Client).DetectLandmarks,
+	(*Client).DetectLogos,
+	(*Client).DetectLabels,
+	(*Client).DetectTexts,
+}
+
+func TestErrors(t *testing.T) {
+	ctx := context.Background()
+	client := integrationTestClient(ctx, t)
+	defer client.Close()
+
+	// Empty image.
+	// With Client.Annotate, the RPC succeeds, but the Error field is non-nil.
+	_, err := client.Annotate(ctx, &AnnotateRequest{
+		Image:      &Image{},
+		ImageProps: true,
+	})
+	if err != nil {
+		t.Errorf("got %v, want nil", err)
+	}
+
+	// Invalid image.
+	badImg := &Image{content: []byte("ceci n'est pas une image")}
+	// If only ImageProps is specified, the result is an annotation
+	// with all fields (including Error) nil. But any actual detection will fail.
+	_, err = client.Annotate(ctx, &AnnotateRequest{
+		Image:      badImg,
+		SafeSearch: true,
+	})
+	if err != nil {
+		t.Errorf("got %v, want nil", err)
+	}
+
+	// With a Client.DetectXXX method, the Error field becomes the return value.
+	_, err = client.DetectFaces(ctx, &Image{}, 1)
+	if err == nil {
+		t.Error("got nil, want error")
+	}
+	for i, edm := range entityDetectionMethods {
+		_, err = edm(client, ctx, &Image{}, 1)
+		if err == nil {
+			t.Errorf("edm %d: got nil, want error", i)
+		}
+	}
+	_, err = client.DetectSafeSearch(ctx, &Image{})
+	if err == nil {
+		t.Error("got nil, want error")
+	}
+	_, err = client.DetectImageProps(ctx, &Image{})
+	if err == nil {
+		t.Error("got nil, want error")
+	}
+
+	// Client.DetectXXX methods fail if passed a zero maxResults.
+	img := testImage("cat.jpg")
+	_, err = client.DetectFaces(ctx, img, 0)
+	if err == nil {
+		t.Error("got nil, want error")
+	}
+	for i, edm := range entityDetectionMethods {
+		_, err = edm(client, ctx, img, 0)
+		if err == nil {
+			t.Errorf("edm %d: got nil, want error", i)
+		}
+	}
+}
+
+func integrationTestClient(ctx context.Context, t *testing.T) *Client {
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	ts := testutil.TokenSource(ctx, Scope)
+	if ts == nil {
+		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
+	}
+	client, err := NewClient(ctx, option.WithTokenSource(ts))
+	if err != nil {
+		t.Fatal(err)
+	}
+	return client
+}
+
+var images = map[string]*Image{}
+
+func testImage(path string) *Image {
+	if img, ok := images[path]; ok {
+		return img
+	}
+	f, err := os.Open("testdata/" + path)
+	if err != nil {
+		log.Fatal(err)
+	}
+	img, err := NewImageFromReader(f)
+	if err != nil {
+		log.Fatalf("reading image %q: %v", path, err)
+	}
+	images[path] = img
+	return img
+}
diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore
new file mode 100644
index 000000000..ab262cbe5
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/.gitignore
@@ -0,0 +1,31 @@
+# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore)
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.DS_Store
+.idea/
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# go-autorest specific
+vendor/
+autorest/azure/example/example
diff --git a/vendor/github.com/Azure/go-autorest/.travis.yml b/vendor/github.com/Azure/go-autorest/.travis.yml
new file mode 100644
index 000000000..abfb2831d
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/.travis.yml
@@ -0,0 +1,25 @@
+sudo: false
+
+language: go
+
+go:
+  - 1.9
+  - 1.8
+  - 1.7
+  - 1.6
+
+install:
+  - go get -u github.com/golang/lint/golint
+  - go get -u github.com/Masterminds/glide
+  - go get -u github.com/stretchr/testify
+  - go get -u github.com/GoASTScanner/gas
+  - glide install
+
+script:
+  - grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee /dev/stderr | test -z "$(< /dev/stdin)"
+  - test -z "$(gofmt -s -l -w ./autorest/. | tee /dev/stderr)"
+  - test -z "$(golint ./autorest/... | tee /dev/stderr)"
+  - go vet ./autorest/...
+  - test -z "$(gas ./autorest/... | tee /dev/stderr | grep Error)"
+  - go build -v ./autorest/...
+  - go test -v ./autorest/...
diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md
new file mode 100644
index 000000000..c809bf67e
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md
@@ -0,0 +1,210 @@
+# CHANGELOG
+
+## v9.1.0
+
+### New Features
+
+- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error.
+- Support for loading Azure CLI Authentication files.
+- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously.
+
+### Bug Fixes
+
+- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
+- Adding missing Apache Headers
+
+## v9.0.0
+
+> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes.
+
+Adding MSI Endpoint Support and CLI token rehydration.
+
+## v8.3.1
+
+Pick up bug fix in adal for MSI support.
+
+## v8.3.0
+
+Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience.
+
+## v8.2.0
+
+### New Features
+
+- Add support for bearer authentication callbacks
+- Support 429 response codes that include "Retry-After" header
+- Support validation constraint "Pattern" for map keys
+
+### Bug Fixes
+
+- Make RetriableRequest work with multiple versions of Go
+
+## v8.1.1
+Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8.
+
+## v8.1.0
+Adds RetriableRequest type for more efficient handling of retrying HTTP requests.
+
+## v8.0.0
+
+ADAL refactored into its own package.
+Support for UNIX time.
+
+## v7.3.1
+- Version Testing now removed from production bits that are shipped with the library.
+
+## v7.3.0
+- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
+  to acknowledge that they do not need either the entire or a trailing portion
+  of a response body. In doing so, Go's http library can reuse HTTP
+  connections more readily.
+- Adding `PrepareDecorator` to target custom BaseURLs.
+- Adding ACR suffix to public cloud environment.
+- Updating Glide dependencies.
+
+## v7.2.5
+- Fixed the Active Directory endpoint for the China cloud.
+- Removes UTF-8 BOM if present in response payload.
+- Added telemetry.
+
+## v7.2.3
+- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay
+  duration.
+
+## v7.2.2
+- autorest/azure: added ASM and ARM VM DNS suffixes.
+
+## v7.2.1
+- fixed parsing of UTC times that are not RFC3339 conformant.
+
+## v7.2.0
+- autorest/validation: Reformat validation error for better error message.
+
+## v7.1.0
+- preparer: Added support for multipart formdata - WithMultiPartFormdata()
+- preparer: Added support for sending file in request body - WithFile
+- client: Added RetryDuration parameter.
+- autorest/validation: new package for validation code for Azure Go SDK.
+
+## v7.0.7
+- Add trailing / to endpoint
+- azure: add EnvironmentFromName
+
+## v7.0.6
+- Add retry logic for 408, 500, 502, 503 and 504 status codes.
+- Change url path and query encoding logic.
+- Fix DelayForBackoff for proper exponential delay.
+- Add CookieJar in Client.
+
+## v7.0.5
+- Add check to start polling only when status is in [200,201,202].
+- Refactoring for unchecked errors.
+- azure/persist changes.
+- Fix 'file in use' issue in renewing token in deviceflow.
+- Store header RetryAfter for subsequent requests in polling.
+- Add attribute details in service error.
+
+## v7.0.4
+- Better error messages for long running operation failures
+
+## v7.0.3
+- Corrected DoPollForAsynchronous to properly handle the initial response
+
+## v7.0.2
+- Corrected DoPollForAsynchronous to continue using the polling method first discovered
+
+## v7.0.1
+- Fixed empty JSON input error in ByUnmarshallingJSON
+- Fixed polling support for GET calls
+- Changed format name from TimeRfc1123 to TimeRFC1123
+
+## v7.0.0
+- Added ByCopying responder with supporting TeeReadCloser
+- Rewrote Azure asynchronous handling
+- Reverted to only unmarshalling JSON
+- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format
+
+The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since
+`encoding/json` successfully deserializes all core types, and extended types normally provide
+their custom JSON serialization handlers, the code has been reverted back to using
+`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate
+code; there is no loss of function, and there is a gain in accuracy, by reverting.
+
+Additionally, Azure services indicate requests to be polled by multiple means. The existing code
+only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header).
+The new code correctly covers all cases and aligns with the other Azure SDKs.
+
+## v6.1.0
+- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values.
+
+## v6.0.0
+- Completely reworked the handling of polled and asynchronous requests
+- Removed unnecessary routines
+- Reworked `mocks.Sender` to replay a series of `http.Response` objects
+- Added `PrepareDecorators` for primitive types (e.g., bool, int32)
+
+Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new
+`SendDecorators` implement different styles of polled behavior. See `autorest.DoPollForStatusCodes`
+and `azure.DoPollForAsynchronous` for examples.
+
+## v5.0.0
+- Added new RespondDecorators unmarshalling primitive types
+- Corrected application of inspection and authorization PrependDecorators
+
+## v4.0.0
+- Added support for Azure long-running operations.
+- Added cancellation support to all decorators and functions that may delay.
+- Breaking: `DelayForBackoff` now accepts a channel, which may be nil.
+
+## v3.1.0
+- Add support for OAuth Device Flow authorization.
+- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
+- Add helpers for persisting and restoring Tokens.
+- Increased code coverage in the github.com/Azure/autorest/azure package
+
+## v3.0.0
+- Breaking: `NewErrorWithError` no longer takes `statusCode int`.
+- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`.
+- Breaking: `Client#Send()` no longer takes `codes ...int` argument.
+- Add: XML unmarshaling support with `ByUnmarshallingXML()`
+- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide).
+  Applications using this library should either use Glide or vendor dependencies locally some other way.
+- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors.
+- Fix: use `net/http.DefaultClient` as base client.
+- Fix: Missing inspection for polling responses added.
+- Add: CopyAndDecode helpers.
+- Improved `./autorest/to` with `[]string` helpers.
+- Removed golint suppressions in .travis.yml.
+
+## v2.1.0
+
+- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any)
+
+## v2.0.0
+
+- Changed `to.StringMapPtr` method signature to return a pointer
+- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys
+
+## v1.0.0
+
+- Added Logging inspectors to trace http.Request / Response
+- Added support for User-Agent header
+- Changed WithHeader PrepareDecorator to use set vs. add
+- Added JSON to error when unmarshalling fails
+- Added Client#Send method
+- Corrected case of "Azure" in package paths
+- Added "to" helpers, Azure helpers, and improved ease-of-use
+- Corrected golint issues
+
+## v1.0.1
+
+- Added CHANGELOG.md
+
+## v1.1.0
+
+- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT
+- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate
+
+## v1.1.1
+
+- Introduce godeps and vendor dependencies introduced in v1.1.1
diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile
new file mode 100644
index 000000000..a434e73ac
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/GNUmakefile
@@ -0,0 +1,23 @@
+DIR?=./autorest/
+
+default: build
+
+build: fmt
+	go install $(DIR)
+
+test:
+	go test $(DIR) || exit 1
+
+vet:
+	@echo "go vet ."
+	@go vet $(DIR)... ; if [ $$? -eq 1 ]; then \
+		echo ""; \
+		echo "Vet found suspicious constructs.
Please check the reported constructs"; \ + echo "and fix them if necessary before submitting the code for review."; \ + exit 1; \ + fi + +fmt: + gofmt -w $(DIR) + +.PHONY: build test vet fmt diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md new file mode 100644 index 000000000..f4c34d0e6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/README.md @@ -0,0 +1,132 @@ +# go-autorest + +[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) [![Build Status](https://travis-ci.org/Azure/go-autorest.svg?branch=master)](https://travis-ci.org/Azure/go-autorest) [![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) + +## Usage +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + +```go + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) +``` + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + +```go + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) +``` + +will set the URL to: + +``` + https://microsoft.com/a/b/c +``` + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., `ByUnmarshallingJson`) is likely incorrect. + +Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. + +## Helpers + +### Handling Swagger Dates + +The Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. 
The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct +parsing and formatting. + +### Handling Empty Values + +In JSON, missing values have different semantics than empty values. This is especially true for +services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains +only those values to modify. Missing values are to be left unchanged. Developers, then, require a +means to both specify an empty value and to leave the value out of the submitted JSON. + +The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits +empty values from the rendered JSON. Since Go defines default values for all base types (such as "" +for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package +treats default values as meaning empty, omitting them from the rendered JSON. This means that, using +the Go base types encoded through the default JSON package, it is not possible to create JSON to +clear a value at the server. + +The workaround within the Go community is to use pointers to base types in lieu of base types within +structures that map to JSON. For example, instead of a value of type `string`, the workaround uses +`*string`. While this enables distinguishing empty values from those to be unchanged, creating +pointers to a base type (notably constant, in-line values) requires additional variables. This, for +example, + +```go + s := struct { + S *string + }{ S: &"foo" } +``` +fails, while, this + +```go + v := "foo" + s := struct { + S *string + }{ S: &v } +``` +succeeds. + +To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for +Go base types which have Swagger analogs. It also provides a helper that converts between +`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value +associated with a key should be cleared. With the helpers, the previous example becomes + +```go + s := struct { + S *string + }{ S: to.StringPtr("foo") } +``` + +## Install + +```bash +go get github.com/Azure/go-autorest/autorest +go get github.com/Azure/go-autorest/autorest/azure +go get github.com/Azure/go-autorest/autorest/date +go get github.com/Azure/go-autorest/autorest/to +``` + +## License + +See LICENSE file. + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md new file mode 100644 index 000000000..a17cf98c6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -0,0 +1,253 @@ +# Azure Active Directory library for Go + +This project provides a stand alone Azure Active Directory library for Go. The code was extracted +from [go-autorest](https://github.com/Azure/go-autorest/) project, which is used as a base for +[azure-sdk-for-go](https://github.com/Azure/azure-sdk-for-go). + + +## Installation + +``` +go get -u github.com/Azure/go-autorest/autorest/adal +``` + +## Usage + +An Active Directory application is required in order to use this library. 
An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications), or by using the [Azure CLI](https://github.com/Azure/azure-cli).
+
+### Register an Azure AD Application with secret
+
+1. Register a new application with a `secret` credential
+
+   ```
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --password secret
+   ```
+
+2. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "Application ID"
+   ```
+
+   * Replace `Application ID` with `appId` from step 1.
+
+### Register an Azure AD Application with certificate
+
+1. Create a private key
+
+   ```
+   openssl genrsa -out "example-app.key" 2048
+   ```
+
+2. Create the certificate
+
+   ```
+   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
+   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
+   ```
+
+3. Create the PKCS12 version of the certificate, which also contains the private key
+
+   ```
+   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
+   ```
+
+4. Register a new application with the certificate content from `example-app.crt`
+
+   ```
+   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
+
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --key-usage Verify --end-date 2018-01-01 \
+      --key-value "${certificateContents}"
+   ```
+
+5. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "APPLICATION_ID"
+   ```
+
+   * Replace `APPLICATION_ID` with `appId` from step 4.
+
+### Grant the necessary permissions
+
+Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained
+level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
+which can be assigned to a service principal of an Azure AD application depending on your needs.
+
+```
+az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
+```
+
+* Replace `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
+* Replace `ROLE_NAME` with a role name of your choice.
+
+It is also possible to define custom role definitions.
+
+```
+az role definition create --role-definition role-definition.json
+```
+
+* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
+
+### Acquire Access Token
+
+The common configuration used by all flows:
+
+```Go
+const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
+tenantID := "TENANT_ID"
+oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
+
+applicationID := "APPLICATION_ID"
+
+callback := func(token adal.Token) error {
+	// This is called after the token is acquired
+	return nil
+}
+
+// The resource for which the token is acquired
+resource := "https://management.core.windows.net/"
+```
+
+* Replace `TENANT_ID` with your tenant ID.
+* Replace `APPLICATION_ID` with the value from the previous section.
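+
+The callback runs after every successful acquisition or refresh. As a minimal sketch (the cache path here is an arbitrary choice for illustration), it could persist each fresh token with this package's `SaveToken` helper, much as `cmd/adal.go` below does:
+
+```Go
+callback := func(token adal.Token) error {
+	// Persist the freshly acquired token; mode 0600 keeps the cache private.
+	return adal.SaveToken("/tmp/example-app/accessToken.json", 0600, token)
+}
+```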
+
+#### Client Credentials
+
+```Go
+applicationSecret := "APPLICATION_SECRET"
+
+spt, err := adal.NewServicePrincipalToken(
+	oauthConfig,
+	applicationID,
+	applicationSecret,
+	resource,
+	callbacks...)
+if err != nil {
+	return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+* Replace `APPLICATION_SECRET` with the `password` value from the previous section.
+
+#### Client Certificate
+
+```Go
+certificatePath := "./example-app.pfx"
+
+certData, err := ioutil.ReadFile(certificatePath)
+if err != nil {
+	return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
+}
+
+// Get the certificate and private key from pfx file
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+if err != nil {
+	return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+	oauthConfig,
+	applicationID,
+	certificate,
+	rsaPrivateKey,
+	resource,
+	callbacks...)
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+* Update the certificate path to point to the example-app.pfx file created in the previous section.
+
+#### Device Code
+
+```Go
+oauthClient := &http.Client{}
+
+// Acquire the device code
+deviceCode, err := adal.InitiateDeviceAuth(
+	oauthClient,
+	oauthConfig,
+	applicationID,
+	resource)
+if err != nil {
+	return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
+}
+
+// Display the authentication message
+fmt.Println(*deviceCode.Message)
+
+// Wait here until the user is authenticated
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+if err != nil {
+	return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(
+	oauthConfig,
+	applicationID,
+	resource,
+	*token,
+	callbacks...)
+
+if err == nil {
+	token := spt.Token
+}
+```
+
+### Command Line Tool
+
+A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
+
+```
+adal -h
+
+Usage of ./adal:
+  -applicationId string
+        application id
+  -certificatePath string
+        path to pk12/PFC application certificate
+  -mode string
+        authentication mode (device, secret, cert, refresh) (default "device")
+  -resource string
+        resource for which the token is requested
+  -secret string
+        application secret
+  -tenantId string
+        tenant id
+  -tokenCachePath string
+        location of oath token cache (default "/home/cgc/.adal/accessToken.json")
+```
+
+Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
+
+```
+adal -mode device \
+    -applicationId "APPLICATION_ID" \
+    -tenantId "TENANT_ID" \
+    -resource https://management.core.windows.net/
+```
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/cmd/adal.go b/vendor/github.com/Azure/go-autorest/autorest/adal/cmd/adal.go
new file mode 100644
index 000000000..56b7d20d9
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/cmd/adal.go
@@ -0,0 +1,298 @@
+package main
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "flag" + "fmt" + "log" + "strings" + + "crypto/rsa" + "crypto/x509" + "io/ioutil" + "net/http" + "os/user" + + "github.com/Azure/go-autorest/autorest/adal" + "golang.org/x/crypto/pkcs12" +) + +const ( + deviceMode = "device" + clientSecretMode = "secret" + clientCertMode = "cert" + refreshMode = "refresh" + + activeDirectoryEndpoint = "https://login.microsoftonline.com/" +) + +type option struct { + name string + value string +} + +var ( + mode string + resource string + + tenantID string + applicationID string + + applicationSecret string + certificatePath string + + tokenCachePath string +) + +func checkMandatoryOptions(mode string, options ...option) { + for _, option := range options { + if strings.TrimSpace(option.value) == "" { + log.Fatalf("Authentication mode '%s' requires mandatory option '%s'.", mode, option.name) + } + } +} + +func defaultTokenCachePath() string { + usr, err := user.Current() + if err != nil { + log.Fatal(err) + } + defaultTokenPath := usr.HomeDir + "/.adal/accessToken.json" + return defaultTokenPath +} + +func init() { + flag.StringVar(&mode, "mode", "device", "authentication mode (device, secret, cert, refresh)") + flag.StringVar(&resource, "resource", "", "resource for which the token is requested") + flag.StringVar(&tenantID, "tenantId", "", "tenant id") + flag.StringVar(&applicationID, "applicationId", "", "application id") + flag.StringVar(&applicationSecret, "secret", "", "application secret") + flag.StringVar(&certificatePath, "certificatePath", "", "path to pk12/PFC application certificate") + flag.StringVar(&tokenCachePath, "tokenCachePath", defaultTokenCachePath(), "location of oath token cache") + + flag.Parse() + + switch mode = strings.TrimSpace(mode); mode { + case clientSecretMode: + checkMandatoryOptions(clientSecretMode, + option{name: "resource", value: resource}, + option{name: "tenantId", value: tenantID}, + option{name: "applicationId", value: applicationID}, + option{name: "secret", value: applicationSecret}, + ) + case clientCertMode: + checkMandatoryOptions(clientCertMode, + option{name: "resource", value: resource}, + option{name: "tenantId", value: tenantID}, + option{name: "applicationId", value: applicationID}, + option{name: "certificatePath", value: certificatePath}, + ) + case deviceMode: + checkMandatoryOptions(deviceMode, + option{name: "resource", value: resource}, + option{name: "tenantId", value: tenantID}, + option{name: "applicationId", value: applicationID}, + ) + case refreshMode: + checkMandatoryOptions(refreshMode, + option{name: "resource", value: resource}, + option{name: "tenantId", value: tenantID}, + option{name: "applicationId", value: applicationID}, + ) + default: + log.Fatalln("Authentication modes 'secret, 'cert', 'device' or 'refresh' are supported.") + } +} + +func acquireTokenClientSecretFlow(oauthConfig adal.OAuthConfig, + appliationID string, + applicationSecret string, + resource string, + callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + + spt, err := adal.NewServicePrincipalToken( + oauthConfig, + appliationID, + applicationSecret, + 
resource, + callbacks...) + if err != nil { + return nil, err + } + + return spt, spt.Refresh() +} + +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} + +func acquireTokenClientCertFlow(oauthConfig adal.OAuthConfig, + applicationID string, + applicationCertPath string, + resource string, + callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + + certData, err := ioutil.ReadFile(certificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) + } + + certificate, rsaPrivateKey, err := decodePkcs12(certData, "") + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + + spt, err := adal.NewServicePrincipalTokenFromCertificate( + oauthConfig, + applicationID, + certificate, + rsaPrivateKey, + resource, + callbacks...) + if err != nil { + return nil, err + } + + return spt, spt.Refresh() +} + +func acquireTokenDeviceCodeFlow(oauthConfig adal.OAuthConfig, + applicationID string, + resource string, + callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + + oauthClient := &http.Client{} + deviceCode, err := adal.InitiateDeviceAuth( + oauthClient, + oauthConfig, + applicationID, + resource) + if err != nil { + return nil, fmt.Errorf("Failed to start device auth flow: %s", err) + } + + fmt.Println(*deviceCode.Message) + + token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) + if err != nil { + return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) + } + + spt, err := adal.NewServicePrincipalTokenFromManualToken( + oauthConfig, + applicationID, + resource, + *token, + callbacks...) + return spt, err +} + +func refreshToken(oauthConfig adal.OAuthConfig, + applicationID string, + resource string, + tokenCachePath string, + callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + + token, err := adal.LoadToken(tokenCachePath) + if err != nil { + return nil, fmt.Errorf("failed to load token from cache: %v", err) + } + + spt, err := adal.NewServicePrincipalTokenFromManualToken( + oauthConfig, + applicationID, + resource, + *token, + callbacks...) 
+ if err != nil { + return nil, err + } + return spt, spt.Refresh() +} + +func saveToken(spt adal.Token) error { + if tokenCachePath != "" { + err := adal.SaveToken(tokenCachePath, 0600, spt) + if err != nil { + return err + } + log.Printf("Acquired token was saved in '%s' file\n", tokenCachePath) + return nil + + } + return fmt.Errorf("empty path for token cache") +} + +func main() { + oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) + if err != nil { + panic(err) + } + + callback := func(token adal.Token) error { + return saveToken(token) + } + + log.Printf("Authenticating with mode '%s'\n", mode) + switch mode { + case clientSecretMode: + _, err = acquireTokenClientSecretFlow( + *oauthConfig, + applicationID, + applicationSecret, + resource, + callback) + case clientCertMode: + _, err = acquireTokenClientCertFlow( + *oauthConfig, + applicationID, + certificatePath, + resource, + callback) + case deviceMode: + var spt *adal.ServicePrincipalToken + spt, err = acquireTokenDeviceCodeFlow( + *oauthConfig, + applicationID, + resource, + callback) + if err == nil { + err = saveToken(spt.Token) + } + case refreshMode: + _, err = refreshToken( + *oauthConfig, + applicationID, + resource, + tokenCachePath, + callback) + } + + if err != nil { + log.Fatalf("Failed to acquire a token for resource %s. Error: %v", resource, err) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go new file mode 100644 index 000000000..49e9214d5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -0,0 +1,65 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
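+
+// For example, with the endpoint template used below, a call such as
+// NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant") ("my-tenant" is an
+// illustrative value) resolves a token endpoint of
+// https://login.microsoftonline.com/my-tenant/oauth2/token?api-version=1.0
+// (config_test.go below asserts the exact URL shapes).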
+ +import ( + "fmt" + "net/url" +) + +const ( + activeDirectoryAPIVersion = "1.0" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorityEndpoint url.URL + AuthorizeEndpoint url.URL + TokenEndpoint url.URL + DeviceCodeEndpoint url.URL +} + +// NewOAuthConfig returns an OAuthConfig with tenant specific urls +func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" + u, err := url.Parse(activeDirectoryEndpoint) + if err != nil { + return nil, err + } + authorityURL, err := u.Parse(tenantID) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorityEndpoint: *authorityURL, + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go new file mode 100644 index 000000000..304280f66 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go @@ -0,0 +1,44 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "testing" +) + +func TestNewOAuthConfig(t *testing.T) { + const testActiveDirectoryEndpoint = "https://login.test.com" + const testTenantID = "tenant-id-test" + + config, err := NewOAuthConfig(testActiveDirectoryEndpoint, testTenantID) + if err != nil { + t.Fatalf("autorest/adal: Unexpected error while creating oauth configuration for tenant: %v.", err) + } + + expected := "https://login.test.com/tenant-id-test/oauth2/authorize?api-version=1.0" + if config.AuthorizeEndpoint.String() != expected { + t.Fatalf("autorest/adal: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.AuthorizeEndpoint) + } + + expected = "https://login.test.com/tenant-id-test/oauth2/token?api-version=1.0" + if config.TokenEndpoint.String() != expected { + t.Fatalf("autorest/adal: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.TokenEndpoint) + } + + expected = "https://login.test.com/tenant-id-test/oauth2/devicecode?api-version=1.0" + if config.DeviceCodeEndpoint.String() != expected { + t.Fatalf("autorest/adal Incorrect devicecode url for Tenant from Environment. expected(%s). 
actual(%v).", expected, config.DeviceCodeEndpoint) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 000000000..b38f4c245 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,242 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* + This file is largely based on rjw57/oauth2device's code, with the follow differences: + * scope -> resource, and only allow a single one + * receive "Message" in the DeviceCode struct and show it to users as the prompt + * azure-xplat-cli has the following behavior that this emulates: + - does not send client_secret during the token exchange + - sends resource again in the token exchange request +*/ + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + logPrefix = "autorest/adal/devicetoken:" +) + +var ( + // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow + ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) + + // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow + ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) + + // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow + ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) + + // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow + ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) + + // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow + ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + + // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow + ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) + + // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow + ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) + + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" + errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" + errTokenSendingFails = "Error occurred while sending request with device code for a token" + errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" + errStatusNotOK = "Error HTTP status != 200" +) + +// DeviceCode is the object returned by the device auth endpoint +// It contains information to 
instruct the user to complete the auth flow +type DeviceCode struct { + DeviceCode *string `json:"device_code,omitempty"` + UserCode *string `json:"user_code,omitempty"` + VerificationURL *string `json:"verification_url,omitempty"` + ExpiresIn *int64 `json:"expires_in,string,omitempty"` + Interval *int64 `json:"interval,string,omitempty"` + + Message *string `json:"message"` // Azure specific + Resource string // store the following, stored when initiating, used when exchanging + OAuthConfig OAuthConfig + ClientID string +} + +// TokenError is the object returned by the token exchange endpoint +// when something is amiss +type TokenError struct { + Error *string `json:"error,omitempty"` + ErrorCodes []int `json:"error_codes,omitempty"` + ErrorDescription *string `json:"error_description,omitempty"` + Timestamp *string `json:"timestamp,omitempty"` + TraceID *string `json:"trace_id,omitempty"` +} + +// DeviceToken is the object return by the token exchange endpoint +// It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty + } + + var code DeviceCode + err = json.Unmarshal(rb, &code) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return 
nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + return nil, ErrDeviceCodeExpired + default: + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletion(sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + time.Sleep(waitDuration) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go new file mode 100644 index 000000000..6beee161f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go @@ -0,0 +1,330 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + TestResource = "SomeResource" + TestClientID = "SomeClientID" + TestTenantID = "SomeTenantID" + TestActiveDirectoryEndpoint = "https://login.test.com/" +) + +var ( + testOAuthConfig, _ = NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) + TestOAuthConfig = *testOAuthConfig +) + +const MockDeviceCodeResponse = ` +{ + "device_code": "10000-40-1234567890", + "user_code": "ABCDEF", + "verification_url": "http://aka.ms/deviceauth", + "expires_in": "900", + "interval": "0" +} +` + +const MockDeviceTokenResponse = `{ + "access_token": "accessToken", + "refresh_token": "refreshToken", + "expires_in": "1000", + "expires_on": "2000", + "not_before": "3000", + "resource": "resource", + "token_type": "type" +} +` + +func TestDeviceCodeIncludesResource(t *testing.T) { + sender := mocks.NewSender() + sender.AppendResponse(mocks.NewResponseWithContent(MockDeviceCodeResponse)) + + code, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) + if err != nil { + t.Fatalf("adal: unexpected error initiating device auth") + } + + if code.Resource != TestResource { + t.Fatalf("adal: InitiateDeviceAuth failed to stash the resource in the DeviceCode struct") + } +} + +func TestDeviceCodeReturnsErrorIfSendingFails(t *testing.T) { + sender := mocks.NewSender() + sender.SetError(fmt.Errorf("this is an error")) + + _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) + if err == nil || !strings.Contains(err.Error(), errCodeSendingFails) { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeSendingFails, err.Error()) + } +} + +func TestDeviceCodeReturnsErrorIfBadRequest(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("doesn't matter") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) + + _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) + if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceCodeReturnsErrorIfCannotDeserializeDeviceCode(t *testing.T) { + gibberishJSON := strings.Replace(MockDeviceCodeResponse, "expires_in", "\":, :gibberish", -1) + sender := mocks.NewSender() + body := mocks.NewBody(gibberishJSON) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) + + _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) + if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceCodeReturnsErrorIfEmptyDeviceCode(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) + + _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) + if err != ErrDeviceCodeEmpty { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", ErrDeviceCodeEmpty, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func deviceCode() 
*DeviceCode { + var deviceCode DeviceCode + _ = json.Unmarshal([]byte(MockDeviceCodeResponse), &deviceCode) + deviceCode.Resource = TestResource + deviceCode.ClientID = TestClientID + return &deviceCode +} + +func TestDeviceTokenReturns(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(MockDeviceTokenResponse) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != nil { + t.Fatalf("adal: got error unexpectedly") + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfSendingFails(t *testing.T) { + sender := mocks.NewSender() + sender.SetError(fmt.Errorf("this is an error")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err == nil || !strings.Contains(err.Error(), errTokenSendingFails) { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenSendingFails, err.Error()) + } +} + +func TestDeviceTokenReturnsErrorIfServerError(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusInternalServerError, "Internal Server Error")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfCannotDeserializeDeviceToken(t *testing.T) { + gibberishJSON := strings.Replace(MockDeviceTokenResponse, "expires_in", ";:\"gibberish", -1) + sender := mocks.NewSender() + body := mocks.NewBody(gibberishJSON) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func errorDeviceTokenResponse(message string) string { + return `{ "error": "` + message + `" }` +} + +func TestDeviceTokenReturnsErrorIfAuthorizationPending(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("authorization_pending")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) + + _, err := CheckForUserCompletion(sender, deviceCode()) + if err != ErrDeviceAuthorizationPending { + t.Fatalf("!!!") + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfSlowDown(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("slow_down")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) + + _, err := CheckForUserCompletion(sender, deviceCode()) + if err != ErrDeviceSlowDown { + t.Fatalf("!!!") + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +type deviceTokenSender struct { + errorString string + attempts int +} + +func newDeviceTokenSender(deviceErrorString string) *deviceTokenSender { + return &deviceTokenSender{errorString: deviceErrorString, attempts: 0} +} + +func (s *deviceTokenSender) Do(req *http.Request) (*http.Response, error) 
{ + var resp *http.Response + if s.attempts < 1 { + s.attempts++ + resp = mocks.NewResponseWithContent(errorDeviceTokenResponse(s.errorString)) + } else { + resp = mocks.NewResponseWithContent(MockDeviceTokenResponse) + } + return resp, nil +} + +// since the above only exercise CheckForUserCompletion, we repeat the test here, +// but with the intent of showing that WaitForUserCompletion loops properly. +func TestDeviceTokenSucceedsWithIntermediateAuthPending(t *testing.T) { + sender := newDeviceTokenSender("authorization_pending") + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != nil { + t.Fatalf("unexpected error occurred") + } +} + +// same as above but with SlowDown now +func TestDeviceTokenSucceedsWithIntermediateSlowDown(t *testing.T) { + sender := newDeviceTokenSender("slow_down") + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != nil { + t.Fatalf("unexpected error occurred") + } +} + +func TestDeviceTokenReturnsErrorIfAccessDenied(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("access_denied")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != ErrDeviceAccessDenied { + t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceAccessDenied.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfCodeExpired(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("code_expired")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != ErrDeviceCodeExpired { + t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceCodeExpired.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorForUnknownError(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("unknown_error")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err == nil { + t.Fatalf("failed to get error") + } + if err != ErrDeviceGeneric { + t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceGeneric.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfTokenEmptyAndStatusOK(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != ErrOAuthTokenEmpty { + t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrOAuthTokenEmpty.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go new file mode 100644 index 000000000..5e02d52ac --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go @@ -0,0 +1,20 @@ +// +build !windows + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// msiPath is the path to the MSI Extension settings file (to discover the endpoint) +var msiPath = "/var/lib/waagent/ManagedIdentity-Settings" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go new file mode 100644 index 000000000..261b56882 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "os" + "strings" +) + +// msiPath is the path to the MSI Extension settings file (to discover the endpoint) +var msiPath = strings.Join([]string{os.Getenv("SystemDrive"), "WindowsAzure/Config/ManagedIdentity-Settings"}, "/") diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 000000000..9e15f2751 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,73 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// LoadToken restores a Token object from a file located at 'path'. +func LoadToken(path string) (*Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var token Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&token); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) + } + return &token, nil +} + +// SaveToken persists an oauth token at the given location on disk. +// It moves the new file into place so it can safely be used to replace an existing file +// that maybe accessed by multiple processes. 
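+// The token is first written to a temporary file in the destination directory and then
+// renamed over the target, so readers never observe a partially written cache file.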
+func SaveToken(path string, mode os.FileMode, token Token) error { + dir := filepath.Dir(path) + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) + } + + newFile, err := ioutil.TempFile(dir, "token") + if err != nil { + return fmt.Errorf("failed to create the temp file to write the token: %v", err) + } + tempPath := newFile.Name() + + if err := json.NewEncoder(newFile).Encode(token); err != nil { + return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) + } + if err := newFile.Close(); err != nil { + return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) + } + + // Atomic replace to avoid multi-writer file corruptions + if err := os.Rename(tempPath, path); err != nil { + return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) + } + if err := os.Chmod(path, mode); err != nil { + return fmt.Errorf("failed to chmod the token file %s: %v", path, err) + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go new file mode 100644 index 000000000..a9c287c6d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go @@ -0,0 +1,171 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "encoding/json" + "io/ioutil" + "os" + "path" + "reflect" + "runtime" + "strings" + "testing" +) + +const MockTokenJSON string = `{ + "access_token": "accessToken", + "refresh_token": "refreshToken", + "expires_in": "1000", + "expires_on": "2000", + "not_before": "3000", + "resource": "resource", + "token_type": "type" +}` + +var TestToken = Token{ + AccessToken: "accessToken", + RefreshToken: "refreshToken", + ExpiresIn: "1000", + ExpiresOn: "2000", + NotBefore: "3000", + Resource: "resource", + Type: "type", +} + +func writeTestTokenFile(t *testing.T, suffix string, contents string) *os.File { + f, err := ioutil.TempFile(os.TempDir(), suffix) + if err != nil { + t.Fatalf("azure: unexpected error when creating temp file: %v", err) + } + defer f.Close() + + _, err = f.Write([]byte(contents)) + if err != nil { + t.Fatalf("azure: unexpected error when writing temp test file: %v", err) + } + + return f +} + +func TestLoadToken(t *testing.T) { + f := writeTestTokenFile(t, "testloadtoken", MockTokenJSON) + defer os.Remove(f.Name()) + + expectedToken := TestToken + actualToken, err := LoadToken(f.Name()) + if err != nil { + t.Fatalf("azure: unexpected error loading token from file: %v", err) + } + + if *actualToken != expectedToken { + t.Fatalf("azure: failed to decode properly expected(%v) actual(%v)", expectedToken, *actualToken) + } + + // test that LoadToken closes the file properly + err = SaveToken(f.Name(), 0600, *actualToken) + if err != nil { + t.Fatalf("azure: could not save token after LoadToken: %v", err) + } +} + +func TestLoadTokenFailsBadPath(t *testing.T) { + _, err := LoadToken("/tmp/this_file_should_never_exist_really") + expectedSubstring := "failed to open file" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error()) + } +} + +func TestLoadTokenFailsBadJson(t *testing.T) { + gibberishJSON := strings.Replace(MockTokenJSON, "expires_on", ";:\"gibberish", -1) + f := writeTestTokenFile(t, "testloadtokenfailsbadjson", gibberishJSON) + defer os.Remove(f.Name()) + + _, err := LoadToken(f.Name()) + expectedSubstring := "failed to decode contents of file" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error()) + } +} + +func token() *Token { + var token Token + json.Unmarshal([]byte(MockTokenJSON), &token) + return &token +} + +func TestSaveToken(t *testing.T) { + f, err := ioutil.TempFile("", "testloadtoken") + if err != nil { + t.Fatalf("azure: unexpected error when creating temp file: %v", err) + } + defer os.Remove(f.Name()) + f.Close() + + mode := os.ModePerm & 0642 + err = SaveToken(f.Name(), mode, *token()) + if err != nil { + t.Fatalf("azure: unexpected error saving token to file: %v", err) + } + fi, err := os.Stat(f.Name()) // open a new stat as held ones are not fresh + if err != nil { + t.Fatalf("azure: stat failed: %v", err) + } + if runtime.GOOS != "windows" { // permissions don't work on Windows + if perm := fi.Mode().Perm(); perm != mode { + t.Fatalf("azure: wrong file perm. 
got:%s; expected:%s; file: %s", perm, mode, f.Name())
+		}
+	}
+
+	var actualToken Token
+	var expectedToken Token
+
+	json.Unmarshal([]byte(MockTokenJSON), &expectedToken)
+
+	contents, err := ioutil.ReadFile(f.Name())
+	if err != nil {
+		t.Fatal("azure: failed to read serialized token file")
+	}
+	json.Unmarshal(contents, &actualToken)
+
+	if !reflect.DeepEqual(actualToken, expectedToken) {
+		t.Fatal("azure: token was not serialized correctly")
+	}
+}
+
+func TestSaveTokenFailsNoPermission(t *testing.T) {
+	pathWhereWeShouldntHavePermission := "/usr/thiswontwork/atall"
+	if runtime.GOOS == "windows" {
+		pathWhereWeShouldntHavePermission = path.Join(os.Getenv("windir"), "system32\\mytokendir\\mytoken")
+	}
+	err := SaveToken(pathWhereWeShouldntHavePermission, 0644, *token())
+	expectedSubstring := "failed to create directory"
+	if err == nil || !strings.Contains(err.Error(), expectedSubstring) {
+		t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err)
+	}
+}
+
+func TestSaveTokenFailsCantCreate(t *testing.T) {
+	tokenPath := "/thiswontwork"
+	if runtime.GOOS == "windows" {
+		tokenPath = path.Join(os.Getenv("windir"), "system32")
+	}
+	err := SaveToken(tokenPath, 0644, *token())
+	expectedSubstring := "failed to create the temp file to write the token"
+	if err == nil || !strings.Contains(err.Error(), expectedSubstring) {
+		t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err)
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
new file mode 100644
index 000000000..0e5ad14d3
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -0,0 +1,60 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"net/http"
+)
+
+const (
+	contentType      = "Content-Type"
+	mimeTypeFormPost = "application/x-www-form-urlencoded"
+)
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(&http.Client{}, decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+ for _, decorate := range decorators {
+ s = decorate(s)
+ }
+ return s
+}
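+
+// exampleUserAgentDecorator is a minimal illustrative sketch (an editorial
+// addition, not part of the upstream library) of how a SendDecorator composes:
+// it wraps the wrapped Sender so every outgoing request carries a fixed
+// User-Agent header before being forwarded. It assumes requests were built
+// with http.NewRequest so r.Header is non-nil.
+func exampleUserAgentDecorator() Sender {
+ withUserAgent := func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ r.Header.Set("User-Agent", "example-agent") // mutate, then pass along
+ return s.Do(r)
+ })
+ }
+ // CreateSender applies the decorator around the default http.Client.
+ return CreateSender(withUserAgent)
+}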
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
new file mode 100644
index 000000000..67dd97a18
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -0,0 +1,427 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/dgrijalva/jwt-go"
+)
+
+const (
+ defaultRefresh = 5 * time.Minute
+
+ // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
+ OAuthGrantTypeDeviceCode = "device_code"
+
+ // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
+ OAuthGrantTypeClientCredentials = "client_credentials"
+
+ // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
+ OAuthGrantTypeRefreshToken = "refresh_token"
+
+ // metadataHeader is the header required by MSI extension
+ metadataHeader = "Metadata"
+)
+
+// OAuthTokenProvider is an interface which should be implemented by an access token retriever
+type OAuthTokenProvider interface {
+ OAuthToken() string
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+ Refresh() error
+ RefreshExchange(resource string) error
+ EnsureFresh() error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// Token encapsulates the access token used to authorize Azure requests.
+type Token struct {
+ AccessToken string `json:"access_token"`
+ RefreshToken string `json:"refresh_token"`
+
+ ExpiresIn string `json:"expires_in"`
+ ExpiresOn string `json:"expires_on"`
+ NotBefore string `json:"not_before"`
+
+ Resource string `json:"resource"`
+ Type string `json:"token_type"`
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+ s, err := strconv.Atoi(t.ExpiresOn)
+ if err != nil {
+ s = -3600
+ }
+
+ expiration := date.NewUnixTimeFromSeconds(float64(s))
+
+ return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+ return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+ return !t.Expires().After(time.Now().Add(d))
+}
+
+// OAuthToken returns the current access token.
+func (t *Token) OAuthToken() string {
+ return t.AccessToken
+}
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret,
+// meaning it is not valid for fetching a fresh token. This is used by manually created tokens.
type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It only returns an error for the ServicePrincipalNoSecret type
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an oAuth token.
+type ServicePrincipalSecret interface {
+ SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
+type ServicePrincipalTokenSecret struct {
+ ClientSecret string
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It will populate the form submitted during oAuth Token Acquisition using the client_secret.
+func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ v.Set("client_secret", tokenSecret.ClientSecret)
+ return nil
+}
+
+// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs.
+type ServicePrincipalCertificateSecret struct {
+ Certificate *x509.Certificate
+ PrivateKey *rsa.PrivateKey
+}
+
+// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension.
+type ServicePrincipalMSISecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ return nil
+}
+
+// SignJwt returns the JWT signed with the certificate's private key.
+func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) {
+ hasher := sha1.New()
+ _, err := hasher.Write(secret.Certificate.Raw)
+ if err != nil {
+ return "", err
+ }
+
+ thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
+
+ // The jti (JWT ID) claim provides a unique identifier for the JWT.
+ jti := make([]byte, 20)
+ _, err = rand.Read(jti)
+ if err != nil {
+ return "", err
+ }
+
+ token := jwt.New(jwt.SigningMethodRS256)
+ token.Header["x5t"] = thumbprint
+ token.Claims = jwt.MapClaims{
+ "aud": spt.oauthConfig.TokenEndpoint.String(),
+ "iss": spt.clientID,
+ "sub": spt.clientID,
+ "jti": base64.URLEncoding.EncodeToString(jti),
+ "nbf": time.Now().Unix(),
+ "exp": time.Now().Add(time.Hour * 24).Unix(),
+ }
+
+ signedString, err := token.SignedString(secret.PrivateKey)
+ return signedString, err
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate.
+func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ jwt, err := secret.SignJwt(spt)
+ if err != nil {
+ return err
+ }
+
+ v.Set("client_assertion", jwt)
+ v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
+ return nil
+}
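+
+// staticAssertionSecret is a minimal illustrative sketch (an editorial
+// addition, not part of the upstream library) of a user-defined
+// ServicePrincipalSecret: any type that can fill in the token-request form
+// satisfies the interface. Here a pre-computed client assertion is injected
+// verbatim.
+type staticAssertionSecret struct {
+ assertion string
+}
+
+// SetAuthenticationValues copies the stored assertion into the request form.
+func (secret *staticAssertionSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ v.Set("client_assertion", secret.assertion)
+ v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
+ return nil
+}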
+
+// ServicePrincipalToken encapsulates a Token created for a Service Principal.
+type ServicePrincipalToken struct {
+ Token
+
+ secret ServicePrincipalSecret
+ oauthConfig OAuthConfig
+ clientID string
+ resource string
+ autoRefresh bool
+ refreshWithin time.Duration
+ sender Sender
+
+ refreshCallbacks []TokenRefreshCallback
+}
+
+// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ spt := &ServicePrincipalToken{
+ oauthConfig: oauthConfig,
+ secret: secret,
+ clientID: id,
+ resource: resource,
+ autoRefresh: true,
+ refreshWithin: defaultRefresh,
+ sender: &http.Client{},
+ refreshCallbacks: callbacks,
+ }
+ return spt, nil
+}
+
+// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token.
+func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ spt, err := NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalNoSecret{},
+ callbacks...)
+ if err != nil {
+ return nil, err
+ }
+
+ spt.Token = token
+
+ return spt, nil
+}
+
+// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal
+// credentials scoped to the named resource.
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ return NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalTokenSecret{
+ ClientSecret: secret,
+ },
+ callbacks...,
+ )
+}
+
+// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied
+// certificate and RSA private key.
+func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ return NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalCertificateSecret{
+ PrivateKey: privateKey,
+ Certificate: certificate,
+ },
+ callbacks...,
+ )
+}
+
+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
+func GetMSIVMEndpoint() (string, error) {
+ return getMSIVMEndpoint(msiPath)
+}
+
+func getMSIVMEndpoint(path string) (string, error) {
+ // Read MSI settings
+ bytes, err := ioutil.ReadFile(path)
+ if err != nil {
+ return "", err
+ }
+ msiSettings := struct {
+ URL string `json:"url"`
+ }{}
+ err = json.Unmarshal(bytes, &msiSettings)
+ if err != nil {
+ return "", err
+ }
+
+ return msiSettings.URL, nil
+}
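+
+// exampleAcquireToken is a minimal usage sketch (an editorial addition, not
+// part of the upstream library) tying the constructors above together; the
+// endpoint, tenant, client, and resource values below are placeholders.
+func exampleAcquireToken() (string, error) {
+ oauthConfig, err := NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant-id")
+ if err != nil {
+ return "", err
+ }
+ spt, err := NewServicePrincipalToken(*oauthConfig, "my-client-id", "my-client-secret", "https://management.azure.com/")
+ if err != nil {
+ return "", err
+ }
+ // EnsureFresh performs a refresh only when no valid token is held.
+ if err := spt.EnsureFresh(); err != nil {
+ return "", err
+ }
+ return spt.OAuthToken(), nil
+}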
+
+// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ // We set the oauth config token endpoint to be MSI's endpoint
+ msiEndpointURL, err := url.Parse(msiEndpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ oauthConfig, err := NewOAuthConfig(msiEndpointURL.String(), "")
+ if err != nil {
+ return nil, err
+ }
+
+ spt := &ServicePrincipalToken{
+ oauthConfig: *oauthConfig,
+ secret: &ServicePrincipalMSISecret{},
+ resource: resource,
+ autoRefresh: true,
+ refreshWithin: defaultRefresh,
+ sender: &http.Client{},
+ refreshCallbacks: callbacks,
+ }
+
+ return spt, nil
+}
+
+// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
+// SetRefreshWithin) and the autoRefresh flag is on.
+func (spt *ServicePrincipalToken) EnsureFresh() error {
+ if spt.autoRefresh && spt.WillExpireIn(spt.refreshWithin) {
+ return spt.Refresh()
+ }
+ return nil
+}
+
+// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
+func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
+ if spt.refreshCallbacks != nil {
+ for _, callback := range spt.refreshCallbacks {
+ err := callback(spt.Token)
+ if err != nil {
+ return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err)
+ }
+ }
+ }
+ return nil
+}
+
+// Refresh obtains a fresh token for the Service Principal.
+func (spt *ServicePrincipalToken) Refresh() error {
+ return spt.refreshInternal(spt.resource)
+}
+
+// RefreshExchange refreshes the token, but for a different resource.
+func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
+ return spt.refreshInternal(resource)
+}
+
+func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
+ v := url.Values{}
+ v.Set("client_id", spt.clientID)
+ v.Set("resource", resource)
+
+ if spt.RefreshToken != "" {
+ v.Set("grant_type", OAuthGrantTypeRefreshToken)
+ v.Set("refresh_token", spt.RefreshToken)
+ } else {
+ v.Set("grant_type", OAuthGrantTypeClientCredentials)
+ err := spt.secret.SetAuthenticationValues(spt, &v)
+ if err != nil {
+ return err
+ }
+ }
+
+ s := v.Encode()
+ body := ioutil.NopCloser(strings.NewReader(s))
+ req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), body)
+ if err != nil {
+ return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
+ }
+
+ req.ContentLength = int64(len(s))
+ req.Header.Set(contentType, mimeTypeFormPost)
+ if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok {
+ req.Header.Set(metadataHeader, "true")
+ }
+ resp, err := spt.sender.Do(req)
+ if err != nil {
+ return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
+ }
+
+ defer resp.Body.Close()
+ rb, err := ioutil.ReadAll(resp.Body)
+
+ if resp.StatusCode != http.StatusOK {
+ if err != nil {
+ return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode)
+ }
+ return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb))
+ }
+
+ if err != nil {
+ return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err)
Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + var token Token + err = json.Unmarshal(rb, &token) + if err != nil { + return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) + } + + spt.Token = token + + return spt.InvokeRefreshCallbacks(token) +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.autoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.refreshWithin = d + return +} + +// SetSender sets the http.Client used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go new file mode 100644 index 000000000..73c848ba0 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go @@ -0,0 +1,654 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go
new file mode 100644
index 000000000..73c848ba0
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go
@@ -0,0 +1,654 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "net/http"
+ "net/url"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/mocks"
+)
+
+const (
+ defaultFormData = "client_id=id&client_secret=secret&grant_type=client_credentials&resource=resource"
+ defaultManualFormData = "client_id=id&grant_type=refresh_token&refresh_token=refreshtoken&resource=resource"
+)
+
+func TestTokenExpires(t *testing.T) {
+ tt := time.Now().Add(5 * time.Second)
+ tk := newTokenExpiresAt(tt)
+
+ // ExpiresOn stores whole seconds, so compare against the expectation truncated to a second.
+ if !tk.Expires().Equal(tt.Truncate(time.Second)) {
+ t.Fatalf("adal: Token#Expires miscalculated expiration time -- received %v, expected %v", tk.Expires(), tt.Truncate(time.Second))
+ }
+}
+
+func TestTokenIsExpired(t *testing.T) {
+ tk := newTokenExpiresAt(time.Now().Add(-5 * time.Second))
+
+ if !tk.IsExpired() {
+ t.Fatalf("adal: Token#IsExpired failed to mark a stale token as expired -- now %v, token expires at %v",
+ time.Now().UTC(), tk.Expires())
+ }
+}
+
+func TestTokenIsExpiredUninitialized(t *testing.T) {
+ tk := &Token{}
+
+ if !tk.IsExpired() {
+ t.Fatalf("adal: An uninitialized Token failed to mark itself as expired (expiration time %v)", tk.Expires())
+ }
+}
+
+func TestTokenIsNotExpired(t *testing.T) {
+ tk := newTokenExpiresAt(time.Now().Add(1000 * time.Second))
+
+ if tk.IsExpired() {
+ t.Fatalf("adal: Token marked a fresh token as expired -- now %v, token expires at %v", time.Now().UTC(), tk.Expires())
+ }
+}
+
+func TestTokenWillExpireIn(t *testing.T) {
+ d := 5 * time.Second
+ tk := newTokenExpiresIn(d)
+
+ if !tk.WillExpireIn(d) {
+ t.Fatal("adal: Token#WillExpireIn mismeasured expiration time")
+ }
+}
+
+func TestServicePrincipalTokenSetAutoRefresh(t *testing.T) {
+ spt := newServicePrincipalToken()
+
+ if !spt.autoRefresh {
+ t.Fatal("adal: ServicePrincipalToken did not default to automatic token refreshing")
+ }
+
+ spt.SetAutoRefresh(false)
+ if spt.autoRefresh {
+ t.Fatal("adal: ServicePrincipalToken#SetAutoRefresh did not disable automatic token refreshing")
+ }
+}
+
+func TestServicePrincipalTokenSetRefreshWithin(t *testing.T) {
+ spt := newServicePrincipalToken()
+
+ if spt.refreshWithin != defaultRefresh {
+ t.Fatal("adal: ServicePrincipalToken did not correctly set the default refresh interval")
+ }
+
+ spt.SetRefreshWithin(2 * defaultRefresh)
+ if spt.refreshWithin != 2*defaultRefresh {
+ t.Fatal("adal: ServicePrincipalToken#SetRefreshWithin did not set the refresh interval")
+ }
+}
+
+func TestServicePrincipalTokenSetSender(t *testing.T) {
+ spt := newServicePrincipalToken()
+
+ c := &http.Client{}
+ spt.SetSender(c)
+ if !reflect.DeepEqual(c, spt.sender) {
+ t.Fatal("adal: ServicePrincipalToken#SetSender did not set the sender")
+ }
+}
+
+func TestServicePrincipalTokenRefreshUsesPOST(t *testing.T) {
+ spt := newServicePrincipalToken()
+
+ body := mocks.NewBody(newTokenJSON("test", "test"))
+ resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")
+
+ c := mocks.NewSender()
+ s := DecorateSender(c,
+ (func() SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ if r.Method != "POST" {
+ t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method)
+ }
+ return resp, nil
+ })
+ }
+ })())
+ spt.SetSender(s)
+ err := spt.Refresh()
+ if err != nil {
+ t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err)
(%v)", err) + } + + if body.IsOpen() { + t.Fatalf("the response was not closed!") + } +} + +func TestServicePrincipalTokenFromMSIRefreshUsesPOST(t *testing.T) { + resource := "https://resource" + cb := func(token Token) error { return nil } + + spt, err := NewServicePrincipalTokenFromMSI("http://msiendpoint/", resource, cb) + if err != nil { + t.Fatalf("Failed to get MSI SPT: %v", err) + } + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.Method != "POST" { + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method) + } + if h := r.Header.Get("Metadata"); h != "true" { + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set Metadata header for MSI") + } + return resp, nil + }) + } + })()) + spt.SetSender(s) + err = spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } + + if body.IsOpen() { + t.Fatalf("the response was not closed!") + } +} + +func TestServicePrincipalTokenRefreshSetsMimeType(t *testing.T) { + spt := newServicePrincipalToken() + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.Header.Get(http.CanonicalHeaderKey("Content-Type")) != "application/x-www-form-urlencoded" { + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set Content-Type -- expected %v, received %v", + "application/x-form-urlencoded", + r.Header.Get(http.CanonicalHeaderKey("Content-Type"))) + } + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } +} + +func TestServicePrincipalTokenRefreshSetsURL(t *testing.T) { + spt := newServicePrincipalToken() + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.URL.String() != TestOAuthConfig.TokenEndpoint.String() { + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the URL -- expected %v, received %v", + TestOAuthConfig.TokenEndpoint, r.URL) + } + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } +} + +func testServicePrincipalTokenRefreshSetsBody(t *testing.T, spt *ServicePrincipalToken, f func(*testing.T, []byte)) { + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("adal: Failed to read body of Service Principal token request 
(%v)", err) + } + f(t, b) + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } +} + +func TestServicePrincipalTokenManualRefreshSetsBody(t *testing.T) { + sptManual := newServicePrincipalTokenManual() + testServicePrincipalTokenRefreshSetsBody(t, sptManual, func(t *testing.T, b []byte) { + if string(b) != defaultManualFormData { + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", + defaultManualFormData, string(b)) + } + }) +} + +func TestServicePrincipalTokenCertficateRefreshSetsBody(t *testing.T) { + sptCert := newServicePrincipalTokenCertificate(t) + testServicePrincipalTokenRefreshSetsBody(t, sptCert, func(t *testing.T, b []byte) { + body := string(b) + + values, _ := url.ParseQuery(body) + if values["client_assertion_type"][0] != "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" || + values["client_id"][0] != "id" || + values["grant_type"][0] != "client_credentials" || + values["resource"][0] != "resource" { + t.Fatalf("adal: ServicePrincipalTokenCertificate#Refresh did not correctly set the HTTP Request Body.") + } + }) +} + +func TestServicePrincipalTokenSecretRefreshSetsBody(t *testing.T) { + spt := newServicePrincipalToken() + testServicePrincipalTokenRefreshSetsBody(t, spt, func(t *testing.T, b []byte) { + if string(b) != defaultFormData { + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", + defaultFormData, string(b)) + } + + }) +} + +func TestServicePrincipalTokenRefreshClosesRequestBody(t *testing.T) { + spt := newServicePrincipalToken() + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } + if resp.Body.(*mocks.Body).IsOpen() { + t.Fatal("adal: ServicePrincipalToken#Refresh failed to close the HTTP Response Body") + } +} + +func TestServicePrincipalTokenRefreshRejectsResponsesWithStatusNotOK(t *testing.T) { + spt := newServicePrincipalToken() + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusUnauthorized, "Unauthorized") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err == nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh should reject a response with status != %d", http.StatusOK) + } +} + +func TestServicePrincipalTokenRefreshRejectsEmptyBody(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err == nil { + t.Fatal("adal: ServicePrincipalToken#Refresh should reject an empty token") + } +} + +func 
+func TestServicePrincipalTokenRefreshPropagatesErrors(t *testing.T) {
+ spt := newServicePrincipalToken()
+
+ c := mocks.NewSender()
+ c.SetError(fmt.Errorf("Faux Error"))
+ spt.SetSender(c)
+
+ err := spt.Refresh()
+ if err == nil {
+ t.Fatal("adal: Failed to propagate the request error")
+ }
+}
+
+func TestServicePrincipalTokenRefreshReturnsErrorIfNotOk(t *testing.T) {
+ spt := newServicePrincipalToken()
+
+ c := mocks.NewSender()
+ c.AppendResponse(mocks.NewResponseWithStatus("401 NotAuthorized", http.StatusUnauthorized))
+ spt.SetSender(c)
+
+ err := spt.Refresh()
+ if err == nil {
+ t.Fatalf("adal: Failed to return an error when receiving a status code other than HTTP %d", http.StatusOK)
+ }
+}
+
+func TestServicePrincipalTokenRefreshUnmarshals(t *testing.T) {
+ spt := newServicePrincipalToken()
+
+ expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(date.UnixEpoch()).Seconds()))
+ j := newTokenJSON(expiresOn, "resource")
+ resp := mocks.NewResponseWithContent(j)
+ c := mocks.NewSender()
+ s := DecorateSender(c,
+ (func() SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ return resp, nil
+ })
+ }
+ })())
+ spt.SetSender(s)
+
+ err := spt.Refresh()
+ if err != nil {
+ t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err)
+ } else if spt.AccessToken != "accessToken" ||
+ spt.ExpiresIn != "3600" ||
+ spt.ExpiresOn != expiresOn ||
+ spt.NotBefore != expiresOn ||
+ spt.Resource != "resource" ||
+ spt.Type != "Bearer" {
+ t.Fatalf("adal: ServicePrincipalToken#Refresh failed to correctly unmarshal the JSON -- expected %v, received %v",
+ j, *spt)
+ }
+}
+
+func TestServicePrincipalTokenEnsureFreshRefreshes(t *testing.T) {
+ spt := newServicePrincipalToken()
+ expireToken(&spt.Token)
+
+ body := mocks.NewBody(newTokenJSON("test", "test"))
+ resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")
+
+ f := false
+ c := mocks.NewSender()
+ s := DecorateSender(c,
+ (func() SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ f = true
+ return resp, nil
+ })
+ }
+ })())
+ spt.SetSender(s)
+ err := spt.EnsureFresh()
+ if err != nil {
+ t.Fatalf("adal: ServicePrincipalToken#EnsureFresh returned an unexpected error (%v)", err)
+ }
+ if !f {
+ t.Fatal("adal: ServicePrincipalToken#EnsureFresh failed to call Refresh for stale token")
+ }
+}
+
+func TestServicePrincipalTokenEnsureFreshSkipsIfFresh(t *testing.T) {
+ spt := newServicePrincipalToken()
+ setTokenToExpireIn(&spt.Token, 1000*time.Second)
+
+ f := false
+ c := mocks.NewSender()
+ s := DecorateSender(c,
+ (func() SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ f = true
+ return mocks.NewResponse(), nil
+ })
+ }
+ })())
+ spt.SetSender(s)
+ err := spt.EnsureFresh()
+ if err != nil {
+ t.Fatalf("adal: ServicePrincipalToken#EnsureFresh returned an unexpected error (%v)", err)
+ }
+ if f {
+ t.Fatal("adal: ServicePrincipalToken#EnsureFresh invoked Refresh for fresh token")
+ }
+}
+
+func TestRefreshCallback(t *testing.T) {
+ callbackTriggered := false
+ spt := newServicePrincipalToken(func(Token) error {
+ callbackTriggered = true
+ return nil
+ })
+
+ expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(date.UnixEpoch()).Seconds()))
+
+ sender := mocks.NewSender()
+ j := newTokenJSON(expiresOn, "resource")
+ sender.AppendResponse(mocks.NewResponseWithContent(j))
+ spt.SetSender(sender)
+ err := spt.Refresh()
+ if err != nil {
+ t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err)
+ }
+ if !callbackTriggered {
+ t.Fatalf("adal: RefreshCallback failed to trigger the callback")
+ }
+}
+
+func TestRefreshCallbackErrorPropagates(t *testing.T) {
+ errorText := "this is an error text"
+ spt := newServicePrincipalToken(func(Token) error {
+ return fmt.Errorf(errorText)
+ })
+
+ expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(date.UnixEpoch()).Seconds()))
+
+ sender := mocks.NewSender()
+ j := newTokenJSON(expiresOn, "resource")
+ sender.AppendResponse(mocks.NewResponseWithContent(j))
+ spt.SetSender(sender)
+ err := spt.Refresh()
+
+ if err == nil || !strings.Contains(err.Error(), errorText) {
+ t.Fatalf("adal: RefreshCallback failed to propagate error")
+ }
+}
+
+// This demonstrates the danger of a manual token without a refresh token.
+func TestServicePrincipalTokenManualRefreshFailsWithoutRefresh(t *testing.T) {
+ spt := newServicePrincipalTokenManual()
+ spt.RefreshToken = ""
+ err := spt.Refresh()
+ if err == nil {
+ t.Fatalf("adal: ServicePrincipalToken#Refresh should have failed with a ManualTokenSecret without a refresh token")
+ }
+}
+
+func TestNewServicePrincipalTokenFromMSI(t *testing.T) {
+ resource := "https://resource"
+ cb := func(token Token) error { return nil }
+
+ spt, err := NewServicePrincipalTokenFromMSI("http://msiendpoint/", resource, cb)
+ if err != nil {
+ t.Fatalf("Failed to get MSI SPT: %v", err)
+ }
+
+ // check some of the SPT fields
+ if _, ok := spt.secret.(*ServicePrincipalMSISecret); !ok {
+ t.Fatal("SPT secret was not of MSI type")
+ }
+
+ if spt.resource != resource {
+ t.Fatal("SPT came back with incorrect resource")
+ }
+
+ if len(spt.refreshCallbacks) != 1 {
+ t.Fatal("SPT had incorrect refresh callbacks.")
+ }
+}
+
+func TestGetVMEndpoint(t *testing.T) {
+ tempSettingsFile, err := ioutil.TempFile("", "ManagedIdentity-Settings")
+ if err != nil {
+ t.Fatal("Couldn't write temp settings file")
+ }
+ defer os.Remove(tempSettingsFile.Name())
+
+ settingsContents := []byte(`{
+ "url": "http://msiendpoint/"
+ }`)
+
+ if _, err := tempSettingsFile.Write(settingsContents); err != nil {
+ t.Fatal("Couldn't fill temp settings file")
+ }
+
+ endpoint, err := getMSIVMEndpoint(tempSettingsFile.Name())
+ if err != nil {
+ t.Fatal("Couldn't get VM endpoint")
+ }
+
+ if endpoint != "http://msiendpoint/" {
+ t.Fatal("Didn't get correct endpoint")
+ }
+}
+
+func newToken() *Token {
+ return &Token{
+ AccessToken: "ASECRETVALUE",
+ Resource: "https://azure.microsoft.com/",
+ Type: "Bearer",
+ }
+}
+
+func newTokenJSON(expiresOn string, resource string) string {
+ return fmt.Sprintf(`{
+ "access_token" : "accessToken",
+ "expires_in" : "3600",
+ "expires_on" : "%s",
+ "not_before" : "%s",
+ "resource" : "%s",
+ "token_type" : "Bearer"
+ }`,
+ expiresOn, expiresOn, resource)
+}
+
+func newTokenExpiresIn(expireIn time.Duration) *Token {
+ return setTokenToExpireIn(newToken(), expireIn)
+}
+
+func newTokenExpiresAt(expireAt time.Time) *Token {
+ return setTokenToExpireAt(newToken(), expireAt)
+}
+
+func expireToken(t *Token) *Token {
+ return setTokenToExpireIn(t, 0)
+}
+
+func setTokenToExpireAt(t *Token, expireAt time.Time) *Token {
+ t.ExpiresIn = "3600"
+ t.ExpiresOn = strconv.Itoa(int(expireAt.Sub(date.UnixEpoch()).Seconds()))
+ t.NotBefore = t.ExpiresOn
+ return t
+}
+
+func setTokenToExpireIn(t *Token, expireIn time.Duration) *Token {
+ return setTokenToExpireAt(t, time.Now().Add(expireIn))
+}
+
+func newServicePrincipalToken(callbacks ...TokenRefreshCallback) *ServicePrincipalToken {
+ spt, _ := NewServicePrincipalToken(TestOAuthConfig, "id", "secret", "resource", callbacks...)
+ return spt
+}
+
+func newServicePrincipalTokenManual() *ServicePrincipalToken {
+ token := newToken()
+ token.RefreshToken = "refreshtoken"
+ spt, _ := NewServicePrincipalTokenFromManualToken(TestOAuthConfig, "id", "resource", *token)
+ return spt
+}
+
+func newServicePrincipalTokenCertificate(t *testing.T) *ServicePrincipalToken {
+ template := x509.Certificate{
+ SerialNumber: big.NewInt(0),
+ Subject: pkix.Name{CommonName: "test"},
+ BasicConstraintsValid: true,
+ }
+ privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ certificateBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ certificate, err := x509.ParseCertificate(certificateBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ spt, _ := NewServicePrincipalTokenFromCertificate(TestOAuthConfig, "id", certificate, privateKey, "resource")
+ return spt
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
new file mode 100644
index 000000000..71e3ced2d
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
@@ -0,0 +1,181 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/Azure/go-autorest/autorest/adal"
+)
+
+const (
+ bearerChallengeHeader = "Www-Authenticate"
+ bearer = "Bearer"
+ tenantID = "tenantID"
+)
+
+// Authorizer is the interface that provides a PrepareDecorator used to supply request
+// authorization. Most often, the Authorizer decorator runs last so it has access to the full
+// state of the formed HTTP request.
+type Authorizer interface {
+ WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+ return WithNothing()
+}
+
+// BearerAuthorizer implements the bearer authorization
+type BearerAuthorizer struct {
+ tokenProvider adal.OAuthTokenProvider
+}
+
+// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider
+func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
+ return &BearerAuthorizer{tokenProvider: tp}
+}
+
+func (ba *BearerAuthorizer) withBearerAuthorization() PrepareDecorator {
+ return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the token. 
+// +// By default, the token will be automatically refreshed through the Refresher interface. +func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + refresher, ok := ba.tokenProvider.(adal.Refresher) + if ok { + err := refresher.EnsureFresh() + if err != nil { + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", nil, + "Failed to refresh the Token for request to %s", r.URL) + } + } + return (ba.withBearerAuthorization()(p)).Prepare(r) + }) + } +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if sender == nil { + sender = &http.Client{} + } + return &BearerAuthorizerCallback{sender: sender, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. 
+ rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err == nil && resp.StatusCode == 401 { + defer resp.Body.Close() + if hasBearerChallenge(resp) { + bc, err := newBearerChallenge(resp) + if err != nil { + return r, err + } + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return ba.WithAuthorization()(p).Prepare(r) + } + } + } + return r, err + }) + } +} + +// returns true if the HTTP response contains a bearer challenge +func hasBearerChallenge(resp *http.Response) bool { + authHeader := resp.Header.Get(bearerChallengeHeader) + if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { + return false + } + return true +} + +type bearerChallenge struct { + values map[string]string +} + +func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { + challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) + trimmedChallenge := challenge[len(bearer)+1:] + + // challenge is a set of key=value pairs that are comma delimited + pairs := strings.Split(trimmedChallenge, ",") + if len(pairs) < 1 { + err = fmt.Errorf("challenge '%s' contains no pairs", challenge) + return bc, err + } + + bc.values = make(map[string]string) + for i := range pairs { + trimmedPair := strings.TrimSpace(pairs[i]) + pair := strings.Split(trimmedPair, "=") + if len(pair) == 2 { + // remove the enclosing quotes + key := strings.Trim(pair[0], "\"") + value := strings.Trim(pair[1], "\"") + + switch key { + case "authorization", "authorization_uri": + // strip the tenant ID from the authorization URL + asURL, err := url.Parse(value) + if err != nil { + return bc, err + } + bc.values[tenantID] = asURL.Path[1:] + default: + bc.values[key] = value + } + } + } + + return bc, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_test.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_test.go new file mode 100644 index 000000000..635fcfd7f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_test.go @@ -0,0 +1,188 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + TestTenantID = "TestTenantID" + TestActiveDirectoryEndpoint = "https://login/test.com/" +) + +func TestWithAuthorizer(t *testing.T) { + r1 := mocks.NewRequest() + + na := &NullAuthorizer{} + r2, err := Prepare(r1, + na.WithAuthorization()) + if err != nil { + t.Fatalf("autorest: NullAuthorizer#WithAuthorization returned an unexpected error (%v)", err) + } else if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: NullAuthorizer#WithAuthorization modified the request -- received %v, expected %v", r2, r1) + } +} + +func TestTokenWithAuthorization(t *testing.T) { + token := &adal.Token{ + AccessToken: "TestToken", + Resource: "https://azure.microsoft.com/", + Type: "Bearer", + } + + ba := NewBearerAuthorizer(token) + req, err := Prepare(&http.Request{}, ba.WithAuthorization()) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", token.AccessToken) { + t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header") + } +} + +func TestServicePrincipalTokenWithAuthorizationNoRefresh(t *testing.T) { + oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", nil) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + spt.SetAutoRefresh(false) + s := mocks.NewSender() + spt.SetSender(s) + + ba := NewBearerAuthorizer(spt) + req, err := Prepare(mocks.NewRequest(), ba.WithAuthorization()) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.AccessToken) { + t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header") + } +} + +func TestServicePrincipalTokenWithAuthorizationRefresh(t *testing.T) { + + oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + refreshed := false + spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", func(t adal.Token) error { + refreshed = true + return nil + }) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + + jwt := `{ + "access_token" : "accessToken", + "expires_in" : "3600", + "expires_on" : "test", + "not_before" : "test", + "resource" : "test", + "token_type" : "Bearer" + }` + body := mocks.NewBody(jwt) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + + ba := NewBearerAuthorizer(spt) + req, err := Prepare(mocks.NewRequest(), ba.WithAuthorization()) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != 
fmt.Sprintf("Bearer %s", spt.AccessToken) { + t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header") + } + + if !refreshed { + t.Fatal("azure: BearerAuthorizer#WithAuthorization must refresh the token") + } +} + +func TestServicePrincipalTokenWithAuthorizationReturnsErrorIfConnotRefresh(t *testing.T) { + oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", nil) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + + s := mocks.NewSender() + s.AppendResponse(mocks.NewResponseWithStatus("400 Bad Request", http.StatusBadRequest)) + spt.SetSender(s) + + ba := NewBearerAuthorizer(spt) + _, err = Prepare(mocks.NewRequest(), ba.WithAuthorization()) + if err == nil { + t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to return an error when refresh fails") + } +} + +func TestBearerAuthorizerCallback(t *testing.T) { + tenantString := "123-tenantID-456" + resourceString := "https://fake.resource.net" + + s := mocks.NewSender() + resp := mocks.NewResponseWithStatus("401 Unauthorized", http.StatusUnauthorized) + mocks.SetResponseHeader(resp, bearerChallengeHeader, bearer+" \"authorization\"=\"https://fake.net/"+tenantString+"\",\"resource\"=\""+resourceString+"\"") + s.AppendResponse(resp) + + auth := NewBearerAuthorizerCallback(s, func(tenantID, resource string) (*BearerAuthorizer, error) { + if tenantID != tenantString { + t.Fatal("BearerAuthorizerCallback: bad tenant ID") + } + if resource != resourceString { + t.Fatal("BearerAuthorizerCallback: bad resource") + } + + oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, tenantID) + if err != nil { + t.Fatalf("azure: NewOAuthConfig returned an error (%v)", err) + } + + spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", resource) + if err != nil { + t.Fatalf("azure: NewServicePrincipalToken returned an error (%v)", err) + } + + spt.SetSender(s) + return NewBearerAuthorizer(spt), nil + }) + + _, err := Prepare(mocks.NewRequest(), auth.WithAuthorization()) + if err == nil { + t.Fatal("azure: BearerAuthorizerCallback#WithAuthorization failed to return an error when refresh fails") + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 000000000..37b907c77 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,129 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go
new file mode 100644
index 000000000..37b907c77
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go
@@ -0,0 +1,129 @@
+/*
+Package autorest implements an HTTP request pipeline suitable for use across multiple goroutines
+and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
+generated Go code.
+
+The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
+and Responding. A typical pattern is:
+
+ req, err := Prepare(&http.Request{},
+ token.WithAuthorization())
+
+ resp, err := Send(req,
+ WithLogging(logger),
+ DoErrorIfStatusCode(http.StatusInternalServerError),
+ DoCloseIfError(),
+ DoRetryForAttempts(5, time.Second))
+
+ err = Respond(resp,
+ ByDiscardingBody(),
+ ByClosing())
+
+Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
+and then pass the data along, pass the data first and then modify the result, or wrap themselves
+around passing the data (such as a logger might do). Decorators run in the order provided. For
+example, the following:
+
+ req, err := Prepare(&http.Request{},
+ WithBaseURL("https://microsoft.com/"),
+ WithPath("a"),
+ WithPath("b"),
+ WithPath("c"))
+
+will set the URL to:
+
+ https://microsoft.com/a/b/c
+
+Preparers and Responders may be shared and re-used (assuming the underlying decorators support
+sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
+shared among multiple goroutines, and a single Sender shared among multiple sending goroutines,
+all bound together by means of input / output channels.
+
+Decorators hold their passed state within a closure (such as the path components in the example
+above). Be careful to share Preparers and Responders only in a context where such held state
+applies. For example, it may not make sense to share a Preparer that applies a query string from a
+fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
+struct (e.g., ByUnmarshallingJson) is likely incorrect.
+
+Lastly, the Swagger specification (https://swagger.io) that drives AutoRest
+(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
+github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure
+correct parsing and formatting.
+
+Errors raised by autorest objects and methods will conform to the autorest.Error interface.
+
+See the included examples for more detail. For details on the suggested use of this package by
+generated clients, see the Client described below.
+*/
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "net/http"
+ "time"
+)
+
+const (
+ // HeaderLocation specifies the HTTP Location header.
+ HeaderLocation = "Location"
+
+ // HeaderRetryAfter specifies the HTTP Retry-After header.
+ HeaderRetryAfter = "Retry-After"
+)
+
+// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
+// and false otherwise.
+func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
+ return containsInt(codes, resp.StatusCode)
+}
+
+// GetLocation retrieves the URL from the Location header of the passed response.
+func GetLocation(resp *http.Response) string {
+ return resp.Header.Get(HeaderLocation)
+}
+
+// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If
+// the header is absent or is malformed, it will return the supplied default delay time.Duration. 
+func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. +func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go b/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go new file mode 100644 index 000000000..467ea438b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go @@ -0,0 +1,140 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/http" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func TestResponseHasStatusCode(t *testing.T) { + codes := []int{http.StatusOK, http.StatusAccepted} + resp := &http.Response{StatusCode: http.StatusAccepted} + if !ResponseHasStatusCode(resp, codes...) { + t.Fatalf("autorest: ResponseHasStatusCode failed to find %v in %v", resp.StatusCode, codes) + } +} + +func TestResponseHasStatusCodeNotPresent(t *testing.T) { + codes := []int{http.StatusOK, http.StatusAccepted} + resp := &http.Response{StatusCode: http.StatusInternalServerError} + if ResponseHasStatusCode(resp, codes...) 
{
+ t.Fatalf("autorest: ResponseHasStatusCode unexpectedly found %v in %v", resp.StatusCode, codes)
+ }
+}
+
+func TestNewPollingRequestDoesNotReturnARequestWhenLocationHeaderIsMissing(t *testing.T) {
+ resp := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError)
+
+ req, _ := NewPollingRequest(resp, nil)
+ if req != nil {
+ t.Fatal("autorest: NewPollingRequest returned an http.Request when the Location header was missing")
+ }
+}
+
+func TestNewPollingRequestReturnsAnErrorWhenPrepareFails(t *testing.T) {
+ resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+ mocks.SetAcceptedHeaders(resp)
+ resp.Header.Set(http.CanonicalHeaderKey(HeaderLocation), mocks.TestBadURL)
+
+ _, err := NewPollingRequest(resp, nil)
+ if err == nil {
+ t.Fatal("autorest: NewPollingRequest failed to return an error when Prepare fails")
+ }
+}
+
+func TestNewPollingRequestDoesNotReturnARequestWhenPrepareFails(t *testing.T) {
+ resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+ mocks.SetAcceptedHeaders(resp)
+ resp.Header.Set(http.CanonicalHeaderKey(HeaderLocation), mocks.TestBadURL)
+
+ req, _ := NewPollingRequest(resp, nil)
+ if req != nil {
+ t.Fatal("autorest: NewPollingRequest returned an http.Request when Prepare failed")
+ }
+}
+
+func TestNewPollingRequestReturnsAGetRequest(t *testing.T) {
+ resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+ mocks.SetAcceptedHeaders(resp)
+
+ req, _ := NewPollingRequest(resp, nil)
+ if req.Method != "GET" {
+ t.Fatalf("autorest: NewPollingRequest did not create an HTTP GET request -- actual method %v", req.Method)
+ }
+}
+
+func TestNewPollingRequestProvidesTheURL(t *testing.T) {
+ resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+ mocks.SetAcceptedHeaders(resp)
+
+ req, _ := NewPollingRequest(resp, nil)
+ if req.URL.String() != mocks.TestURL {
+ t.Fatalf("autorest: NewPollingRequest did not create an HTTP request with the expected URL -- received %v, expected %v", req.URL, mocks.TestURL)
+ }
+}
+
+func TestGetLocation(t *testing.T) {
+ resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+ mocks.SetAcceptedHeaders(resp)
+
+ l := GetLocation(resp)
+ if len(l) == 0 {
+ t.Fatalf("autorest: GetLocation failed to return Location header -- expected %v, received %v", mocks.TestURL, l)
+ }
+}
+
+func TestGetLocationReturnsEmptyStringForMissingLocation(t *testing.T) {
+ resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+
+ l := GetLocation(resp)
+ if len(l) != 0 {
+ t.Fatalf("autorest: GetLocation returned a value without a Location header -- received %v", l)
+ }
+}
+
+func TestGetRetryAfter(t *testing.T) {
+ resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+ mocks.SetAcceptedHeaders(resp)
+
+ d := GetRetryAfter(resp, DefaultPollingDelay)
+ if d != mocks.TestDelay {
+ t.Fatalf("autorest: GetRetryAfter failed to return the expected delay -- expected %v, received %v", mocks.TestDelay, d)
+ }
+}
+
+func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMissing(t *testing.T) {
+ resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+
+ d := GetRetryAfter(resp, DefaultPollingDelay)
+ if d != DefaultPollingDelay {
+ t.Fatalf("autorest: GetRetryAfter failed to return the default delay for a missing Retry-After header -- expected %v, received %v",
+ DefaultPollingDelay, d)
+ }
+}
+
+func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMalformed(t *testing.T) {
+ resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+
+func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMalformed(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+	resp.Header.Set(http.CanonicalHeaderKey(HeaderRetryAfter), "a very bad non-integer value")
+
+	d := GetRetryAfter(resp, DefaultPollingDelay)
+	if d != DefaultPollingDelay {
+		t.Fatalf("autorest: GetRetryAfter failed to return the default delay for a malformed Retry-After header -- expected %v, received %v",
+			DefaultPollingDelay, d)
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
new file mode 100644
index 000000000..ffbc8da28
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
@@ -0,0 +1,316 @@
+package azure
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/date"
+)
+
+const (
+	headerAsyncOperation = "Azure-AsyncOperation"
+)
+
+const (
+	operationInProgress string = "InProgress"
+	operationCanceled   string = "Canceled"
+	operationFailed     string = "Failed"
+	operationSucceeded  string = "Succeeded"
+)
+
+// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure
+// long-running operation. It will delay between requests for the duration specified in the
+// Retry-After header or, if the header is absent, for the passed delay. Polling may be canceled
+// by closing the optional channel on the http.Request.
+func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator {
+	return func(s autorest.Sender) autorest.Sender {
+		return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			resp, err = s.Do(r)
+			if err != nil {
+				return resp, err
+			}
+			pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK}
+			if !autorest.ResponseHasStatusCode(resp, pollingCodes...) {
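+				// Not an Azure long-running-operation response; return it to the caller as is.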
+				return resp, nil
+			}
+
+			ps := pollingState{}
+			for err == nil {
+				err = updatePollingState(resp, &ps)
+				if err != nil {
+					break
+				}
+				if ps.hasTerminated() {
+					if !ps.hasSucceeded() {
+						err = ps
+					}
+					break
+				}
+
+				r, err = newPollingRequest(resp, ps)
+				if err != nil {
+					return resp, err
+				}
+
+				delay = autorest.GetRetryAfter(resp, delay)
+				resp, err = autorest.SendWithSender(s, r,
+					autorest.AfterDelay(delay))
+			}
+
+			return resp, err
+		})
+	}
+}
+
+func getAsyncOperation(resp *http.Response) string {
+	return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
+}
+
+func hasSucceeded(state string) bool {
+	return state == operationSucceeded
+}
+
+func hasTerminated(state string) bool {
+	switch state {
+	case operationCanceled, operationFailed, operationSucceeded:
+		return true
+	default:
+		return false
+	}
+}
+
+func hasFailed(state string) bool {
+	return state == operationFailed
+}
+
+type provisioningTracker interface {
+	state() string
+	hasSucceeded() bool
+	hasTerminated() bool
+}
+
+type operationResource struct {
+	// Note:
+	// The specification states services should return the "id" field. However some return it as
+	// "operationId".
+	ID              string                 `json:"id"`
+	OperationID     string                 `json:"operationId"`
+	Name            string                 `json:"name"`
+	Status          string                 `json:"status"`
+	Properties      map[string]interface{} `json:"properties"`
+	OperationError  ServiceError           `json:"error"`
+	StartTime       date.Time              `json:"startTime"`
+	EndTime         date.Time              `json:"endTime"`
+	PercentComplete float64                `json:"percentComplete"`
+}
+
+func (or operationResource) state() string {
+	return or.Status
+}
+
+func (or operationResource) hasSucceeded() bool {
+	return hasSucceeded(or.state())
+}
+
+func (or operationResource) hasTerminated() bool {
+	return hasTerminated(or.state())
+}
+
+type provisioningProperties struct {
+	ProvisioningState string `json:"provisioningState"`
+}
+
+type provisioningStatus struct {
+	Properties        provisioningProperties `json:"properties,omitempty"`
+	ProvisioningError ServiceError           `json:"error,omitempty"`
+}
+
+func (ps provisioningStatus) state() string {
+	return ps.Properties.ProvisioningState
+}
+
+func (ps provisioningStatus) hasSucceeded() bool {
+	return hasSucceeded(ps.state())
+}
+
+func (ps provisioningStatus) hasTerminated() bool {
+	return hasTerminated(ps.state())
+}
+
+func (ps provisioningStatus) hasProvisioningError() bool {
+	return ps.ProvisioningError != ServiceError{}
+}
+
+type pollingResponseFormat string
+
+const (
+	usesOperationResponse  pollingResponseFormat = "OperationResponse"
+	usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus"
+	formatIsUnknown        pollingResponseFormat = ""
+)
+
+type pollingState struct {
+	responseFormat pollingResponseFormat
+	uri            string
+	state          string
+	code           string
+	message        string
+}
+
+func (ps pollingState) hasSucceeded() bool {
+	return hasSucceeded(ps.state)
+}
+
+func (ps pollingState) hasTerminated() bool {
+	return hasTerminated(ps.state)
+}
+
+func (ps pollingState) hasFailed() bool {
+	return hasFailed(ps.state)
+}
+
+func (ps pollingState) Error() string {
+	return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message)
+}
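+
+// Editor's summary (annotation, not upstream text) of how the function below maps
+// a polling response onto ps.state; "body state" is the status string unmarshaled
+// from the response body:
+//
+//	body state          HTTP status code    resulting ps.state
+//	terminal            any                 the body state (Succeeded/Failed/Canceled)
+//	other, non-empty    any                 InProgress
+//	empty               202                 InProgress
+//	empty               200, 201, 204       Succeeded
+//	empty               anything else       Failed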
+
+// updatePollingState maps the operation status -- retrieved from either a provisioningState
+// field, the status field of an OperationResource, or inferred from the HTTP status code --
+// into a well-known state. Since the process begins from the initial request, the state
+// either comes from the returned provisioningState or is inferred from the HTTP
+// status code. Subsequent requests will read an Azure OperationResource object if the
+// service initially returned the Azure-AsyncOperation header. The responseFormat field notes
+// the expected response format.
+func updatePollingState(resp *http.Response, ps *pollingState) error {
+	// Determine the response shape
+	// -- The first response will always be a provisioningStatus response; only the polling requests,
+	//    depending on the header returned, may take another form.
+	var pt provisioningTracker
+	if ps.responseFormat == usesOperationResponse {
+		pt = &operationResource{}
+	} else {
+		pt = &provisioningStatus{}
+	}
+
+	// If this is the first request (that is, the polling response shape is unknown), determine how
+	// to poll and what to expect
+	if ps.responseFormat == formatIsUnknown {
+		req := resp.Request
+		if req == nil {
+			return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing")
+		}
+
+		// Prefer the Azure-AsyncOperation header
+		ps.uri = getAsyncOperation(resp)
+		if ps.uri != "" {
+			ps.responseFormat = usesOperationResponse
+		} else {
+			ps.responseFormat = usesProvisioningStatus
+		}
+
+		// Else, use the Location header
+		if ps.uri == "" {
+			ps.uri = autorest.GetLocation(resp)
+		}
+
+		// Lastly, for requests against an existing resource, use the last request URI
+		if ps.uri == "" {
+			m := strings.ToUpper(req.Method)
+			if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet {
+				ps.uri = req.URL.String()
+			}
+		}
+	}
+
+	// Read and interpret the response (saving the Body in case no polling is necessary)
+	b := &bytes.Buffer{}
+	err := autorest.Respond(resp,
+		autorest.ByCopying(b),
+		autorest.ByUnmarshallingJSON(pt),
+		autorest.ByClosing())
+	resp.Body = ioutil.NopCloser(b)
+	if err != nil {
+		return err
+	}
+
+	// Interpret the results
+	// -- Terminal states apply regardless
+	// -- Unknown states are per-service in-progress states
+	// -- Otherwise, infer state from HTTP status code
+	if pt.hasTerminated() {
+		ps.state = pt.state()
+	} else if pt.state() != "" {
+		ps.state = operationInProgress
+	} else {
+		switch resp.StatusCode {
+		case http.StatusAccepted:
+			ps.state = operationInProgress
+
+		case http.StatusNoContent, http.StatusCreated, http.StatusOK:
+			ps.state = operationSucceeded
+
+		default:
+			ps.state = operationFailed
+		}
+	}
+
+	if ps.state == operationInProgress && ps.uri == "" {
+		return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL)
+	}
+
+	// For failed operations, check for the error code and message in
+	// -- the Operation resource
+	// -- the Response
+	// -- Otherwise, report Unknown
+	if ps.hasFailed() {
+		if ps.responseFormat == usesOperationResponse {
+			or := pt.(*operationResource)
+			ps.code = or.OperationError.Code
+			ps.message = or.OperationError.Message
+		} else {
+			p := pt.(*provisioningStatus)
+			if p.hasProvisioningError() {
+				ps.code = p.ProvisioningError.Code
+				ps.message = p.ProvisioningError.Message
+			} else {
+				ps.code = "Unknown"
+				ps.message = "None"
+			}
+		}
+	}
+	return nil
+}
+
+func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) {
+	req := resp.Request
+	if req == nil {
+		return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing")
+	}
+
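+	// The poll request below reuses the Cancel channel of the original request, so
+	// closing that channel aborts the polling loop, and it always issues a GET
+	// against the URI captured in the polling state (Azure-AsyncOperation header,
+	// Location header, or the original request URL).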
reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel}, + autorest.AsGet(), + autorest.WithBaseURL(ps.uri)) + if err != nil { + return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri) + } + + return reqPoll, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go new file mode 100644 index 000000000..727c357e7 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go @@ -0,0 +1,1130 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" +) + +func TestGetAsyncOperation_ReturnsAzureAsyncOperationHeader(t *testing.T) { + r := newAsynchronousResponse() + + if getAsyncOperation(r) != mocks.TestAzureAsyncURL { + t.Fatalf("azure: getAsyncOperation failed to extract the Azure-AsyncOperation header -- expected %v, received %v", mocks.TestURL, getAsyncOperation(r)) + } +} + +func TestGetAsyncOperation_ReturnsEmptyStringIfHeaderIsAbsent(t *testing.T) { + r := mocks.NewResponse() + + if len(getAsyncOperation(r)) != 0 { + t.Fatalf("azure: getAsyncOperation failed to return empty string when the Azure-AsyncOperation header is absent -- received %v", getAsyncOperation(r)) + } +} + +func TestHasSucceeded_ReturnsTrueForSuccess(t *testing.T) { + if !hasSucceeded(operationSucceeded) { + t.Fatal("azure: hasSucceeded failed to return true for success") + } +} + +func TestHasSucceeded_ReturnsFalseOtherwise(t *testing.T) { + if hasSucceeded("not a success string") { + t.Fatal("azure: hasSucceeded returned true for a non-success") + } +} + +func TestHasTerminated_ReturnsTrueForValidTerminationStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + if !hasTerminated(state) { + t.Fatalf("azure: hasTerminated failed to return true for the '%s' state", state) + } + } +} + +func TestHasTerminated_ReturnsFalseForUnknownStates(t *testing.T) { + if hasTerminated("not a known state") { + t.Fatal("azure: hasTerminated returned true for an unknown state") + } +} + +func TestOperationError_ErrorReturnsAString(t *testing.T) { + s := (ServiceError{Code: "server code", Message: "server error"}).Error() + if s == "" { + t.Fatalf("azure: operationError#Error failed to return an error") + } + if !strings.Contains(s, "server code") || !strings.Contains(s, "server error") { + t.Fatalf("azure: operationError#Error returned a malformed error -- error='%v'", s) + } +} + +func TestOperationResource_StateReturnsState(t *testing.T) { + if (operationResource{Status: "state"}).state() != "state" { + t.Fatalf("azure: operationResource#state failed to return the correct state") + } +} + +func 
TestOperationResource_HasSucceededReturnsFalseIfNotSuccess(t *testing.T) { + if (operationResource{Status: "not a success string"}).hasSucceeded() { + t.Fatalf("azure: operationResource#hasSucceeded failed to return false for a canceled operation") + } +} + +func TestOperationResource_HasSucceededReturnsTrueIfSuccessful(t *testing.T) { + if !(operationResource{Status: operationSucceeded}).hasSucceeded() { + t.Fatalf("azure: operationResource#hasSucceeded failed to return true for a successful operation") + } +} + +func TestOperationResource_HasTerminatedReturnsTrueForKnownStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + if !(operationResource{Status: state}).hasTerminated() { + t.Fatalf("azure: operationResource#hasTerminated failed to return true for the '%s' state", state) + } + } +} + +func TestOperationResource_HasTerminatedReturnsFalseForUnknownStates(t *testing.T) { + if (operationResource{Status: "not a known state"}).hasTerminated() { + t.Fatalf("azure: operationResource#hasTerminated returned true for a non-terminal operation") + } +} + +func TestProvisioningStatus_StateReturnsState(t *testing.T) { + if (provisioningStatus{Properties: provisioningProperties{"state"}}).state() != "state" { + t.Fatalf("azure: provisioningStatus#state failed to return the correct state") + } +} + +func TestProvisioningStatus_HasSucceededReturnsFalseIfNotSuccess(t *testing.T) { + if (provisioningStatus{Properties: provisioningProperties{"not a success string"}}).hasSucceeded() { + t.Fatalf("azure: provisioningStatus#hasSucceeded failed to return false for a canceled operation") + } +} + +func TestProvisioningStatus_HasSucceededReturnsTrueIfSuccessful(t *testing.T) { + if !(provisioningStatus{Properties: provisioningProperties{operationSucceeded}}).hasSucceeded() { + t.Fatalf("azure: provisioningStatus#hasSucceeded failed to return true for a successful operation") + } +} + +func TestProvisioningStatus_HasTerminatedReturnsTrueForKnownStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + if !(provisioningStatus{Properties: provisioningProperties{state}}).hasTerminated() { + t.Fatalf("azure: provisioningStatus#hasTerminated failed to return true for the '%s' state", state) + } + } +} + +func TestProvisioningStatus_HasTerminatedReturnsFalseForUnknownStates(t *testing.T) { + if (provisioningStatus{Properties: provisioningProperties{"not a known state"}}).hasTerminated() { + t.Fatalf("azure: provisioningStatus#hasTerminated returned true for a non-terminal operation") + } +} + +func TestPollingState_HasSucceededReturnsFalseIfNotSuccess(t *testing.T) { + if (pollingState{state: "not a success string"}).hasSucceeded() { + t.Fatalf("azure: pollingState#hasSucceeded failed to return false for a canceled operation") + } +} + +func TestPollingState_HasSucceededReturnsTrueIfSuccessful(t *testing.T) { + if !(pollingState{state: operationSucceeded}).hasSucceeded() { + t.Fatalf("azure: pollingState#hasSucceeded failed to return true for a successful operation") + } +} + +func TestPollingState_HasTerminatedReturnsTrueForKnownStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + if !(pollingState{state: state}).hasTerminated() { + t.Fatalf("azure: pollingState#hasTerminated failed to return true for the '%s' state", state) + } + } +} + +func TestPollingState_HasTerminatedReturnsFalseForUnknownStates(t *testing.T) { + if 
(pollingState{state: "not a known state"}).hasTerminated() { + t.Fatalf("azure: pollingState#hasTerminated returned true for a non-terminal operation") + } +} + +func TestUpdatePollingState_ReturnsAnErrorIfOneOccurs(t *testing.T) { + resp := mocks.NewResponseWithContent(operationResourceIllegal) + err := updatePollingState(resp, &pollingState{}) + if err == nil { + t.Fatalf("azure: updatePollingState failed to return an error after a JSON parsing error") + } +} + +func TestUpdatePollingState_ReturnsTerminatedForKnownProvisioningStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + resp := mocks.NewResponseWithContent(fmt.Sprintf(pollingStateFormat, state)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if !ps.hasTerminated() { + t.Fatalf("azure: updatePollingState failed to return a terminating pollingState for the '%s' state", state) + } + } +} + +func TestUpdatePollingState_ReturnsSuccessForSuccessfulProvisioningState(t *testing.T) { + resp := mocks.NewResponseWithContent(fmt.Sprintf(pollingStateFormat, operationSucceeded)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if !ps.hasSucceeded() { + t.Fatalf("azure: updatePollingState failed to return a successful pollingState for the '%s' state", operationSucceeded) + } +} + +func TestUpdatePollingState_ReturnsInProgressForAllOtherProvisioningStates(t *testing.T) { + s := "not a recognized state" + resp := mocks.NewResponseWithContent(fmt.Sprintf(pollingStateFormat, s)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if ps.hasTerminated() { + t.Fatalf("azure: updatePollingState returned terminated for unknown state '%s'", s) + } +} + +func TestUpdatePollingState_ReturnsSuccessWhenProvisioningStateFieldIsAbsentForSuccessStatusCodes(t *testing.T) { + for _, sc := range []int{http.StatusOK, http.StatusCreated, http.StatusNoContent} { + resp := mocks.NewResponseWithContent(pollingStateEmpty) + resp.StatusCode = sc + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if !ps.hasSucceeded() { + t.Fatalf("azure: updatePollingState failed to return success when the provisionState field is absent for Status Code %d", sc) + } + } +} + +func TestUpdatePollingState_ReturnsInProgressWhenProvisioningStateFieldIsAbsentForAccepted(t *testing.T) { + resp := mocks.NewResponseWithContent(pollingStateEmpty) + resp.StatusCode = http.StatusAccepted + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if ps.hasTerminated() { + t.Fatalf("azure: updatePollingState returned terminated when the provisionState field is absent for Status Code Accepted") + } +} + +func TestUpdatePollingState_ReturnsFailedWhenProvisioningStateFieldIsAbsentForUnknownStatusCodes(t *testing.T) { + resp := mocks.NewResponseWithContent(pollingStateEmpty) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if !ps.hasTerminated() || ps.hasSucceeded() { + t.Fatalf("azure: updatePollingState did not return failed when the provisionState field is absent for an unknown Status Code") + } +} + +func TestUpdatePollingState_ReturnsTerminatedForKnownOperationResourceStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + resp := 
mocks.NewResponseWithContent(fmt.Sprintf(operationResourceFormat, state)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, ps) + if !ps.hasTerminated() { + t.Fatalf("azure: updatePollingState failed to return a terminating pollingState for the '%s' state", state) + } + } +} + +func TestUpdatePollingState_ReturnsSuccessForSuccessfulOperationResourceState(t *testing.T) { + resp := mocks.NewResponseWithContent(fmt.Sprintf(operationResourceFormat, operationSucceeded)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, ps) + if !ps.hasSucceeded() { + t.Fatalf("azure: updatePollingState failed to return a successful pollingState for the '%s' state", operationSucceeded) + } +} + +func TestUpdatePollingState_ReturnsInProgressForAllOtherOperationResourceStates(t *testing.T) { + s := "not a recognized state" + resp := mocks.NewResponseWithContent(fmt.Sprintf(operationResourceFormat, s)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, ps) + if ps.hasTerminated() { + t.Fatalf("azure: updatePollingState returned terminated for unknown state '%s'", s) + } +} + +func TestUpdatePollingState_CopiesTheResponseBody(t *testing.T) { + s := fmt.Sprintf(pollingStateFormat, operationSucceeded) + resp := mocks.NewResponseWithContent(s) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, ps) + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("azure: updatePollingState failed to replace the http.Response Body -- Error='%v'", err) + } + if string(b) != s { + t.Fatalf("azure: updatePollingState failed to copy the http.Response Body -- Expected='%s' Received='%s'", s, string(b)) + } +} + +func TestUpdatePollingState_ClosesTheOriginalResponseBody(t *testing.T) { + resp := mocks.NewResponse() + b := resp.Body.(*mocks.Body) + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if b.IsOpen() { + t.Fatal("azure: updatePollingState failed to close the original http.Response Body") + } +} + +func TestUpdatePollingState_FailsWhenResponseLacksRequest(t *testing.T) { + resp := newAsynchronousResponse() + resp.Request = nil + + ps := pollingState{} + err := updatePollingState(resp, &ps) + if err == nil { + t.Fatal("azure: updatePollingState failed to return an error when the http.Response lacked the original http.Request") + } +} + +func TestUpdatePollingState_SetsTheResponseFormatWhenUsingTheAzureAsyncOperationHeader(t *testing.T) { + ps := pollingState{} + updatePollingState(newAsynchronousResponse(), &ps) + + if ps.responseFormat != usesOperationResponse { + t.Fatal("azure: updatePollingState failed to set the correct response format when using the Azure-AsyncOperation header") + } +} + +func TestUpdatePollingState_SetsTheResponseFormatWhenUsingTheAzureAsyncOperationHeaderIsMissing(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.responseFormat != usesProvisioningStatus { + t.Fatal("azure: updatePollingState failed to set the correct response format when the Azure-AsyncOperation header is absent") + } +} + +func TestUpdatePollingState_DoesNotChangeAnExistingReponseFormat(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + ps := 
pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, &ps) + + if ps.responseFormat != usesOperationResponse { + t.Fatal("azure: updatePollingState failed to leave an existing response format setting") + } +} + +func TestUpdatePollingState_PrefersTheAzureAsyncOperationHeader(t *testing.T) { + resp := newAsynchronousResponse() + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.uri != mocks.TestAzureAsyncURL { + t.Fatal("azure: updatePollingState failed to prefer the Azure-AsyncOperation header") + } +} + +func TestUpdatePollingState_PrefersLocationWhenTheAzureAsyncOperationHeaderMissing(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.uri != mocks.TestLocationURL { + t.Fatal("azure: updatePollingState failed to prefer the Location header when the Azure-AsyncOperation header is missing") + } +} + +func TestUpdatePollingState_UsesTheObjectLocationIfAsyncHeadersAreMissing(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + resp.Request.Method = http.MethodPatch + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.uri != mocks.TestURL { + t.Fatal("azure: updatePollingState failed to use the Object URL when the asynchronous headers are missing") + } +} + +func TestUpdatePollingState_RecognizesLowerCaseHTTPVerbs(t *testing.T) { + for _, m := range []string{strings.ToLower(http.MethodPatch), strings.ToLower(http.MethodPut), strings.ToLower(http.MethodGet)} { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + resp.Request.Method = m + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.uri != mocks.TestURL { + t.Fatalf("azure: updatePollingState failed to recognize the lower-case HTTP verb '%s'", m) + } + } +} + +func TestUpdatePollingState_ReturnsAnErrorIfAsyncHeadersAreMissingForANewOrDeletedObject(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + + for _, m := range []string{http.MethodDelete, http.MethodPost} { + resp.Request.Method = m + err := updatePollingState(resp, &pollingState{}) + if err == nil { + t.Fatalf("azure: updatePollingState failed to return an error even though it could not determine the polling URL for Method '%s'", m) + } + } +} + +func TestNewPollingRequest_FailsWhenResponseLacksRequest(t *testing.T) { + resp := newAsynchronousResponse() + resp.Request = nil + + _, err := newPollingRequest(resp, pollingState{}) + if err == nil { + t.Fatal("azure: newPollingRequest failed to return an error when the http.Response lacked the original http.Request") + } +} + +func TestNewPollingRequest_ReturnsAnErrorWhenPrepareFails(t *testing.T) { + _, err := newPollingRequest(newAsynchronousResponse(), pollingState{responseFormat: usesOperationResponse, uri: mocks.TestBadURL}) + if err == nil { + t.Fatal("azure: newPollingRequest failed to return an error when Prepare fails") + } +} + +func TestNewPollingRequest_DoesNotReturnARequestWhenPrepareFails(t *testing.T) { + req, _ := newPollingRequest(newAsynchronousResponse(), pollingState{responseFormat: usesOperationResponse, uri: mocks.TestBadURL}) + if req != 
nil { + t.Fatal("azure: newPollingRequest returned an http.Request when Prepare failed") + } +} + +func TestNewPollingRequest_ReturnsAGetRequest(t *testing.T) { + req, _ := newPollingRequest(newAsynchronousResponse(), pollingState{responseFormat: usesOperationResponse, uri: mocks.TestAzureAsyncURL}) + if req.Method != "GET" { + t.Fatalf("azure: newPollingRequest did not create an HTTP GET request -- actual method %v", req.Method) + } +} + +func TestDoPollForAsynchronous_IgnoresUnspecifiedStatusCodes(t *testing.T) { + client := mocks.NewSender() + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Duration(0))) + + if client.Attempts() != 1 { + t.Fatalf("azure: DoPollForAsynchronous polled for unspecified status code") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsForSpecifiedStatusCodes(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAsynchronousResponse()) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() != 2 { + t.Fatalf("azure: DoPollForAsynchronous failed to poll for specified status code") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_CanBeCanceled(t *testing.T) { + cancel := make(chan struct{}) + delay := 5 * time.Second + + r1 := newAsynchronousResponse() + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(newOperationResourceResponse("Busy"), -1) + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + go func() { + req := mocks.NewRequest() + req.Cancel = cancel + + wg.Done() + + r, _ := autorest.SendWithSender(client, req, + DoPollForAsynchronous(10*time.Second)) + autorest.Respond(r, + autorest.ByClosing()) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if time.Since(start) >= delay { + t.Fatalf("azure: DoPollForAsynchronous failed to cancel") + } +} + +func TestDoPollForAsynchronous_ClosesAllNonreturnedResponseBodiesWhenPolling(t *testing.T) { + r1 := newAsynchronousResponse() + b1 := r1.Body.(*mocks.Body) + r2 := newOperationResourceResponse("busy") + b2 := r2.Body.(*mocks.Body) + r3 := newOperationResourceResponse(operationSucceeded) + b3 := r3.Body.(*mocks.Body) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendResponse(r3) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if b1.IsOpen() || b2.IsOpen() || b3.IsOpen() { + t.Fatalf("azure: DoPollForAsynchronous did not close unreturned response bodies") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_LeavesLastResponseBodyOpen(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationSucceeded) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendResponse(r3) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + b, err := ioutil.ReadAll(r.Body) + if len(b) <= 0 || err != nil { + t.Fatalf("azure: DoPollForAsynchronous did not leave open the body of the last response - Error='%v'", err) + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_DoesNotPollIfOriginalRequestReturnedAnError(t *testing.T) { + r1 := 
newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendResponse(r2) + client.SetError(fmt.Errorf("Faux Error")) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() != 1 { + t.Fatalf("azure: DoPollForAsynchronous tried to poll after receiving an error") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_DoesNotPollIfCreatingOperationRequestFails(t *testing.T) { + r1 := newAsynchronousResponse() + mocks.SetResponseHeader(r1, http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestBadURL) + r2 := newOperationResourceResponse("busy") + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() > 1 { + t.Fatalf("azure: DoPollForAsynchronous polled with an invalidly formed operation request") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_StopsPollingAfterAnError(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(2) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() > 3 { + t.Fatalf("azure: DoPollForAsynchronous failed to stop polling after receiving an error") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsPollingError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(newAsynchronousResponse(), 5) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if err == nil { + t.Fatalf("azure: DoPollForAsynchronous failed to return error from polling") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsForStatusAccepted(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Status = "202 Accepted" + r1.StatusCode = http.StatusAccepted + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsForStatusCreated(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Status = "201 Created" + r1.StatusCode = http.StatusCreated + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous 
stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsUntilProvisioningStatusTerminates(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2 := newProvisioningStatusResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r3 := newProvisioningStatusResponse(operationCanceled) + r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsUntilProvisioningStatusSucceeds(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2 := newProvisioningStatusResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r3 := newProvisioningStatusResponse(operationSucceeded) + r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsUntilOperationResourceHasTerminated(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsUntilOperationResourceHasSucceeded(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationSucceeded) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_StopsPollingWhenOperationResourceHasTerminated(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 2) + + r, _ := 
autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() > 4 { + t.Fatalf("azure: DoPollForAsynchronous failed to stop after receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsAnErrorForCanceledOperations(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if err == nil || !strings.Contains(fmt.Sprintf("%v", err), "Canceled") { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error for a canceled OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsAnErrorForFailedOperations(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationFailed) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if err == nil || !strings.Contains(fmt.Sprintf("%v", err), "Failed") { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error for a canceled OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_WithNilURI(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r1.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + + r2 := newOperationResourceResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendResponse(r2) + + req, _ := http.NewRequest("POST", "https://microsoft.com/a/b/c/", mocks.NewBody("")) + r, err := autorest.SendWithSender(client, req, + DoPollForAsynchronous(time.Millisecond)) + + if err == nil { + t.Fatalf("azure: DoPollForAsynchronous failed to return error for nil URI. 
got: nil; want: Azure Polling Error - Unable to obtain polling URI for POST") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsAnUnknownErrorForFailedOperations(t *testing.T) { + // Return unknown error if error not present in last response + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2 := newProvisioningStatusResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r3 := newProvisioningStatusResponse(operationFailed) + r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + expected := makeLongRunningOperationErrorString("Unknown", "None") + if err.Error() != expected { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for an unknown error. \n expected=%q \n got=%q", + expected, err.Error()) + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsErrorForLastErrorResponse(t *testing.T) { + // Return error code and message if error present in last response + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2 := newProvisioningStatusResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r3 := newAsynchronousResponseWithError() + r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + expected := makeLongRunningOperationErrorString("InvalidParameter", "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix.") + if err.Error() != expected { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for an unknown error. \n expected=%q \n got=%q", + expected, err.Error()) + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsOperationResourceErrorForFailedOperations(t *testing.T) { + // Return Operation resource response with error code and message in last operation resource response + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationFailed) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + expected := makeLongRunningOperationErrorString("BadArgument", "The provided database 'foo' has an invalid username.") + if err.Error() != expected { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for a failed Operations. 
\n expected=%q \n got=%q", + expected, err.Error()) + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsErrorForFirstPutRequest(t *testing.T) { + // Return 400 bad response with error code and message in first put + r1 := newAsynchronousResponseWithError() + client := mocks.NewSender() + client.AppendResponse(r1) + + res, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + if err != nil { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for a failed Operations. \n expected=%q \n got=%q", + errorResponse, err.Error()) + } + + err = autorest.Respond(res, + WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusCreated, http.StatusOK), + autorest.ByClosing()) + + reqError, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + expected := &RequestError{ + ServiceError: &ServiceError{ + Code: "InvalidParameter", + Message: "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix.", + }, + DetailedError: autorest.DetailedError{ + StatusCode: 400, + }, + } + if !reflect.DeepEqual(reqError, expected) { + t.Fatalf("azure: wrong error. expected=%q\ngot=%q", expected, reqError) + } + + defer res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != errorResponse { + t.Fatalf("azure: Response body is wrong. got=%q expected=%q", string(b), errorResponse) + } + +} + +func TestDoPollForAsynchronous_ReturnsNoErrorForSuccessfulOperations(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationSucceeded) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if err != nil { + t.Fatalf("azure: DoPollForAsynchronous returned an error for a successful OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_StopsPollingIfItReceivesAnInvalidOperationResource(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse("busy") + r3.Body = mocks.NewBody(operationResourceIllegal) + r4 := newOperationResourceResponse(operationSucceeded) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + client.AppendAndRepeatResponse(r4, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() > 4 { + t.Fatalf("azure: DoPollForAsynchronous failed to stop polling after receiving an invalid OperationResource") + } + if err == nil { + t.Fatalf("azure: DoPollForAsynchronous failed to return an error after receving an invalid OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +const ( + operationResourceIllegal = ` + This is not JSON and should fail...badly. 
+ ` + pollingStateFormat = ` + { + "unused" : { + "somefield" : 42 + }, + "properties" : { + "provisioningState": "%s" + } + } + ` + + errorResponse = ` + { + "error" : { + "code" : "InvalidParameter", + "message" : "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix." + } + } + ` + + pollingStateEmpty = ` + { + "unused" : { + "somefield" : 42 + }, + "properties" : { + } + } + ` + + operationResourceFormat = ` + { + "id": "/subscriptions/id/locations/westus/operationsStatus/sameguid", + "name": "sameguid", + "status" : "%s", + "startTime" : "2006-01-02T15:04:05Z", + "endTime" : "2006-01-02T16:04:05Z", + "percentComplete" : 50.00, + + "properties" : {} + } + ` + + operationResourceErrorFormat = ` + { + "id": "/subscriptions/id/locations/westus/operationsStatus/sameguid", + "name": "sameguid", + "status" : "%s", + "startTime" : "2006-01-02T15:04:05Z", + "endTime" : "2006-01-02T16:04:05Z", + "percentComplete" : 50.00, + + "properties" : {}, + "error" : { + "code" : "BadArgument", + "message" : "The provided database 'foo' has an invalid username." + } + } + ` +) + +func newAsynchronousResponse() *http.Response { + r := mocks.NewResponseWithStatus("201 Created", http.StatusCreated) + r.Body = mocks.NewBody(fmt.Sprintf(pollingStateFormat, operationInProgress)) + mocks.SetResponseHeader(r, http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestAzureAsyncURL) + mocks.SetResponseHeader(r, http.CanonicalHeaderKey(autorest.HeaderLocation), mocks.TestLocationURL) + mocks.SetRetryHeader(r, retryDelay) + r.Request = mocks.NewRequestForURL(mocks.TestURL) + return r +} + +func newAsynchronousResponseWithError() *http.Response { + r := mocks.NewResponseWithStatus("400 Bad Request", http.StatusBadRequest) + mocks.SetRetryHeader(r, retryDelay) + r.Request = mocks.NewRequestForURL(mocks.TestURL) + r.Body = mocks.NewBody(errorResponse) + return r +} + +func newOperationResourceResponse(status string) *http.Response { + r := newAsynchronousResponse() + r.Body = mocks.NewBody(fmt.Sprintf(operationResourceFormat, status)) + return r +} + +func newOperationResourceErrorResponse(status string) *http.Response { + r := newAsynchronousResponse() + r.Body = mocks.NewBody(fmt.Sprintf(operationResourceErrorFormat, status)) + return r +} + +func newProvisioningStatusResponse(status string) *http.Response { + r := newAsynchronousResponse() + r.Body = mocks.NewBody(fmt.Sprintf(pollingStateFormat, status)) + return r +} + +func makeLongRunningOperationErrorString(code string, message string) string { + return fmt.Sprintf("Long running operation terminated with status 'Failed': Code=%q Message=%q", code, message) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/authfile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/authfile.go new file mode 100644 index 000000000..84f420e2a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/authfile.go @@ -0,0 +1,129 @@ +package auth + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" + "unicode/utf16" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/dimchansky/utfbom" +) + +// ClientSetup includes authentication details and cloud specific +// parameters for ARM clients +type ClientSetup struct { + *autorest.BearerAuthorizer + File + BaseURI string +} + +// File represents the authentication file +type File struct { + 
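+	// Editor's note (not upstream text): the json tags on the fields below mirror
+	// the camelCase keys of an Azure SDK auth file, such as one produced by
+	// `az ad sp create-for-rbac --sdk-auth` (the CLI flag is an assumption here;
+	// it does not appear in this diff).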
+	ClientID                string `json:"clientId,omitempty"`
+	ClientSecret            string `json:"clientSecret,omitempty"`
+	SubscriptionID          string `json:"subscriptionId,omitempty"`
+	TenantID                string `json:"tenantId,omitempty"`
+	ActiveDirectoryEndpoint string `json:"activeDirectoryEndpointUrl,omitempty"`
+	ResourceManagerEndpoint string `json:"resourceManagerEndpointUrl,omitempty"`
+	GraphResourceID         string `json:"activeDirectoryGraphResourceId,omitempty"`
+	SQLManagementEndpoint   string `json:"sqlManagementEndpointUrl,omitempty"`
+	GalleryEndpoint         string `json:"galleryEndpointUrl,omitempty"`
+	ManagementEndpoint      string `json:"managementEndpointUrl,omitempty"`
+}
+
+// GetClientSetup provides an authorizer, base URI, subscriptionID and
+// tenantID parameters from an Azure CLI auth file.
+func GetClientSetup(baseURI string) (auth ClientSetup, err error) {
+	fileLocation := os.Getenv("AZURE_AUTH_LOCATION")
+	if fileLocation == "" {
+		return auth, errors.New("auth file not found. Environment variable AZURE_AUTH_LOCATION is not set")
+	}
+
+	contents, err := ioutil.ReadFile(fileLocation)
+	if err != nil {
+		return
+	}
+
+	// Auth file might be encoded
+	decoded, err := decode(contents)
+	if err != nil {
+		return
+	}
+
+	err = json.Unmarshal(decoded, &auth.File)
+	if err != nil {
+		return
+	}
+
+	resource, err := getResourceForToken(auth.File, baseURI)
+	if err != nil {
+		return
+	}
+	auth.BaseURI = resource
+
+	config, err := adal.NewOAuthConfig(auth.ActiveDirectoryEndpoint, auth.TenantID)
+	if err != nil {
+		return
+	}
+
+	spToken, err := adal.NewServicePrincipalToken(*config, auth.ClientID, auth.ClientSecret, resource)
+	if err != nil {
+		return
+	}
+
+	auth.BearerAuthorizer = autorest.NewBearerAuthorizer(spToken)
+	return
+}
+
+func decode(b []byte) ([]byte, error) {
+	reader, enc := utfbom.Skip(bytes.NewReader(b))
+
+	switch enc {
+	case utfbom.UTF16LittleEndian:
+		u16 := make([]uint16, (len(b)/2)-1)
+		err := binary.Read(reader, binary.LittleEndian, &u16)
+		if err != nil {
+			return nil, err
+		}
+		return []byte(string(utf16.Decode(u16))), nil
+	case utfbom.UTF16BigEndian:
+		u16 := make([]uint16, (len(b)/2)-1)
+		err := binary.Read(reader, binary.BigEndian, &u16)
+		if err != nil {
+			return nil, err
+		}
+		return []byte(string(utf16.Decode(u16))), nil
+	}
+	return ioutil.ReadAll(reader)
+}
+
+func getResourceForToken(f File, baseURI string) (string, error) {
+	// Compare the default base URI from the SDK to the endpoints from the public cloud
+	// Base URI and token resource are the same string. This func finds the authentication
+	// file field that matches the SDK base URI.
The SDK defines the public cloud + // endpoint as its default base URI + if !strings.HasSuffix(baseURI, "/") { + baseURI += "/" + } + switch baseURI { + case azure.PublicCloud.ServiceManagementEndpoint: + return f.ManagementEndpoint, nil + case azure.PublicCloud.ResourceManagerEndpoint: + return f.ResourceManagerEndpoint, nil + case azure.PublicCloud.ActiveDirectoryEndpoint: + return f.ActiveDirectoryEndpoint, nil + case azure.PublicCloud.GalleryEndpoint: + return f.GalleryEndpoint, nil + case azure.PublicCloud.GraphEndpoint: + return f.GraphResourceID, nil + } + return "", fmt.Errorf("auth: base URI not found in endpoints") +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/authfile_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/authfile_test.go new file mode 100644 index 000000000..21cbde0ce --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/authfile_test.go @@ -0,0 +1,97 @@ +package auth + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" +) + +var ( + expectedFile = File{ + ClientID: "client-id-123", + ClientSecret: "client-secret-456", + SubscriptionID: "sub-id-789", + TenantID: "tenant-id-123", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com", + ResourceManagerEndpoint: "https://management.azure.com/", + GraphResourceID: "https://graph.windows.net/", + SQLManagementEndpoint: "https://management.core.windows.net:8443/", + GalleryEndpoint: "https://gallery.azure.com/", + ManagementEndpoint: "https://management.core.windows.net/", + } +) + +func TestGetClientSetup(t *testing.T) { + os.Setenv("AZURE_AUTH_LOCATION", filepath.Join(getCredsPath(), "credsutf16le.json")) + setup, err := GetClientSetup("https://management.azure.com") + if err != nil { + t.Logf("GetClientSetup failed, got error %v", err) + t.Fail() + } + + if setup.BaseURI != "https://management.azure.com/" { + t.Logf("auth.BaseURI not set correctly, expected 'https://management.azure.com/', got '%s'", setup.BaseURI) + t.Fail() + } + + if !reflect.DeepEqual(expectedFile, setup.File) { + t.Logf("auth.File not set correctly, expected %v, got %v", expectedFile, setup.File) + t.Fail() + } + + if setup.BearerAuthorizer == nil { + t.Log("auth.Authorizer not set correctly, got nil") + t.Fail() + } +} + +func TestDecodeAndUnmarshal(t *testing.T) { + tests := []string{ + "credsutf8.json", + "credsutf16le.json", + "credsutf16be.json", + } + creds := getCredsPath() + for _, test := range tests { + b, err := ioutil.ReadFile(filepath.Join(creds, test)) + if err != nil { + t.Logf("error reading file '%s': %s", test, err) + t.Fail() + } + decoded, err := decode(b) + if err != nil { + t.Logf("error decoding file '%s': %s", test, err) + t.Fail() + } + var got File + err = json.Unmarshal(decoded, &got) + if err != nil { + t.Logf("error unmarshaling file '%s': %s", test, err) + t.Fail() + } + if !reflect.DeepEqual(expectedFile, got) { + t.Logf("unmarshaled map expected %v, got %v", expectedFile, got) + t.Fail() + } + } +} + +func getCredsPath() string { + gopath := os.Getenv("GOPATH") + return filepath.Join(gopath, "src", "github.com", "Azure", "go-autorest", "testdata") +} + +func areMapsEqual(a, b map[string]string) bool { + if len(a) != len(b) { + return false + } + for k := range a { + if a[k] != b[k] { + return false + } + } + return true +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go new file mode 100644 index 000000000..fa1835647 
--- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -0,0 +1,200 @@ +/* +Package azure provides Azure-specific implementations used with AutoRest. + +See the included examples for more detail. +*/ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strconv" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + // HeaderClientID is the Azure extension header to set a user-specified request ID. + HeaderClientID = "x-ms-client-request-id" + + // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID + // should be included in the response. + HeaderReturnClientID = "x-ms-return-client-request-id" + + // HeaderRequestID is the Azure extension header of the service generated request ID returned + // in the response. + HeaderRequestID = "x-ms-request-id" +) + +// ServiceError encapsulates the error response from an Azure service. +type ServiceError struct { + Code string `json:"code"` + Message string `json:"message"` + Details *[]interface{} `json:"details"` +} + +func (se ServiceError) Error() string { + if se.Details != nil { + d, err := json.Marshal(*(se.Details)) + if err != nil { + return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details) + } + return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d)) + } + return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) +} + +// RequestError describes an error response returned by Azure service. +type RequestError struct { + autorest.DetailedError + + // The error returned by the Azure service. + ServiceError *ServiceError `json:"error"` + + // The request id (from the x-ms-request-id-header) of the request. + RequestID string +} + +// Error returns a human-friendly error message from service error. +func (e RequestError) Error() string { + return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", + e.StatusCode, e.ServiceError) +} + +// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. +func IsAzureError(e error) bool { + _, ok := e.(*RequestError) + return ok +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. 
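+//
+// For example (editor's sketch; the values are illustrative, not from this diff):
+//
+//	e := NewErrorWithError(io.EOF, "example", "Do", nil, "reading %s failed", "body")
+//	// e.StatusCode is autorest.UndefinedStatusCode because no http.Response was passed.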
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that the UUID accompanies the http.Response. +func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. +func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response). +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance the service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + // Copy and replace the Body in case it does not contain an error object. 
+ // This will leave the Body available to the caller. + b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } else if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" { + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + } + } + + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go new file mode 100644 index 000000000..e8bddbbfd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go @@ -0,0 +1,513 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + headerAuthorization = "Authorization" + longDelay = 5 * time.Second + retryDelay = 10 * time.Millisecond + testLogPrefix = "azure:" +) + +// Use a Client Inspector to set the request identifier. 
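For orientation before the test file continues, a consumer-side sketch of how the pieces above compose: respond with WithErrorUnlessStatusCode, then branch on IsAzureError. Only the azure package calls are taken from the code above; the surrounding fragment is illustrative.

```
package example

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// fetch sends req and surfaces Azure service errors as *azure.RequestError.
func fetch(client autorest.Client, req *http.Request) error {
	resp, err := autorest.SendWithSender(client, req)
	if err != nil {
		return err
	}

	// Any status outside the allowed set becomes an *azure.RequestError;
	// the body is re-buffered so it stays readable after decoding.
	err = autorest.Respond(resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByClosing())

	if azure.IsAzureError(err) {
		azErr := err.(*azure.RequestError)
		log.Printf("service error: code=%s request-id=%s",
			azErr.ServiceError.Code, azErr.RequestID)
	}
	return err
}
```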
+func ExampleWithClientID() { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + req, _ := autorest.Prepare(&http.Request{}, + autorest.AsGet(), + autorest.WithBaseURL("https://microsoft.com/a/b/c/")) + + c := autorest.Client{Sender: mocks.NewSender()} + c.RequestInspector = WithReturningClientID(uuid) + + autorest.SendWithSender(c, req) + fmt.Printf("Inspector added the %s header with the value %s\n", + HeaderClientID, req.Header.Get(HeaderClientID)) + fmt.Printf("Inspector added the %s header with the value %s\n", + HeaderReturnClientID, req.Header.Get(HeaderReturnClientID)) + // Output: + // Inspector added the x-ms-client-request-id header with the value 71FDB9F4-5E49-4C12-B266-DE7B4FD999A6 + // Inspector added the x-ms-return-client-request-id header with the value true +} + +func TestWithReturningClientIDReturnsError(t *testing.T) { + var errIn error + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + _, errOut := autorest.Prepare(&http.Request{}, + withErrorPrepareDecorator(&errIn), + WithReturningClientID(uuid)) + + if errOut == nil || errIn != errOut { + t.Fatalf("azure: WithReturningClientID failed to exit early when receiving an error -- expected (%v), received (%v)", + errIn, errOut) + } +} + +func TestWithClientID(t *testing.T) { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + req, _ := autorest.Prepare(&http.Request{}, + WithClientID(uuid)) + + if req.Header.Get(HeaderClientID) != uuid { + t.Fatalf("azure: WithClientID failed to set %s -- expected %s, received %s", + HeaderClientID, uuid, req.Header.Get(HeaderClientID)) + } +} + +func TestWithReturnClientID(t *testing.T) { + b := false + req, _ := autorest.Prepare(&http.Request{}, + WithReturnClientID(b)) + + if req.Header.Get(HeaderReturnClientID) != strconv.FormatBool(b) { + t.Fatalf("azure: WithReturnClientID failed to set %s -- expected %s, received %s", + HeaderReturnClientID, strconv.FormatBool(b), req.Header.Get(HeaderReturnClientID)) + } +} + +func TestExtractClientID(t *testing.T) { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + resp := mocks.NewResponse() + mocks.SetResponseHeader(resp, HeaderClientID, uuid) + + if ExtractClientID(resp) != uuid { + t.Fatalf("azure: ExtractClientID failed to extract the %s -- expected %s, received %s", + HeaderClientID, uuid, ExtractClientID(resp)) + } +} + +func TestExtractRequestID(t *testing.T) { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + resp := mocks.NewResponse() + mocks.SetResponseHeader(resp, HeaderRequestID, uuid) + + if ExtractRequestID(resp) != uuid { + t.Fatalf("azure: ExtractRequestID failed to extract the %s -- expected %s, received %s", + HeaderRequestID, uuid, ExtractRequestID(resp)) + } +} + +func TestIsAzureError_ReturnsTrueForAzureError(t *testing.T) { + if !IsAzureError(&RequestError{}) { + t.Fatalf("azure: IsAzureError failed to return true for an Azure Service error") + } +} + +func TestIsAzureError_ReturnsFalseForNonAzureError(t *testing.T) { + if IsAzureError(fmt.Errorf("An Error")) { + t.Fatalf("azure: IsAzureError returned true for a non-Azure Service error") + } +} + +func TestNewErrorWithError_UsesResponseStatusCode(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("Error"), "packageType", "method", mocks.NewResponseWithStatus("Forbidden", http.StatusForbidden), "message") + if e.StatusCode != http.StatusForbidden { + t.Fatalf("azure: NewErrorWithError failed to use the Status Code of the passed Response -- expected %v, received %v", http.StatusForbidden, e.StatusCode) + } +} + +func TestNewErrorWithError_ReturnsUnwrappedError(t *testing.T) { + e1 := 
RequestError{} + e1.ServiceError = &ServiceError{Code: "42", Message: "A Message"} + e1.StatusCode = 200 + e1.RequestID = "A RequestID" + e2 := NewErrorWithError(&e1, "packageType", "method", nil, "message") + + if !reflect.DeepEqual(e1, e2) { + t.Fatalf("azure: NewErrorWithError wrapped a RequestError -- expected %T, received %T", e1, e2) + } +} + +func TestNewErrorWithError_WrapsAnError(t *testing.T) { + e1 := fmt.Errorf("Inner Error") + var e2 interface{} = NewErrorWithError(e1, "packageType", "method", nil, "message") + + if _, ok := e2.(RequestError); !ok { + t.Fatalf("azure: NewErrorWithError failed to wrap a standard error -- received %T", e2) + } +} + +func TestWithErrorUnlessStatusCode_NotAnAzureError(t *testing.T) { + body := `<html> + <head> + <title>IIS Error page</title> + </head> + <body>Some non-JSON error page</body> + </html>` + r := mocks.NewResponseWithContent(body) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusBadRequest + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + azErr, _ := err.(*RequestError) + if azErr != nil { + t.Fatalf("azure: azure.RequestError returned from malformed response: %v", err) + } + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != body { + t.Fatalf("response body is wrong. got=%q expected=%q", string(b), body) + } +} + +func TestWithErrorUnlessStatusCode_FoundAzureErrorWithoutDetails(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Azure is having trouble right now." + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + expected := "autorest/azure: Service returned an error. Status=500 Code=\"InternalError\" Message=\"Azure is having trouble right now.\"" + if !reflect.DeepEqual(expected, azErr.Error()) { + t.Fatalf("azure: service error is not unmarshaled properly.\nexpected=%v\ngot=%v", expected, azErr.Error()) + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Fatalf("azure: got wrong StatusCode=%d Expected=%d", azErr.StatusCode, expected) + } + if expected := uuid; azErr.RequestID != expected { + t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) + } + + _ = azErr.Error() + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. 
got=%q expected=%q", string(b), j) + } + +} + +func TestWithErrorUnlessStatusCode_FoundAzureErrorWithDetails(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Azure is having trouble right now.", + "details": [{"code": "conflict1", "message":"error message1"}, + {"code": "conflict2", "message":"error message2"}] + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + if expected := "InternalError"; azErr.ServiceError.Code != expected { + t.Fatalf("azure: wrong error code. expected=%q; got=%q", expected, azErr.ServiceError.Code) + } + if azErr.ServiceError.Message == "" { + t.Fatalf("azure: error message is not unmarshaled properly") + } + b, _ := json.Marshal(*azErr.ServiceError.Details) + if string(b) != `[{"code":"conflict1","message":"error message1"},{"code":"conflict2","message":"error message2"}]` { + t.Fatalf("azure: error details are not unmarshaled properly") + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Fatalf("azure: got wrong StatusCode=%v Expected=%d", azErr.StatusCode, expected) + } + if expected := uuid; azErr.RequestID != expected { + t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) + } + + _ = azErr.Error() + + // the error body should still be there + defer r.Body.Close() + b, err = ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) + } + +} + +func TestWithErrorUnlessStatusCode_NoAzureError(t *testing.T) { + j := `{ + "Status":"NotFound" + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + expected := &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + + if !reflect.DeepEqual(expected, azErr.ServiceError) { + t.Fatalf("azure: service error is not unmarshaled properly. expected=%q\ngot=%q", expected, azErr.ServiceError) + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Fatalf("azure: got wrong StatusCode=%v Expected=%d", azErr.StatusCode, expected) + } + if expected := uuid; azErr.RequestID != expected { + t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) + } + + _ = azErr.Error() + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. 
got=%q expected=%q", string(b), j) + } + +} + +func TestWithErrorUnlessStatusCode_UnwrappedError(t *testing.T) { + j := `{ + "target": null, + "code": "InternalError", + "message": "Azure is having trouble right now.", + "details": [{"code": "conflict1", "message":"error message1"}, + {"code": "conflict2", "message":"error message2"}], + "innererror": [] +}` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatal("azure: returned nil error for proper error response") + } + + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("returned error is not azure.RequestError: %T", err) + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Logf("Incorrect StatusCode got: %v want: %d", azErr.StatusCode, expected) + t.Fail() + } + + if expected := "Azure is having trouble right now."; azErr.ServiceError.Message != expected { + t.Logf("Incorrect Message\n\tgot: %q\n\twant: %q", azErr.ServiceError.Message, expected) + t.Fail() + } + + if expected := uuid; azErr.RequestID != expected { + t.Logf("Incorrect request ID\n\tgot: %q\n\twant: %q", azErr.RequestID, expected) + t.Fail() + } + + expectedServiceErrorDetails := `[{"code":"conflict1","message":"error message1"},{"code":"conflict2","message":"error message2"}]` + if azErr.ServiceError == nil { + t.Logf("`ServiceError` was nil when it shouldn't have been.") + t.Fail() + } else if azErr.ServiceError.Details == nil { + t.Logf("`ServiceError.Details` was nil when it should have been %q", expectedServiceErrorDetails) + t.Fail() + } else if details, _ := json.Marshal(*azErr.ServiceError.Details); expectedServiceErrorDetails != string(details) { + t.Logf("Error details were not unmarshaled properly.\n\tgot: %q\n\twant: %q", string(details), expectedServiceErrorDetails) + t.Fail() + } + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Error(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) + } + +} + +func TestRequestErrorString_WithError(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Conflict", + "details": [{"code": "conflict1", "message":"error message1"}] + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, _ := err.(*RequestError) + expected := "autorest/azure: Service returned an error. 
Status=500 Code=\"InternalError\" Message=\"Conflict\" Details=[{\"code\":\"conflict1\",\"message\":\"error message1\"}]" + if expected != azErr.Error() { + t.Fatalf("azure: send wrong RequestError.\nexpected=%v\ngot=%v", expected, azErr.Error()) + } +} + +func withErrorPrepareDecorator(e *error) autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + *e = fmt.Errorf("azure: Faux Prepare Error") + return r, *e + }) + } +} + +func withAsyncResponseDecorator(n int) autorest.SendDecorator { + i := 0 + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil { + if i < n { + resp.StatusCode = http.StatusCreated + resp.Header = http.Header{} + resp.Header.Add(http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestURL) + i++ + } else { + resp.StatusCode = http.StatusOK + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + } + } + return resp, err + }) + } +} + +type mockAuthorizer struct{} + +func (ma mockAuthorizer) WithAuthorization() autorest.PrepareDecorator { + return autorest.WithHeader(headerAuthorization, mocks.TestAuthorizationHeader) +} + +type mockFailingAuthorizer struct{} + +func (mfa mockFailingAuthorizer) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + return r, fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error") + }) + } +} + +type mockInspector struct { + wasInvoked bool +} + +func (mi *mockInspector) WithInspection() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + mi.wasInvoked = true + return p.Prepare(r) + }) + } +} + +func (mi *mockInspector) ByInspecting() autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + mi.wasInvoked = true + return r.Respond(resp) + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go new file mode 100644 index 000000000..3e226fe9b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go @@ -0,0 +1,65 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
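A short usage sketch for the profile loader defined next; it relies only on the cli package's own ProfilePath and LoadProfile, and assumes the Azure CLI has already written ~/.azure/azureProfile.json.

```
package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure/cli"
)

func main() {
	path, err := cli.ProfilePath()
	if err != nil {
		log.Fatalf("resolving profile path: %v", err)
	}

	profile, err := cli.LoadProfile(path)
	if err != nil {
		log.Fatalf("loading profile: %v", err)
	}

	// Print the subscription the CLI would use by default.
	for _, sub := range profile.Subscriptions {
		if sub.IsDefault {
			fmt.Printf("default subscription: %s (%s)\n", sub.Name, sub.ID)
		}
	}
}
```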
+ +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + + "github.com/dimchansky/utfbom" + "github.com/mitchellh/go-homedir" +) + +// Profile represents a Profile from the Azure CLI +type Profile struct { + InstallationID string `json:"installationId"` + Subscriptions []Subscription `json:"subscriptions"` +} + +// Subscription represents a Subscription from the Azure CLI +type Subscription struct { + EnvironmentName string `json:"environmentName"` + ID string `json:"id"` + IsDefault bool `json:"isDefault"` + Name string `json:"name"` + State string `json:"state"` + TenantID string `json:"tenantId"` +} + +// ProfilePath returns the path where the Azure Profile is stored by the Azure CLI +func ProfilePath() (string, error) { + return homedir.Expand("~/.azure/azureProfile.json") +} + +// LoadProfile restores a Profile object from a file located at 'path'. +func LoadProfile(path string) (result Profile, err error) { + var contents []byte + contents, err = ioutil.ReadFile(path) + if err != nil { + err = fmt.Errorf("failed to open file (%s) while loading profile: %v", path, err) + return + } + reader := utfbom.SkipOnly(bytes.NewReader(contents)) + + dec := json.NewDecoder(reader) + if err = dec.Decode(&result); err != nil { + err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err) + return + } + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go new file mode 100644 index 000000000..b80b8e3fa --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go @@ -0,0 +1,103 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
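And the matching sketch for the token cache defined next: load the CLI's cached access tokens and convert one to an adal.Token. The resource-URI filter below is an assumption for illustration.

```
package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure/cli"
)

func main() {
	path, err := cli.AccessTokensPath()
	if err != nil {
		log.Fatalf("resolving token path: %v", err)
	}

	tokens, err := cli.LoadTokens(path)
	if err != nil {
		log.Fatalf("loading tokens: %v", err)
	}

	for _, t := range tokens {
		// Illustrative filter: pick a token for the service-management resource.
		if t.Resource != "https://management.core.windows.net/" {
			continue
		}
		adalToken, err := t.ToADALToken()
		if err != nil {
			log.Printf("skipping token: %v", err)
			continue
		}
		fmt.Println("token expires on (seconds since epoch):", adalToken.ExpiresOn)
		break
	}
}
```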
+ +import ( + "encoding/json" + "fmt" + "os" + "strconv" + "time" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/date" + "github.com/mitchellh/go-homedir" +) + +// Token represents an AccessToken from the Azure CLI +type Token struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` +} + +// ToADALToken converts an Azure CLI `Token` to an `adal.Token` +func (t Token) ToADALToken() (converted adal.Token, err error) { + tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn) + if err != nil { + err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err) + return + } + + difference := tokenExpirationDate.Sub(date.UnixEpoch()) + + converted = adal.Token{ + AccessToken: t.AccessToken, + Type: t.TokenType, + ExpiresIn: "3600", + ExpiresOn: strconv.Itoa(int(difference.Seconds())), + RefreshToken: t.RefreshToken, + Resource: t.Resource, + } + return +} + +// AccessTokensPath returns the path where access tokens are stored by the Azure CLI +func AccessTokensPath() (string, error) { + return homedir.Expand("~/.azure/accessTokens.json") +} + +// ParseExpirationDate parses either an Azure CLI or CloudShell date into a time object +func ParseExpirationDate(input string) (*time.Time, error) { + // CloudShell (and potentially the Azure CLI in future) + expirationDate, cloudShellErr := time.Parse(time.RFC3339, input) + if cloudShellErr != nil { + // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone) + const cliFormat = "2006-01-02 15:04:05.999999" + expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local) + if cliErr == nil { + return &expirationDate, nil + } + + return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr) + } + + return &expirationDate, nil + } + +// LoadTokens restores a set of Token objects from a file located at 'path'. +func LoadTokens(path string) ([]Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var tokens []Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&tokens); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err) + } + + return tokens, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 000000000..30c4351a5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,144 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "strings" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// Environment represents a set of endpoints for each of Azure's Clouds. +type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.azure.com", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + 
TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ContainerRegistryDNSSuffix: "azurecr.io", + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.io", + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: "azurecr.io", + } +) + +// EnvironmentFromName returns an Environment based on the common name specified +func EnvironmentFromName(name string) (Environment, error) { + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + return env, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go new file mode 100644 index 000000000..2718149ad --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go @@ -0,0 +1,230 @@ +// test +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "testing" +) + +func TestEnvironmentFromName(t *testing.T) { + name := "azurechinacloud" + if env, _ := EnvironmentFromName(name); env != ChinaCloud { + t.Errorf("Expected to get ChinaCloud for %q", name) + } + + name = "AzureChinaCloud" + if env, _ := EnvironmentFromName(name); env != ChinaCloud { + t.Errorf("Expected to get ChinaCloud for %q", name) + } + + name = "azuregermancloud" + if env, _ := EnvironmentFromName(name); env != GermanCloud { + t.Errorf("Expected to get GermanCloud for %q", name) + } + + name = "AzureGermanCloud" + if env, _ := EnvironmentFromName(name); env != GermanCloud { + t.Errorf("Expected to get GermanCloud for %q", name) + } + + name = "azurepubliccloud" + if env, _ := EnvironmentFromName(name); env != PublicCloud { + t.Errorf("Expected to get PublicCloud for %q", name) + } + + name = "AzurePublicCloud" + if env, _ := EnvironmentFromName(name); env != PublicCloud { + t.Errorf("Expected to get PublicCloud for %q", name) + } + + name = "azureusgovernmentcloud" + if env, _ := EnvironmentFromName(name); env != USGovernmentCloud { + t.Errorf("Expected to get USGovernmentCloud for %q", name) + } + + name = "AzureUSGovernmentCloud" + if env, _ := EnvironmentFromName(name); env != USGovernmentCloud { + t.Errorf("Expected to get USGovernmentCloud for %q", name) + } + + name = "thisisnotarealcloudenv" + if _, err := EnvironmentFromName(name); err == nil { + t.Errorf("Expected to get an error for %q", name) + } +} + +func TestDeserializeEnvironment(t *testing.T) { + env := `{ + "name": "--name--", + "ActiveDirectoryEndpoint": "--active-directory-endpoint--", + "galleryEndpoint": "--gallery-endpoint--", + "graphEndpoint": "--graph-endpoint--", + "keyVaultDNSSuffix": "--key-vault-dns-suffix--", + "keyVaultEndpoint": "--key-vault-endpoint--", + "managementPortalURL": "--management-portal-url--", + "publishSettingsURL": "--publish-settings-url--", + "resourceManagerEndpoint": "--resource-manager-endpoint--", + "serviceBusEndpointSuffix": "--service-bus-endpoint-suffix--", + "serviceManagementEndpoint": "--service-management-endpoint--", + "sqlDatabaseDNSSuffix": "--sql-database-dns-suffix--", + "storageEndpointSuffix": "--storage-endpoint-suffix--", + "trafficManagerDNSSuffix": "--traffic-manager-dns-suffix--", + "serviceManagementVMDNSSuffix": "--asm-vm-dns-suffix--", + "resourceManagerVMDNSSuffix": "--arm-vm-dns-suffix--", + "containerRegistryDNSSuffix": "--container-registry-dns-suffix--" + }` + + testSubject := Environment{} + err := json.Unmarshal([]byte(env), &testSubject) + if err != nil { + t.Fatalf("failed to unmarshal: %s", err) + } + + if "--name--" != testSubject.Name { + t.Errorf("Expected Name to be \"--name--\", but got %q", testSubject.Name) + } + if "--management-portal-url--" != testSubject.ManagementPortalURL { + t.Errorf("Expected ManagementPortalURL to be \"--management-portal-url--\", but got %q", testSubject.ManagementPortalURL) + } + if "--publish-settings-url--" != testSubject.PublishSettingsURL { + t.Errorf("Expected PublishSettingsURL to be \"--publish-settings-url--\", but got %q", testSubject.PublishSettingsURL) + } + if "--service-management-endpoint--" != testSubject.ServiceManagementEndpoint { + t.Errorf("Expected ServiceManagementEndpoint to be \"--service-management-endpoint--\", but got %q", testSubject.ServiceManagementEndpoint) + } + if "--resource-manager-endpoint--" != 
testSubject.ResourceManagerEndpoint { + t.Errorf("Expected ResourceManagerEndpoint to be \"--resource-manager-endpoint--\", but got %q", testSubject.ResourceManagerEndpoint) + } + if "--active-directory-endpoint--" != testSubject.ActiveDirectoryEndpoint { + t.Errorf("Expected ActiveDirectoryEndpoint to be \"--active-directory-endpoint--\", but got %q", testSubject.ActiveDirectoryEndpoint) + } + if "--gallery-endpoint--" != testSubject.GalleryEndpoint { + t.Errorf("Expected GalleryEndpoint to be \"--gallery-endpoint--\", but got %q", testSubject.GalleryEndpoint) + } + if "--key-vault-endpoint--" != testSubject.KeyVaultEndpoint { + t.Errorf("Expected KeyVaultEndpoint to be \"--key-vault-endpoint--\", but got %q", testSubject.KeyVaultEndpoint) + } + if "--graph-endpoint--" != testSubject.GraphEndpoint { + t.Errorf("Expected GraphEndpoint to be \"--graph-endpoint--\", but got %q", testSubject.GraphEndpoint) + } + if "--storage-endpoint-suffix--" != testSubject.StorageEndpointSuffix { + t.Errorf("Expected StorageEndpointSuffix to be \"--storage-endpoint-suffix--\", but got %q", testSubject.StorageEndpointSuffix) + } + if "--sql-database-dns-suffix--" != testSubject.SQLDatabaseDNSSuffix { + t.Errorf("Expected SQLDatabaseDNSSuffix to be \"--sql-database-dns-suffix--\", but got %q", testSubject.SQLDatabaseDNSSuffix) + } + if "--key-vault-dns-suffix--" != testSubject.KeyVaultDNSSuffix { + t.Errorf("Expected KeyVaultDNSSuffix to be \"--key-vault-dns-suffix--\", but got %q", testSubject.KeyVaultDNSSuffix) + } + if "--service-bus-endpoint-suffix--" != testSubject.ServiceBusEndpointSuffix { + t.Errorf("Expected ServiceBusEndpointSuffix to be \"--service-bus-endpoint-suffix--\", but got %q", testSubject.ServiceBusEndpointSuffix) + } + if "--asm-vm-dns-suffix--" != testSubject.ServiceManagementVMDNSSuffix { + t.Errorf("Expected ServiceManagementVMDNSSuffix to be \"--asm-vm-dns-suffix--\", but got %q", testSubject.ServiceManagementVMDNSSuffix) + } + if "--arm-vm-dns-suffix--" != testSubject.ResourceManagerVMDNSSuffix { + t.Errorf("Expected ResourceManagerVMDNSSuffix to be \"--arm-vm-dns-suffix--\", but got %q", testSubject.ResourceManagerVMDNSSuffix) + } + if "--container-registry-dns-suffix--" != testSubject.ContainerRegistryDNSSuffix { + t.Errorf("Expected ContainerRegistryDNSSuffix to be \"--container-registry-dns-suffix--\", but got %q", testSubject.ContainerRegistryDNSSuffix) + } +} + +func TestRoundTripSerialization(t *testing.T) { + env := Environment{ + Name: "--unit-test--", + ManagementPortalURL: "--management-portal-url", + PublishSettingsURL: "--publish-settings-url--", + ServiceManagementEndpoint: "--service-management-endpoint--", + ResourceManagerEndpoint: "--resource-management-endpoint--", + ActiveDirectoryEndpoint: "--active-directory-endpoint--", + GalleryEndpoint: "--gallery-endpoint--", + KeyVaultEndpoint: "--key-vault--endpoint--", + GraphEndpoint: "--graph-endpoint--", + StorageEndpointSuffix: "--storage-endpoint-suffix--", + SQLDatabaseDNSSuffix: "--sql-database-dns-suffix--", + TrafficManagerDNSSuffix: "--traffic-manager-dns-suffix--", + KeyVaultDNSSuffix: "--key-vault-dns-suffix--", + ServiceBusEndpointSuffix: "--service-bus-endpoint-suffix--", + ServiceManagementVMDNSSuffix: "--asm-vm-dns-suffix--", + ResourceManagerVMDNSSuffix: "--arm-vm-dns-suffix--", + ContainerRegistryDNSSuffix: "--container-registry-dns-suffix--", + } + + bytes, err := json.Marshal(env) + if err != nil { + t.Fatalf("failed to marshal: %s", err) + } + + testSubject := Environment{} + err = 
json.Unmarshal(bytes, &testSubject) + if err != nil { + t.Fatalf("failed to unmarshal: %s", err) + } + + if env.Name != testSubject.Name { + t.Errorf("Expected Name to be %q, but got %q", env.Name, testSubject.Name) + } + if env.ManagementPortalURL != testSubject.ManagementPortalURL { + t.Errorf("Expected ManagementPortalURL to be %q, but got %q", env.ManagementPortalURL, testSubject.ManagementPortalURL) + } + if env.PublishSettingsURL != testSubject.PublishSettingsURL { + t.Errorf("Expected PublishSettingsURL to be %q, but got %q", env.PublishSettingsURL, testSubject.PublishSettingsURL) + } + if env.ServiceManagementEndpoint != testSubject.ServiceManagementEndpoint { + t.Errorf("Expected ServiceManagementEndpoint to be %q, but got %q", env.ServiceManagementEndpoint, testSubject.ServiceManagementEndpoint) + } + if env.ResourceManagerEndpoint != testSubject.ResourceManagerEndpoint { + t.Errorf("Expected ResourceManagerEndpoint to be %q, but got %q", env.ResourceManagerEndpoint, testSubject.ResourceManagerEndpoint) + } + if env.ActiveDirectoryEndpoint != testSubject.ActiveDirectoryEndpoint { + t.Errorf("Expected ActiveDirectoryEndpoint to be %q, but got %q", env.ActiveDirectoryEndpoint, testSubject.ActiveDirectoryEndpoint) + } + if env.GalleryEndpoint != testSubject.GalleryEndpoint { + t.Errorf("Expected GalleryEndpoint to be %q, but got %q", env.GalleryEndpoint, testSubject.GalleryEndpoint) + } + if env.KeyVaultEndpoint != testSubject.KeyVaultEndpoint { + t.Errorf("Expected KeyVaultEndpoint to be %q, but got %q", env.KeyVaultEndpoint, testSubject.KeyVaultEndpoint) + } + if env.GraphEndpoint != testSubject.GraphEndpoint { + t.Errorf("Expected GraphEndpoint to be %q, but got %q", env.GraphEndpoint, testSubject.GraphEndpoint) + } + if env.StorageEndpointSuffix != testSubject.StorageEndpointSuffix { + t.Errorf("Expected StorageEndpointSuffix to be %q, but got %q", env.StorageEndpointSuffix, testSubject.StorageEndpointSuffix) + } + if env.SQLDatabaseDNSSuffix != testSubject.SQLDatabaseDNSSuffix { + t.Errorf("Expected SQLDatabaseDNSSuffix to be %q, but got %q", env.SQLDatabaseDNSSuffix, testSubject.SQLDatabaseDNSSuffix) + } + if env.TrafficManagerDNSSuffix != testSubject.TrafficManagerDNSSuffix { + t.Errorf("Expected TrafficManagerDNSSuffix to be %q, but got %q", env.TrafficManagerDNSSuffix, testSubject.TrafficManagerDNSSuffix) + } + if env.KeyVaultDNSSuffix != testSubject.KeyVaultDNSSuffix { + t.Errorf("Expected KeyVaultDNSSuffix to be %q, but got %q", env.KeyVaultDNSSuffix, testSubject.KeyVaultDNSSuffix) + } + if env.ServiceBusEndpointSuffix != testSubject.ServiceBusEndpointSuffix { + t.Errorf("Expected ServiceBusEndpointSuffix to be %q, but got %q", env.ServiceBusEndpointSuffix, testSubject.ServiceBusEndpointSuffix) + } + if env.ServiceManagementVMDNSSuffix != testSubject.ServiceManagementVMDNSSuffix { + t.Errorf("Expected ServiceManagementVMDNSSuffix to be %q, but got %q", env.ServiceManagementVMDNSSuffix, testSubject.ServiceManagementVMDNSSuffix) + } + if env.ResourceManagerVMDNSSuffix != testSubject.ResourceManagerVMDNSSuffix { + t.Errorf("Expected ResourceManagerVMDNSSuffix to be %q, but got %q", env.ResourceManagerVMDNSSuffix, testSubject.ResourceManagerVMDNSSuffix) + } + if env.ContainerRegistryDNSSuffix != testSubject.ContainerRegistryDNSSuffix { + t.Errorf("Expected ContainerRegistryDNSSuffix to be %q, but got %q", env.ContainerRegistryDNSSuffix, testSubject.ContainerRegistryDNSSuffix) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/example/README.md 
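Before the example app, a sketch of the intended lookup path for these environments: resolve one by name and feed its ActiveDirectoryEndpoint to adal.NewOAuthConfig, the same call the example's main.go makes against azure.PublicCloud. The tenant ID below is a placeholder.

```
package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Lookup is case-insensitive; unknown names return an error.
	env, err := azure.EnvironmentFromName("AzureUSGovernmentCloud")
	if err != nil {
		log.Fatalf("unknown environment: %v", err)
	}

	tenantID := "00000000-0000-0000-0000-000000000000" // placeholder
	oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, tenantID)
	if err != nil {
		log.Fatalf("building oauth config: %v", err)
	}

	fmt.Println("authority:", oauthConfig.AuthorityEndpoint.String())
	fmt.Println("resource manager:", env.ResourceManagerEndpoint)
}
```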
b/vendor/github.com/Azure/go-autorest/autorest/azure/example/README.md new file mode 100644 index 000000000..b87e173fa --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/example/README.md @@ -0,0 +1,127 @@ +# autorest azure example + +## Usage (device mode) + +This shows how to use the example for device auth. + +1. Execute this. It will save your token to /tmp/azure-example-token: + + ``` + ./example -tenantId "13de0a15-b5db-44b9-b682-b4ba82afbd29" -subscriptionId "aff271ee-e9be-4441-b9bb-42f5af4cbaeb" -mode "device" -tokenCachePath "/tmp/azure-example-token" + ``` + +2. Execute it again; it will load the token from the cache and not prompt for auth again. + +## Usage (certificate mode) + +This example covers how to make an authenticated call to the Azure Resource Manager APIs, using certificate-based authentication. + +0. Export some required variables + + ``` + export SUBSCRIPTION_ID="aff271ee-e9be-4441-b9bb-42f5af4cbaeb" + export TENANT_ID="13de0a15-b5db-44b9-b682-b4ba82afbd29" + export RESOURCE_GROUP="someresourcegroup" + ``` + + * replace these values with your own + +1. Create a private key + + ``` + openssl genrsa -out "example.key" 2048 + ``` + + + +2. Create the certificate + + ``` + openssl req -new -key "example.key" -subj "/CN=example" -out "example.csr" + + openssl x509 -req -in "example.csr" -signkey "example.key" -out "example.crt" -days 10000 + ``` + + + +3. Create the PKCS12 version of the certificate (with no password) + + ``` + openssl pkcs12 -export -out "example.pfx" -inkey "example.key" -in "example.crt" -passout pass: + ``` + + + +4. Register a new Azure AD Application with the certificate contents + + ``` + certificateContents="$(tail -n+2 "example.crt" | head -n-1)" + + azure ad app create \ + --name "example-azuread-app" \ + --home-page="http://example-azuread-app/home" \ + --identifier-uris "http://example-azuread-app/app" \ + --key-usage "Verify" \ + --end-date "2020-01-01" \ + --key-value "${certificateContents}" + ``` + + + +5. Create a new service principal using the "Application Id" from the previous step + + ``` + azure ad sp create "APPLICATION_ID" + ``` + + * Replace APPLICATION_ID with the "Application Id" returned in step 4 + + + +6. Grant your service principal necessary permissions + + ``` + azure role assignment create \ + --resource-group "${RESOURCE_GROUP}" \ + --roleName "Contributor" \ + --subscription "${SUBSCRIPTION_ID}" \ + --spn "http://example-azuread-app/app" + ``` + + * Replace SUBSCRIPTION_ID with your subscription id + * Replace RESOURCE_GROUP with the resource group for the assignment + * Ensure that the `spn` parameter matches an `identifier-url` from Step 4 + + + +7. Run this example app to see your resource groups + + ``` + go run main.go \ + --tenantId="${TENANT_ID}" \ + --subscriptionId="${SUBSCRIPTION_ID}" \ + --applicationId="http://example-azuread-app/app" \ + --certificatePath="example.pfx" + ``` + + +You should see something like this as output: + +``` +2015/11/08 18:28:39 Using these settings: +2015/11/08 18:28:39 * certificatePath: example.pfx +2015/11/08 18:28:39 * applicationID: http://example-azuread-app/app +2015/11/08 18:28:39 * tenantID: 13de0a15-b5db-44b9-b682-b4ba82afbd29 +2015/11/08 18:28:39 * subscriptionID: aff271ee-e9be-4441-b9bb-42f5af4cbaeb +2015/11/08 18:28:39 loading certificate... +2015/11/08 18:28:39 retrieve oauth token... +2015/11/08 18:28:39 querying the list of resource groups... 
+2015/11/08 18:28:50 +2015/11/08 18:28:50 Groups: {"value":[{"id":"/subscriptions/aff271ee-e9be-4441-b9bb-42f5af4cbaeb/resourceGroups/kube-66f30810","name":"kube-66f30810","location":"westus","tags":{},"properties":{"provisioningState":"Succeeded"}}]} +``` + + + +## Notes + +You may need to wait some time between executing step 4, step 5 and step 6. If you issue those requests too quickly, you might hit an AD server that is not consistent with the server where the resource was created. diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/example/main.go b/vendor/github.com/Azure/go-autorest/autorest/azure/example/main.go new file mode 100644 index 000000000..f887097f6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/example/main.go @@ -0,0 +1,272 @@ +package main + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "golang.org/x/crypto/pkcs12" +) + +const ( + resourceGroupURLTemplate = "https://management.azure.com" + apiVersion = "2015-01-01" + nativeAppClientID = "a87032a7-203c-4bf7-913c-44c50d23409a" + resource = "https://management.core.windows.net/" +) + +var ( + mode string + tenantID string + subscriptionID string + applicationID string + + tokenCachePath string + forceRefresh bool + impatient bool + + certificatePath string +) + +func init() { + flag.StringVar(&mode, "mode", "device", "mode of operation for SPT creation") + flag.StringVar(&certificatePath, "certificatePath", "", "path to pk12/pfx certificate") + flag.StringVar(&applicationID, "applicationId", "", "application id") + flag.StringVar(&tenantID, "tenantId", "", "tenant id") + flag.StringVar(&subscriptionID, "subscriptionId", "", "subscription id") + flag.StringVar(&tokenCachePath, "tokenCachePath", "", "location of oauth token cache") + flag.BoolVar(&forceRefresh, "forceRefresh", false, "pass true to force a token refresh") + + flag.Parse() + + log.Printf("mode(%s) certPath(%s) appID(%s) tenantID(%s), subID(%s)\n", + mode, certificatePath, applicationID, tenantID, subscriptionID) + + if mode == "certificate" && + (strings.TrimSpace(tenantID) == "" || strings.TrimSpace(subscriptionID) == "") { + log.Fatalln("Bad usage. Using certificate mode. Please specify tenantID, subscriptionID") + } + + if mode != "certificate" && mode != "device" { + log.Fatalln("Bad usage. Mode must be one of 'certificate' or 'device'.") + } + + if mode == "device" && strings.TrimSpace(applicationID) == "" { + log.Println("Using device mode auth. Will use `azkube` clientID since none was specified on the command line.") + applicationID = nativeAppClientID + } + + if mode == "certificate" && strings.TrimSpace(certificatePath) == "" { + log.Fatalln("Bad usage. 
Mode 'certificate' requires the 'certificatePath' argument.") + } + + if strings.TrimSpace(tenantID) == "" || strings.TrimSpace(subscriptionID) == "" || strings.TrimSpace(applicationID) == "" { + log.Fatalln("Bad usage. Must specify the 'tenantId', 'subscriptionId', and 'applicationId'") + } +} + +func getSptFromCachedToken(oauthConfig adal.OAuthConfig, clientID, resource string, callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + token, err := adal.LoadToken(tokenCachePath) + if err != nil { + return nil, fmt.Errorf("failed to load token from cache: %v", err) + } + + spt, err := adal.NewServicePrincipalTokenFromManualToken( + oauthConfig, + clientID, + resource, + *token, + callbacks...) + if err != nil { + return nil, fmt.Errorf("failed to create service principal token from cached token: %v", err) + } + + return spt, nil +} + +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} + +func getSptFromCertificate(oauthConfig adal.OAuthConfig, clientID, resource, certificatePath string, callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + certData, err := ioutil.ReadFile(certificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) + } + + certificate, rsaPrivateKey, err := decodePkcs12(certData, "") + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + + spt, err := adal.NewServicePrincipalTokenFromCertificate( + oauthConfig, + clientID, + certificate, + rsaPrivateKey, + resource, + callbacks...) + if err != nil { + return nil, fmt.Errorf("failed to create service principal token from certificate: %v", err) + } + + return spt, nil +} + +func getSptFromDeviceFlow(oauthConfig adal.OAuthConfig, clientID, resource string, callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + oauthClient := &autorest.Client{} + deviceCode, err := adal.InitiateDeviceAuth(oauthClient, oauthConfig, clientID, resource) + if err != nil { + return nil, fmt.Errorf("failed to start device auth flow: %s", err) + } + + fmt.Println(*deviceCode.Message) + + token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) + if err != nil { + return nil, fmt.Errorf("failed to finish device auth flow: %s", err) + } + + spt, err := adal.NewServicePrincipalTokenFromManualToken( + oauthConfig, + clientID, + resource, + *token, + callbacks...)
+ if err != nil { + return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err) + } + + return spt, nil +} + +func printResourceGroups(client *autorest.Client) error { + p := map[string]interface{}{"subscription-id": subscriptionID} + q := map[string]interface{}{"api-version": apiVersion} + + req, _ := autorest.Prepare(&http.Request{}, + autorest.AsGet(), + autorest.WithBaseURL(resourceGroupURLTemplate), + autorest.WithPathParameters("/subscriptions/{subscription-id}/resourcegroups", p), + autorest.WithQueryParameters(q)) + + resp, err := autorest.SendWithSender(client, req) + if err != nil { + return err + } + + value := struct { + ResourceGroups []struct { + Name string `json:"name"` + } `json:"value"` + }{} + + defer resp.Body.Close() + dec := json.NewDecoder(resp.Body) + err = dec.Decode(&value) + if err != nil { + return err + } + + var groupNames = make([]string, len(value.ResourceGroups)) + for i, name := range value.ResourceGroups { + groupNames[i] = name.Name + } + + log.Println("Groups:", strings.Join(groupNames, ", ")) + return err +} + +func saveToken(spt adal.Token) { + if tokenCachePath != "" { + err := adal.SaveToken(tokenCachePath, 0600, spt) + if err != nil { + log.Println("error saving token", err) + } else { + log.Println("saved token to", tokenCachePath) + } + } +} + +func main() { + var spt *adal.ServicePrincipalToken + var err error + + callback := func(t adal.Token) error { + log.Println("refresh callback was called") + saveToken(t) + return nil + } + + oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID) + if err != nil { + panic(err) + } + + if tokenCachePath != "" { + log.Println("tokenCachePath specified; attempting to load from", tokenCachePath) + spt, err = getSptFromCachedToken(*oauthConfig, applicationID, resource, callback) + if err != nil { + spt = nil // just in case, this is the condition below + log.Println("loading from cache failed:", err) + } + } + + if spt == nil { + log.Println("authenticating via 'mode'", mode) + switch mode { + case "device": + spt, err = getSptFromDeviceFlow(*oauthConfig, applicationID, resource, callback) + case "certificate": + spt, err = getSptFromCertificate(*oauthConfig, applicationID, resource, certificatePath, callback) + } + if err != nil { + log.Fatalln("failed to retrieve token:", err) + } + + // should save it as soon as you get it since Refresh won't be called for some time + if tokenCachePath != "" { + saveToken(spt.Token) + } + } + + client := &autorest.Client{} + client.Authorizer = autorest.NewBearerAuthorizer(spt) + + printResourceGroups(client) + + if forceRefresh { + err = spt.Refresh() + if err != nil { + panic(err) + } + printResourceGroups(client) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 000000000..40d5f5ba0 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,188 @@ +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
+func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
+ return func(s autorest.Sender) autorest.Sender {
+ return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+ rr := autorest.NewRetriableRequest(r)
+ for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
+ err = rr.Prepare()
+ if err != nil {
+ return resp, err
+ }
+
+ resp, err = autorest.SendWithSender(s, rr.Request(),
+ autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+ )
+ if err != nil {
+ return resp, err
+ }
+
+ if resp.StatusCode != http.StatusConflict {
+ return resp, err
+ }
+ var re RequestError
+ err = autorest.Respond(
+ resp,
+ autorest.ByUnmarshallingJSON(&re),
+ )
+ if err != nil {
+ return resp, err
+ }
+
+ if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
+ err = register(client, r, re)
+ if err != nil {
+ return resp, fmt.Errorf("failed auto registering Resource Provider: %s", err)
+ }
+ }
+ }
+ return resp, errors.New("failed request and resource provider registration")
+ })
+ }
+}
+
+func getProvider(re RequestError) (string, error) {
+ if re.ServiceError != nil {
+ if re.ServiceError.Details != nil && len(*re.ServiceError.Details) > 0 {
+ detail := (*re.ServiceError.Details)[0].(map[string]interface{})
+ return detail["target"].(string), nil
+ }
+ }
+ return "", errors.New("provider was not found in the response")
+}
+
+func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
+ subID := getSubscription(originalReq.URL.Path)
+ if subID == "" {
+ return errors.New("missing parameter subscriptionID to register resource provider")
+ }
+ providerName, err := getProvider(re)
+ if err != nil {
+ return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
+ }
+ newURL := url.URL{
+ Scheme: originalReq.URL.Scheme,
+ Host: originalReq.URL.Host,
+ }
+
+ // taken from the resources SDK
+ // with almost identical code, these sections are easier to maintain
+ // It is also not a good idea to import the SDK here
+ // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
+ pathParameters := map[string]interface{}{
+ "resourceProviderNamespace": autorest.Encode("path", providerName),
+ "subscriptionId": autorest.Encode("path", subID),
+ }
+
+ const APIVersion = "2016-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(newURL.String()),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
+ autorest.WithQueryParameters(queryParameters),
+ )
+
+ req, err := preparer.Prepare(&http.Request{})
+ if err != nil {
+ return err
+ }
+ req.Cancel = originalReq.Cancel
+
+ resp, err := autorest.SendWithSender(client, req,
+ autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+ )
+ if err != nil {
+ return err
+ }
+
+ type Provider struct {
+ RegistrationState *string `json:"registrationState,omitempty"`
+ }
+ var provider Provider
+
+ err = autorest.Respond(
+ resp,
+ WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&provider),
+ autorest.ByClosing(),
+ )
+ if err != nil {
+ return err
+ }
+
+ // poll for registered provisioning
state + now := time.Now() + for err == nil && time.Since(now) < client.PollingDuration { + // taken from the resources SDK + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + req, err = preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req.Cancel = originalReq.Cancel + + resp, err := autorest.SendWithSender(client.Sender, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + if provider.RegistrationState != nil && + *provider.RegistrationState == "Registered" { + break + } + + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Cancel) + if !delayed { + autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Cancel) + } + } + if !(time.Since(now) < client.PollingDuration) { + return errors.New("polling for resource provider registration has exceeded the polling duration") + } + return err +} + +func getSubscription(path string) string { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp_test.go new file mode 100644 index 000000000..5c54b04a7 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp_test.go @@ -0,0 +1,67 @@ +package azure + +import ( + "net/http" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" +) + +func TestDoRetryWithRegistration(t *testing.T) { + client := mocks.NewSender() + // first response, should retry because it is a transient error + client.AppendResponse(mocks.NewResponseWithStatus("Internal server error", http.StatusInternalServerError)) + // response indicates the resource provider has not been registered + client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{ + "error":{ + "code":"MissingSubscriptionRegistration", + "message":"The subscription registration is in 'Unregistered' state. The subscription must be registered to use namespace 'Microsoft.EventGrid'. See https://aka.ms/rps-not-found for how to register subscriptions.", + "details":[ + { + "code":"MissingSubscriptionRegistration", + "target":"Microsoft.EventGrid", + "message":"The subscription registration is in 'Unregistered' state. The subscription must be registered to use namespace 'Microsoft.EventGrid'. See https://aka.ms/rps-not-found for how to register subscriptions." 
+ }
+ ]
+ }
+}
+`), http.StatusConflict, "MissingSubscriptionRegistration"))
+ // first poll response, still not ready
+ client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{
+ "registrationState": "Registering"
+}
+`), http.StatusOK, "200 OK"))
+ // last poll response, resource provider has been registered
+ client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{
+ "registrationState": "Registered"
+}
+`), http.StatusOK, "200 OK"))
+ // retry original request, response is successful
+ client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK))
+
+ req := mocks.NewRequestForURL("https://lol/subscriptions/rofl")
+ req.Body = mocks.NewBody("lolol")
+ r, err := autorest.SendWithSender(client, req,
+ DoRetryWithRegistration(autorest.Client{
+ PollingDelay: time.Second,
+ PollingDuration: time.Second * 10,
+ RetryAttempts: 5,
+ RetryDuration: time.Second,
+ Sender: client,
+ }),
+ )
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+
+ autorest.Respond(r,
+ autorest.ByDiscardingBody(),
+ autorest.ByClosing(),
+ )
+
+ if r.StatusCode != http.StatusOK {
+ t.Fatalf("azure: Sender#DoRetryWithRegistration -- Got: StatusCode %v; Want: StatusCode 200 OK", r.StatusCode)
+ }
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go
new file mode 100644
index 000000000..c857e7611
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/client.go
@@ -0,0 +1,250 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/http/cookiejar"
+ "runtime"
+ "time"
+)
+
+const (
+ // DefaultPollingDelay is a reasonable delay between polling requests.
+ DefaultPollingDelay = 60 * time.Second
+
+ // DefaultPollingDuration is a reasonable total polling duration.
+ DefaultPollingDuration = 15 * time.Minute
+
+ // DefaultRetryAttempts is the number of attempts for retry status codes (5xx).
+ DefaultRetryAttempts = 3
+)
+
+var (
+ // defaultUserAgent builds a string containing the Go version, system architecture and OS,
+ // and the go-autorest version.
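+ // The produced value looks roughly like "Go/go1.10 (amd64-linux) go-autorest/v10.0.0"
+ // (illustrative values only; the real string depends on the build environment).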
+ defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
+ runtime.Version(),
+ runtime.GOARCH,
+ runtime.GOOS,
+ Version(),
+ )
+
+ // StatusCodesForRetry are a defined group of status codes for which the client will retry
+ StatusCodesForRetry = []int{
+ http.StatusRequestTimeout, // 408
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+ }
+)
+
+const (
+ requestFormat = `HTTP Request Begin ===================================================
+%s
+===================================================== HTTP Request End
+`
+ responseFormat = `HTTP Response Begin ===================================================
+%s
+===================================================== HTTP Response End
+`
+)
+
+// Response serves as the base for all responses from generated clients. It provides access to the
+// last http.Response.
+type Response struct {
+ *http.Response `json:"-"`
+}
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+ Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) WithInspection() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ var body, b bytes.Buffer
+
+ defer r.Body.Close()
+
+ r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+ if err := r.Write(&b); err != nil {
+ return nil, fmt.Errorf("Failed to write request: %v", err)
+ }
+
+ li.Logger.Printf(requestFormat, b.String())
+
+ r.Body = ioutil.NopCloser(&body)
+ return p.Prepare(r)
+ })
+ }
+}
+
+// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) ByInspecting() RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ var body, b bytes.Buffer
+ defer resp.Body.Close()
+ resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
+ if err := resp.Write(&b); err != nil {
+ return fmt.Errorf("Failed to write response: %v", err)
+ }
+
+ li.Logger.Printf(responseFormat, b.String())
+
+ resp.Body = ioutil.NopCloser(&body)
+ return r.Respond(resp)
+ })
+ }
+}
+
+// Client is the base for autorest generated clients. It provides default, "do nothing"
+// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
+// standard, undecorated http.Client as a default Sender.
+//
+// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
+// return responses that compose with Response.
+//
+// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
+// RequestInspector, and / or custom ResponseInspector.
Users may log requests, implement circuit
+// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
+// sending the request by providing a decorated Sender.
+type Client struct {
+ Authorizer Authorizer
+ Sender Sender
+ RequestInspector PrepareDecorator
+ ResponseInspector RespondDecorator
+
+ // PollingDelay sets the polling frequency used in the absence of a Retry-After HTTP header
+ PollingDelay time.Duration
+
+ // PollingDuration sets the maximum polling time after which an error is returned.
+ PollingDuration time.Duration
+
+ // RetryAttempts sets the default number of retry attempts for the client.
+ RetryAttempts int
+
+ // RetryDuration sets the delay duration for retries.
+ RetryDuration time.Duration
+
+ // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
+ // through the Do method.
+ UserAgent string
+
+ Jar http.CookieJar
+}
+
+// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
+// string.
+func NewClientWithUserAgent(ua string) Client {
+ c := Client{
+ PollingDelay: DefaultPollingDelay,
+ PollingDuration: DefaultPollingDuration,
+ RetryAttempts: DefaultRetryAttempts,
+ RetryDuration: 30 * time.Second,
+ UserAgent: defaultUserAgent,
+ }
+ c.AddToUserAgent(ua)
+ return c
+}
+
+// AddToUserAgent adds an extension to the current user agent.
+func (c *Client) AddToUserAgent(extension string) error {
+ if extension != "" {
+ c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
+ return nil
+ }
+ return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
+}
+
+// Do implements the Sender interface by invoking the active Sender after applying authorization.
+// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
+// is set, apply the User-Agent header.
+func (c Client) Do(r *http.Request) (*http.Response, error) {
+ if r.UserAgent() == "" {
+ r, _ = Prepare(r,
+ WithUserAgent(c.UserAgent))
+ }
+ r, err := Prepare(r,
+ c.WithInspection(),
+ c.WithAuthorization())
+ if err != nil {
+ return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
+ }
+ resp, err := SendWithSender(c.sender(), r)
+ Respond(resp,
+ c.ByInspecting())
+ return resp, err
+}
+
+// sender returns the Sender to which to send requests.
+func (c Client) sender() Sender {
+ if c.Sender == nil {
+ j, _ := cookiejar.New(nil)
+ return &http.Client{Jar: j}
+ }
+ return c.Sender
+}
+
+// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
+// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
+func (c Client) WithAuthorization() PrepareDecorator {
+ return c.authorizer().WithAuthorization()
+}
+
+// authorizer returns the Authorizer to use.
+func (c Client) authorizer() Authorizer {
+ if c.Authorizer == nil {
+ return NullAuthorizer{}
+ }
+ return c.Authorizer
+}
+
+// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
+// if present, or returns the WithNothing PrepareDecorator otherwise.
+func (c Client) WithInspection() PrepareDecorator {
+ if c.RequestInspector == nil {
+ return WithNothing()
+ }
+ return c.RequestInspector
+}
+
+// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
+// if present, or returns the ByIgnoring RespondDecorator otherwise.
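+// A typical wiring of both inspectors (a sketch using the LoggingInspector above;
+// any PrepareDecorator / RespondDecorator pair works the same way):
+//
+//   li := LoggingInspector{Logger: log.New(os.Stderr, "", 0)}
+//   c := Client{
+//       RequestInspector:  li.WithInspection(),
+//       ResponseInspector: li.ByInspecting(),
+//   }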
+func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client_test.go b/vendor/github.com/Azure/go-autorest/autorest/client_test.go new file mode 100644 index 000000000..a6ed6a08c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/client_test.go @@ -0,0 +1,356 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "math/rand" + "net/http" + "reflect" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func TestLoggingInspectorWithInspection(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + Prepare(mocks.NewRequestWithContent("Content"), + c.WithInspection()) + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log") + } +} + +func TestLoggingInspectorWithInspectionEmitsErrors(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewRequestWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + if _, err := Prepare(r, + c.WithInspection()); err != nil { + t.Error(err) + } + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log") + } +} + +func TestLoggingInspectorWithInspectionRestoresBody(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewRequestWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + Prepare(r, + c.WithInspection()) + + s, _ := ioutil.ReadAll(r.Body) + if len(s) <= 0 { + t.Fatal("autorest: LoggingInspector#WithInspection did not restore the Request body") + } +} + +func TestLoggingInspectorByInspecting(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + Respond(mocks.NewResponseWithContent("Content"), + c.ByInspecting()) + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log") + } +} + +func TestLoggingInspectorByInspectingEmitsErrors(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewResponseWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + if err := Respond(r, + c.ByInspecting()); err != nil { + t.Fatal(err) + } + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log") + } +} + +func TestLoggingInspectorByInspectingRestoresBody(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewResponseWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + 
c.ResponseInspector = li.ByInspecting() + + Respond(r, + c.ByInspecting()) + + s, _ := ioutil.ReadAll(r.Body) + if len(s) <= 0 { + t.Fatal("autorest: LoggingInspector#ByInspecting did not restore the Response body") + } +} + +func TestNewClientWithUserAgent(t *testing.T) { + ua := "UserAgent" + c := NewClientWithUserAgent(ua) + completeUA := fmt.Sprintf("%s %s", defaultUserAgent, ua) + + if c.UserAgent != completeUA { + t.Fatalf("autorest: NewClientWithUserAgent failed to set the UserAgent -- expected %s, received %s", + completeUA, c.UserAgent) + } +} + +func TestAddToUserAgent(t *testing.T) { + ua := "UserAgent" + c := NewClientWithUserAgent(ua) + ext := "extension" + err := c.AddToUserAgent(ext) + if err != nil { + t.Fatalf("autorest: AddToUserAgent returned error -- expected nil, received %s", err) + } + completeUA := fmt.Sprintf("%s %s %s", defaultUserAgent, ua, ext) + + if c.UserAgent != completeUA { + t.Fatalf("autorest: AddToUserAgent failed to add an extension to the UserAgent -- expected %s, received %s", + completeUA, c.UserAgent) + } + + err = c.AddToUserAgent("") + if err == nil { + t.Fatalf("autorest: AddToUserAgent didn't return error -- expected %s, received nil", + fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)) + } + if c.UserAgent != completeUA { + t.Fatalf("autorest: AddToUserAgent failed to not add an empty extension to the UserAgent -- expected %s, received %s", + completeUA, c.UserAgent) + } +} + +func TestClientSenderReturnsHttpClientByDefault(t *testing.T) { + c := Client{} + + if fmt.Sprintf("%T", c.sender()) != "*http.Client" { + t.Fatal("autorest: Client#sender failed to return http.Client by default") + } +} + +func TestClientSenderReturnsSetSender(t *testing.T) { + c := Client{} + + s := mocks.NewSender() + c.Sender = s + + if c.sender() != s { + t.Fatal("autorest: Client#sender failed to return set Sender") + } +} + +func TestClientDoInvokesSender(t *testing.T) { + c := Client{} + + s := mocks.NewSender() + c.Sender = s + + c.Do(&http.Request{}) + if s.Attempts() != 1 { + t.Fatal("autorest: Client#Do failed to invoke the Sender") + } +} + +func TestClientDoSetsUserAgent(t *testing.T) { + ua := "UserAgent" + c := Client{UserAgent: ua} + r := mocks.NewRequest() + s := mocks.NewSender() + c.Sender = s + + c.Do(r) + + if r.UserAgent() != ua { + t.Fatalf("autorest: Client#Do failed to correctly set User-Agent header: %s=%s", + http.CanonicalHeaderKey(headerUserAgent), r.UserAgent()) + } +} + +func TestClientDoSetsAuthorization(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + c := Client{Authorizer: mockAuthorizer{}, Sender: s} + + c.Do(r) + if len(r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) <= 0 { + t.Fatalf("autorest: Client#Send failed to set Authorization header -- %s=%s", + http.CanonicalHeaderKey(headerAuthorization), + r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) + } +} + +func TestClientDoInvokesRequestInspector(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + i := &mockInspector{} + c := Client{RequestInspector: i.WithInspection(), Sender: s} + + c.Do(r) + if !i.wasInvoked { + t.Fatal("autorest: Client#Send failed to invoke the RequestInspector") + } +} + +func TestClientDoInvokesResponseInspector(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + i := &mockInspector{} + c := Client{ResponseInspector: i.ByInspecting(), Sender: s} + + c.Do(r) + if !i.wasInvoked { + t.Fatal("autorest: Client#Send failed to invoke the ResponseInspector") + } +} + 
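+// The two inspector tests above exercise the full Do pipeline; the equivalent
+// caller-side wiring is simply (a sketch reusing this file's mocks):
+//
+//   i := &mockInspector{}
+//   c := Client{RequestInspector: i.WithInspection(), Sender: mocks.NewSender()}
+//   c.Do(mocks.NewRequest())
+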
+func TestClientDoReturnsErrorIfPrepareFails(t *testing.T) { + c := Client{} + s := mocks.NewSender() + c.Authorizer = mockFailingAuthorizer{} + c.Sender = s + + _, err := c.Do(&http.Request{}) + if err == nil { + t.Fatalf("autorest: Client#Do failed to return an error when Prepare failed") + } +} + +func TestClientDoDoesNotSendIfPrepareFails(t *testing.T) { + c := Client{} + s := mocks.NewSender() + c.Authorizer = mockFailingAuthorizer{} + c.Sender = s + + c.Do(&http.Request{}) + if s.Attempts() > 0 { + t.Fatal("autorest: Client#Do failed to invoke the Sender") + } +} + +func TestClientAuthorizerReturnsNullAuthorizerByDefault(t *testing.T) { + c := Client{} + + if fmt.Sprintf("%T", c.authorizer()) != "autorest.NullAuthorizer" { + t.Fatal("autorest: Client#authorizer failed to return the NullAuthorizer by default") + } +} + +func TestClientAuthorizerReturnsSetAuthorizer(t *testing.T) { + c := Client{} + c.Authorizer = mockAuthorizer{} + + if fmt.Sprintf("%T", c.authorizer()) != "autorest.mockAuthorizer" { + t.Fatal("autorest: Client#authorizer failed to return the set Authorizer") + } +} + +func TestClientWithAuthorizer(t *testing.T) { + c := Client{} + c.Authorizer = mockAuthorizer{} + + req, _ := Prepare(&http.Request{}, + c.WithAuthorization()) + + if req.Header.Get(headerAuthorization) == "" { + t.Fatal("autorest: Client#WithAuthorizer failed to return the WithAuthorizer from the active Authorizer") + } +} + +func TestClientWithInspection(t *testing.T) { + c := Client{} + r := &mockInspector{} + c.RequestInspector = r.WithInspection() + + Prepare(&http.Request{}, + c.WithInspection()) + + if !r.wasInvoked { + t.Fatal("autorest: Client#WithInspection failed to invoke RequestInspector") + } +} + +func TestClientWithInspectionSetsDefault(t *testing.T) { + c := Client{} + + r1 := &http.Request{} + r2, _ := Prepare(r1, + c.WithInspection()) + + if !reflect.DeepEqual(r1, r2) { + t.Fatal("autorest: Client#WithInspection failed to provide a default RequestInspector") + } +} + +func TestClientByInspecting(t *testing.T) { + c := Client{} + r := &mockInspector{} + c.ResponseInspector = r.ByInspecting() + + Respond(&http.Response{}, + c.ByInspecting()) + + if !r.wasInvoked { + t.Fatal("autorest: Client#ByInspecting failed to invoke ResponseInspector") + } +} + +func TestClientByInspectingSetsDefault(t *testing.T) { + c := Client{} + + r := &http.Response{} + Respond(r, + c.ByInspecting()) + + if !reflect.DeepEqual(r, &http.Response{}) { + t.Fatal("autorest: Client#ByInspecting failed to provide a default ResponseInspector") + } +} + +func randomString(n int) string { + const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + r := rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + s := make([]byte, n) + for i := range s { + s[i] = chars[r.Intn(len(chars))] + } + return string(s) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 000000000..c45710656 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,96 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. 
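+
+For example (a sketch mirroring this package's tests):
+
+   d, _ := ParseDate("2001-02-03")
+   fmt.Println(d) // prints 2001-02-03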
+*/
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "fmt"
+ "time"
+)
+
+const (
+ fullDate = "2006-01-02"
+ fullDateJSON = `"2006-01-02"`
+ dateFormat = "%04d-%02d-%02d"
+ jsonFormat = `"%04d-%02d-%02d"`
+)
+
+// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
+// 2006-01-02).
+type Date struct {
+ time.Time
+}
+
+// ParseDate creates a new Date from the passed string.
+func ParseDate(date string) (d Date, err error) {
+ return parseDate(date, fullDate)
+}
+
+func parseDate(date string, format string) (Date, error) {
+ d, err := time.Parse(format, date)
+ return Date{Time: d}, err
+}
+
+// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalBinary() ([]byte, error) {
+ return d.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalBinary(data []byte) error {
+ return d.UnmarshalText(data)
+}
+
+// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalJSON() (json []byte, err error) {
+ return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalJSON(data []byte) (err error) {
+ d.Time, err = time.Parse(fullDateJSON, string(data))
+ return err
+}
+
+// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalText() (text []byte, err error) {
+ return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalText(data []byte) (err error) {
+ d.Time, err = time.Parse(fullDate, string(data))
+ return err
+}
+
+// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
+func (d Date) String() string {
+ return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())
+}
+
+// ToTime returns a Date as a time.Time.
+func (d Date) ToTime() time.Time {
+ return d.Time
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go
new file mode 100644
index 000000000..4a40bbc3e
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go
@@ -0,0 +1,237 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleParseDate() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func ExampleDate() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + + t, err := time.Parse(time.RFC3339, "2001-02-04T00:00:00Z") + if err != nil { + fmt.Println(err) + } + + // Date acts as time.Time when the receiver + if d.Before(t) { + fmt.Printf("Before ") + } else { + fmt.Printf("After ") + } + + // Convert Date when needing a time.Time + if t.After(d.ToTime()) { + fmt.Printf("After") + } else { + fmt.Printf("Before") + } + // Output: Before After +} + +func ExampleDate_MarshalBinary() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + t, err := d.MarshalBinary() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03 +} + +func ExampleDate_UnmarshalBinary() { + d := Date{} + t := "2001-02-03" + + if err := d.UnmarshalBinary([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func ExampleDate_MarshalJSON() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + j, err := json.Marshal(d) + if err != nil { + fmt.Println(err) + } + fmt.Println(string(j)) + // Output: "2001-02-03" +} + +func ExampleDate_UnmarshalJSON() { + var d struct { + Date Date `json:"date"` + } + j := `{"date" : "2001-02-03"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + fmt.Println(err) + } + fmt.Println(d.Date) + // Output: 2001-02-03 +} + +func ExampleDate_MarshalText() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + t, err := d.MarshalText() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03 +} + +func ExampleDate_UnmarshalText() { + d := Date{} + t := "2001-02-03" + + if err := d.UnmarshalText([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func TestDateString(t *testing.T) { + d, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: String failed (%v)", err) + } + if d.String() != "2001-02-03" { + t.Fatalf("date: String failed (%v)", d.String()) + } +} + +func TestDateBinaryRoundTrip(t *testing.T) { + d1, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + t1, err := d1.MarshalBinary() + if err != nil { + t.Fatalf("date: MarshalBinary failed (%v)", err) + } + + d2 := Date{} + if err = d2.UnmarshalBinary(t1); err != nil { + t.Fatalf("date: UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestDateJSONRoundTrip(t *testing.T) { + type s struct { + Date Date `json:"date"` + } + var err error + d1 := s{} + d1.Date, err = ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + + j, err := json.Marshal(d1) + if err != nil { + t.Fatalf("date: MarshalJSON failed (%v)", err) + } + + d2 := 
s{} + if err = json.Unmarshal(j, &d2); err != nil { + t.Fatalf("date: UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestDateTextRoundTrip(t *testing.T) { + d1, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + t1, err := d1.MarshalText() + if err != nil { + t.Fatalf("date: MarshalText failed (%v)", err) + } + d2 := Date{} + if err = d2.UnmarshalText(t1); err != nil { + t.Fatalf("date: UnmarshalText failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestDateToTime(t *testing.T) { + var d Date + d, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + var _ time.Time = d.ToTime() +} + +func TestDateUnmarshalJSONReturnsError(t *testing.T) { + var d struct { + Date Date `json:"date"` + } + j := `{"date" : "February 3, 2001"}` + + if err := json.Unmarshal([]byte(j), &d); err == nil { + t.Fatal("date: Date failed to return error for malformed JSON date") + } +} + +func TestDateUnmarshalTextReturnsError(t *testing.T) { + d := Date{} + txt := "February 3, 2001" + + if err := d.UnmarshalText([]byte(txt)); err == nil { + t.Fatal("date: Date failed to return error for malformed Text date") + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 000000000..b453fad04 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). 
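+// For instance (a sketch; compare ExampleTime_MarshalJSON in the tests):
+//
+//   j, _ := json.Marshal(Time{ti}) // "2001-02-03T04:05:06Z" when ti holds that instant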
+func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go new file mode 100644 index 000000000..d8c811707 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go @@ -0,0 +1,277 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleParseTime() { + d, _ := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + fmt.Println(d) + // Output: 2001-02-03 04:05:06 +0000 UTC +} + +func ExampleTime_MarshalBinary() { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + d := Time{ti} + t, err := d.MarshalBinary() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_UnmarshalBinary() { + d := Time{} + t := "2001-02-03T04:05:06Z" + + if err := d.UnmarshalBinary([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_MarshalJSON() { + d, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + j, err := json.Marshal(d) + if err != nil { + fmt.Println(err) + } + fmt.Println(string(j)) + // Output: "2001-02-03T04:05:06Z" +} + +func ExampleTime_UnmarshalJSON() { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "2001-02-03T04:05:06Z"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + fmt.Println(err) + } + fmt.Println(d.Time) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_MarshalText() { + d, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + t, err := d.MarshalText() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_UnmarshalText() { + d := Time{} + t := "2001-02-03T04:05:06Z" + + if err := d.UnmarshalText([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03T04:05:06Z +} + +func TestUnmarshalTextforInvalidDate(t *testing.T) { + d := Time{} + dt := "2001-02-03T04:05:06AAA" + + if err := d.UnmarshalText([]byte(dt)); err == nil { + t.Fatalf("date: Time#Unmarshal was expecting error for invalid date") + } +} + +func TestUnmarshalJSONforInvalidDate(t *testing.T) { + d := Time{} + dt := `"2001-02-03T04:05:06AAA"` + + if err := d.UnmarshalJSON([]byte(dt)); err == nil { + t.Fatalf("date: Time#Unmarshal was expecting error for invalid date") + } +} + +func TestTimeString(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + d := Time{ti} + if d.String() != "2001-02-03T04:05:06Z" { + t.Fatalf("date: Time#String failed (%v)", d.String()) + } +} + +func TestTimeStringReturnsEmptyStringForError(t *testing.T) { + d := Time{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)} + if d.String() != "" { + t.Fatalf("date: Time#String failed empty string for an error") + } +} + +func TestTimeBinaryRoundTrip(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + d1 := Time{ti} + t1, err := d1.MarshalBinary() + if err != nil { + t.Fatalf("date: Time#MarshalBinary failed (%v)", err) + } + + d2 := Time{} + if err = d2.UnmarshalBinary(t1); err != nil { + t.Fatalf("date: Time#UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date:Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestTimeJSONRoundTrip(t *testing.T) { + type s struct { + Time Time `json:"datetime"` + } + + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + + d1 := s{Time: Time{ti}} + j, err := json.Marshal(d1) + if err != nil { + 
t.Fatalf("date: Time#MarshalJSON failed (%v)", err) + } + + d2 := s{} + if err = json.Unmarshal(j, &d2); err != nil { + t.Fatalf("date: Time#UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestTimeTextRoundTrip(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + d1 := Time{Time: ti} + t1, err := d1.MarshalText() + if err != nil { + t.Fatalf("date: Time#MarshalText failed (%v)", err) + } + + d2 := Time{} + if err = d2.UnmarshalText(t1); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestTimeToTime(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + d := Time{ti} + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + var _ time.Time = d.ToTime() +} + +func TestUnmarshalJSONNoOffset(t *testing.T) { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "2001-02-03T04:05:06.789"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + t.Fatalf("date: Time#Unmarshal failed (%v)", err) + } +} + +func TestUnmarshalJSONPosOffset(t *testing.T) { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "1980-01-02T00:11:35.01+01:00"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + t.Fatalf("date: Time#Unmarshal failed (%v)", err) + } +} + +func TestUnmarshalJSONNegOffset(t *testing.T) { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "1492-10-12T10:15:01.789-08:00"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + t.Fatalf("date: Time#Unmarshal failed (%v)", err) + } +} + +func TestUnmarshalTextNoOffset(t *testing.T) { + d := Time{} + t1 := "2001-02-03T04:05:06" + + if err := d.UnmarshalText([]byte(t1)); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } +} + +func TestUnmarshalTextPosOffset(t *testing.T) { + d := Time{} + t1 := "2001-02-03T04:05:06+00:30" + + if err := d.UnmarshalText([]byte(t1)); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } +} + +func TestUnmarshalTextNegOffset(t *testing.T) { + d := Time{} + t1 := "2001-02-03T04:05:06-11:00" + + if err := d.UnmarshalText([]byte(t1)); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 000000000..48fb39ba9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go new file mode 100644 index 000000000..dfd640513 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go @@ -0,0 +1,226 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleTimeRFC1123() { + d, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2006-01-02 15:04:05 +0000 MST +} + +func ExampleTimeRFC1123_MarshalBinary() { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + b, err := d.MarshalBinary() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(b)) + // Output: Mon, 02 Jan 2006 15:04:05 MST +} + +func ExampleTimeRFC1123_UnmarshalBinary() { + d := TimeRFC1123{} + t := "Mon, 02 Jan 2006 15:04:05 MST" + if err := d.UnmarshalBinary([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: Mon, 02 Jan 2006 15:04:05 MST +} + +func ExampleTimeRFC1123_MarshalJSON() { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + j, err := json.Marshal(d) + if err != nil { + fmt.Println(err) + } + fmt.Println(string(j)) + // Output: "Mon, 02 Jan 2006 15:04:05 MST" +} + +func TestTimeRFC1123MarshalJSONInvalid(t *testing.T) { + ti := time.Date(20000, 01, 01, 00, 00, 00, 00, time.UTC) + d := TimeRFC1123{ti} + if _, err := json.Marshal(d); err == nil { + t.Fatalf("date: TimeRFC1123#Marshal failed for invalid date") + } +} + +func ExampleTimeRFC1123_UnmarshalJSON() { + var d struct { + Time TimeRFC1123 `json:"datetime"` + } + j := `{"datetime" : "Mon, 02 Jan 2006 15:04:05 MST"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + fmt.Println(err) + } + fmt.Println(d.Time) + // Output: Mon, 02 Jan 2006 15:04:05 MST +} + +func ExampleTimeRFC1123_MarshalText() { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + t, err := d.MarshalText() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: Sat, 03 Feb 2001 04:05:06 UTC +} + +func ExampleTimeRFC1123_UnmarshalText() { + d := TimeRFC1123{} + t := "Sat, 03 Feb 2001 04:05:06 UTC" + + if err := d.UnmarshalText([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: Sat, 03 Feb 2001 04:05:06 UTC +} + +func TestUnmarshalJSONforInvalidDateRfc1123(t *testing.T) { + dt := `"Mon, 02 Jan 2000000 15:05 MST"` + d := TimeRFC1123{} + if err := d.UnmarshalJSON([]byte(dt)); err == nil { + t.Fatalf("date: TimeRFC1123#Unmarshal failed for invalid date") + } +} + +func TestUnmarshalTextforInvalidDateRfc1123(t *testing.T) { + dt := "Mon, 02 Jan 2000000 15:05 MST" + d := TimeRFC1123{} + if err := d.UnmarshalText([]byte(dt)); err == nil { + t.Fatalf("date: TimeRFC1123#Unmarshal failed for invalid date") + } +} + +func TestTimeStringRfc1123(t *testing.T) { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + if d.String() != "Mon, 02 Jan 2006 15:04:05 MST" { + t.Fatalf("date: TimeRFC1123#String failed (%v)", d.String()) + } +} + +func TestTimeStringReturnsEmptyStringForErrorRfc1123(t *testing.T) { + d := TimeRFC1123{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)} + if d.String() != "" { + t.Fatalf("date: TimeRFC1123#String failed empty string for an error") + } +} + +func TestTimeBinaryRoundTripRfc1123(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + d1 := TimeRFC1123{ti} + t1, err := 
d1.MarshalBinary() + if err != nil { + t.Fatalf("date: TimeRFC1123#MarshalBinary failed (%v)", err) + } + + d2 := TimeRFC1123{} + if err = d2.UnmarshalBinary(t1); err != nil { + t.Fatalf("date: TimeRFC1123#UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestTimeJSONRoundTripRfc1123(t *testing.T) { + type s struct { + Time TimeRFC1123 `json:"datetime"` + } + var err error + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + d1 := s{Time: TimeRFC1123{ti}} + j, err := json.Marshal(d1) + if err != nil { + t.Fatalf("date: TimeRFC1123#MarshalJSON failed (%v)", err) + } + + d2 := s{} + if err = json.Unmarshal(j, &d2); err != nil { + t.Fatalf("date: TimeRFC1123#UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestTimeTextRoundTripRfc1123(t *testing.T) { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + d1 := TimeRFC1123{Time: ti} + t1, err := d1.MarshalText() + if err != nil { + t.Fatalf("date: TimeRFC1123#MarshalText failed (%v)", err) + } + + d2 := TimeRFC1123{} + if err = d2.UnmarshalText(t1); err != nil { + t.Fatalf("date: TimeRFC1123#UnmarshalText failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestTimeToTimeRFC1123(t *testing.T) { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + d := TimeRFC1123{ti} + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + var _ time.Time = d.ToTime() +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 000000000..7073959b2 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,123 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" +) + +// unixEpoch is the moment in time that should be treated as timestamp 0. +var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + +// UnixTime marshals and unmarshals a time that is represented as the number +// of seconds (ignoring skip-seconds) since the Unix Epoch. +type UnixTime time.Time + +// Duration returns the time as a Duration since the UnixEpoch. +func (t UnixTime) Duration() time.Duration { + return time.Time(t).Sub(unixEpoch) +} + +// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. 
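+// For example (a sketch; compare the UnmarshalJSON tests):
+//
+//   NewUnixTimeFromSeconds(1.5) // the Unix Epoch plus 1500 milliseconds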
+func NewUnixTimeFromSeconds(seconds float64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
+}
+
+// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
+func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(nanoseconds))
+}
+
+// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
+func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
+	return UnixTime(unixEpoch.Add(dur))
+}
+
+// UnixEpoch retrieves the moment considered the Unix Epoch, i.e. the time represented by '0'.
+func UnixEpoch() time.Time {
+	return unixEpoch
+}
+
+// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements.
+// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
+func (t UnixTime) MarshalJSON() ([]byte, error) {
+	buffer := &bytes.Buffer{}
+	enc := json.NewEncoder(buffer)
+	err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
+	if err != nil {
+		return nil, err
+	}
+	return buffer.Bytes(), nil
+}
+
+// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of the number of seconds since
+// midnight January 1st, 1970.
+func (t *UnixTime) UnmarshalJSON(text []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(text))
+
+	var secondsSinceEpoch float64
+	if err := dec.Decode(&secondsSinceEpoch); err != nil {
+		return err
+	}
+
+	*t = NewUnixTimeFromSeconds(secondsSinceEpoch)
+
+	return nil
+}
+
+// MarshalText renders the time in RFC 3339 text form, delegating to time.Time.MarshalText.
+func (t UnixTime) MarshalText() ([]byte, error) {
+	cast := time.Time(t)
+	return cast.MarshalText()
+}
+
+// UnmarshalText populates a UnixTime from RFC 3339 text, delegating to time.Time.UnmarshalText.
+func (t *UnixTime) UnmarshalText(raw []byte) error {
+	var unmarshaled time.Time
+
+	if err := unmarshaled.UnmarshalText(raw); err != nil {
+		return err
+	}
+
+	*t = UnixTime(unmarshaled)
+	return nil
+}
+
+// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch.
+func (t UnixTime) MarshalBinary() ([]byte, error) {
+	buf := &bytes.Buffer{}
+
+	payload := int64(t.Duration())
+
+	if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// UnmarshalBinary converts a binary.LittleEndian int64 of nanoseconds since the epoch into a UnixTime.
+func (t *UnixTime) UnmarshalBinary(raw []byte) error {
+	var nanosecondsSinceEpoch int64
+
+	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
+		return err
+	}
+	*t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go
new file mode 100644
index 000000000..3fa347c00
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go
@@ -0,0 +1,283 @@
+// +build go1.7
+
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "math" + "testing" + "time" +) + +func ExampleUnixTime_MarshalJSON() { + epoch := UnixTime(UnixEpoch()) + text, _ := json.Marshal(epoch) + fmt.Print(string(text)) + // Output: 0 +} + +func ExampleUnixTime_UnmarshalJSON() { + var myTime UnixTime + json.Unmarshal([]byte("1.3e2"), &myTime) + fmt.Printf("%v", time.Time(myTime)) + // Output: 1970-01-01 00:02:10 +0000 UTC +} + +func TestUnixTime_MarshalJSON(t *testing.T) { + testCases := []time.Time{ + UnixEpoch().Add(-1 * time.Second), // One second befote the Unix Epoch + time.Date(2017, time.April, 14, 20, 27, 47, 0, time.UTC), // The time this test was written + UnixEpoch(), + time.Date(1800, 01, 01, 0, 0, 0, 0, time.UTC), + time.Date(2200, 12, 29, 00, 01, 37, 82, time.UTC), + } + + for _, tc := range testCases { + t.Run(tc.String(), func(subT *testing.T) { + var actual, expected float64 + var marshaled []byte + + target := UnixTime(tc) + expected = float64(target.Duration().Nanoseconds()) / 1e9 + + if temp, err := json.Marshal(target); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + dec := json.NewDecoder(bytes.NewReader(marshaled)) + if err := dec.Decode(&actual); err != nil { + subT.Error(err) + return + } + + diff := math.Abs(actual - expected) + subT.Logf("\ngot :\t%g\nwant:\t%g\ndiff:\t%g", actual, expected, diff) + if diff > 1e-9 { //Must be within 1 nanosecond of one another + subT.Fail() + } + }) + } +} + +func TestUnixTime_UnmarshalJSON(t *testing.T) { + testCases := []struct { + text string + expected time.Time + }{ + {"1", UnixEpoch().Add(time.Second)}, + {"0", UnixEpoch()}, + {"1492203742", time.Date(2017, time.April, 14, 21, 02, 22, 0, time.UTC)}, // The time this test was written + {"-1", time.Date(1969, time.December, 31, 23, 59, 59, 0, time.UTC)}, + {"1.5", UnixEpoch().Add(1500 * time.Millisecond)}, + {"0e1", UnixEpoch()}, // See http://json.org for 'number' format definition. + {"1.3e+2", UnixEpoch().Add(130 * time.Second)}, + {"1.6E-10", UnixEpoch()}, // This is so small, it should get truncated into the UnixEpoch + {"2E-6", UnixEpoch().Add(2 * time.Microsecond)}, + {"1.289345e9", UnixEpoch().Add(1289345000 * time.Second)}, + {"1e-9", UnixEpoch().Add(time.Nanosecond)}, + } + + for _, tc := range testCases { + t.Run(tc.text, func(subT *testing.T) { + var rehydrated UnixTime + if err := json.Unmarshal([]byte(tc.text), &rehydrated); err != nil { + subT.Error(err) + return + } + + if time.Time(rehydrated) != tc.expected { + subT.Logf("\ngot: \t%v\nwant:\t%v\ndiff:\t%v", time.Time(rehydrated), tc.expected, time.Time(rehydrated).Sub(tc.expected)) + subT.Fail() + } + }) + } +} + +func TestUnixTime_JSONRoundTrip(t *testing.T) { + testCases := []time.Time{ + UnixEpoch(), + time.Date(2005, time.November, 5, 0, 0, 0, 0, time.UTC), // The day V for Vendetta (film) was released. 
+ UnixEpoch().Add(-6 * time.Second), + UnixEpoch().Add(800 * time.Hour), + UnixEpoch().Add(time.Nanosecond), + time.Date(2015, time.September, 05, 4, 30, 12, 9992, time.UTC), + } + + for _, tc := range testCases { + t.Run(tc.String(), func(subT *testing.T) { + subject := UnixTime(tc) + var marshaled []byte + if temp, err := json.Marshal(subject); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + var unmarshaled UnixTime + if err := json.Unmarshal(marshaled, &unmarshaled); err != nil { + subT.Error(err) + } + + actual := time.Time(unmarshaled) + diff := actual.Sub(tc) + subT.Logf("\ngot :\t%s\nwant:\t%s\ndiff:\t%s", actual.String(), tc.String(), diff.String()) + + if diff > time.Duration(100) { // We lose some precision be working in floats. We shouldn't lose more than 100 nanoseconds. + subT.Fail() + } + }) + } +} + +func TestUnixTime_MarshalBinary(t *testing.T) { + testCases := []struct { + expected int64 + subject time.Time + }{ + {0, UnixEpoch()}, + {-15 * int64(time.Second), UnixEpoch().Add(-15 * time.Second)}, + {54, UnixEpoch().Add(54 * time.Nanosecond)}, + } + + for _, tc := range testCases { + t.Run("", func(subT *testing.T) { + var marshaled []byte + + if temp, err := UnixTime(tc.subject).MarshalBinary(); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + var unmarshaled int64 + if err := binary.Read(bytes.NewReader(marshaled), binary.LittleEndian, &unmarshaled); err != nil { + subT.Error(err) + return + } + + if unmarshaled != tc.expected { + subT.Logf("\ngot: \t%d\nwant:\t%d", unmarshaled, tc.expected) + subT.Fail() + } + }) + } +} + +func TestUnixTime_BinaryRoundTrip(t *testing.T) { + testCases := []time.Time{ + UnixEpoch(), + UnixEpoch().Add(800 * time.Minute), + UnixEpoch().Add(7 * time.Hour), + UnixEpoch().Add(-1 * time.Nanosecond), + } + + for _, tc := range testCases { + t.Run(tc.String(), func(subT *testing.T) { + original := UnixTime(tc) + var marshaled []byte + + if temp, err := original.MarshalBinary(); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + var traveled UnixTime + if err := traveled.UnmarshalBinary(marshaled); err != nil { + subT.Error(err) + return + } + + if traveled != original { + subT.Logf("\ngot: \t%s\nwant:\t%s", time.Time(original).String(), time.Time(traveled).String()) + subT.Fail() + } + }) + } +} + +func TestUnixTime_MarshalText(t *testing.T) { + testCases := []time.Time{ + UnixEpoch(), + UnixEpoch().Add(45 * time.Second), + UnixEpoch().Add(time.Nanosecond), + UnixEpoch().Add(-100000 * time.Second), + } + + for _, tc := range testCases { + expected, _ := tc.MarshalText() + t.Run("", func(subT *testing.T) { + var marshaled []byte + + if temp, err := UnixTime(tc).MarshalText(); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + if string(marshaled) != string(expected) { + subT.Logf("\ngot: \t%s\nwant:\t%s", string(marshaled), string(expected)) + subT.Fail() + } + }) + } +} + +func TestUnixTime_TextRoundTrip(t *testing.T) { + testCases := []time.Time{ + UnixEpoch(), + UnixEpoch().Add(-1 * time.Nanosecond), + UnixEpoch().Add(1 * time.Nanosecond), + time.Date(2017, time.April, 17, 21, 00, 00, 00, time.UTC), + } + + for _, tc := range testCases { + t.Run(tc.String(), func(subT *testing.T) { + unixTC := UnixTime(tc) + + var marshaled []byte + + if temp, err := unixTC.MarshalText(); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + var unmarshaled UnixTime + if err := unmarshaled.UnmarshalText(marshaled); err != nil { 
+				subT.Error(err)
+				return
+			}
+
+			if unmarshaled != unixTC {
+				subT.Logf("\ngot: \t%s\nwant:\t%s", time.Time(unmarshaled).String(), tc.String())
+				subT.Fail()
+			}
+		})
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
new file mode 100644
index 000000000..12addf0eb
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
@@ -0,0 +1,25 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"strings"
+	"time"
+)
+
+// ParseTime parses a time string (upper-cased first) according to the specified format.
+func ParseTime(format string, t string) (d time.Time, err error) {
+	return time.Parse(format, strings.ToUpper(t))
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go
new file mode 100644
index 000000000..f724f3332
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/error.go
@@ -0,0 +1,98 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"net/http"
+)
+
+const (
+	// UndefinedStatusCode is used when HTTP status code is not available for an error.
+	UndefinedStatusCode = 0
+)
+
+// DetailedError encloses an error with details of the package, method, and associated HTTP
+// status code (if any).
+type DetailedError struct {
+	Original error
+
+	// PackageType is the package type of the object emitting the error. For types, the value
+	// matches that produced by the '%T' format specifier of the fmt package. For other elements,
+	// such as functions, it is just the package name (e.g., "autorest").
+	PackageType string
+
+	// Method is the name of the method raising the error.
+	Method string
+
+	// StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
+	StatusCode interface{}
+
+	// Message is the error message.
+	Message string
+
+	// ServiceError is the response body of the failed API call, in bytes.
+	ServiceError []byte
+
+	// Response is the response object that was returned during failure if applicable.
+	Response *http.Response
+}
+
+// NewError creates a new Error conforming object from the passed packageType, method, and
+// message. message is treated as a format string to which the optional args apply.
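+//
+// For example (an illustrative call; the package and method names are arbitrary):
+//
+//	e := NewError("autorest", "DoSomething", "unexpected status %d", 500)
+//	// e.Error() == "autorest#DoSomething: unexpected status 500: StatusCode=0"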
+func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
+	return NewErrorWithError(nil, packageType, method, nil, message, args...)
+}
+
+// NewErrorWithResponse creates a new Error conforming object from the passed
+// packageType, method, statusCode of the given resp (UndefinedStatusCode if
+// resp is nil), and message. message is treated as a format string to which the
+// optional args apply.
+func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	return NewErrorWithError(nil, packageType, method, resp, message, args...)
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	if v, ok := original.(DetailedError); ok {
+		return v
+	}
+
+	statusCode := UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+
+	return DetailedError{
+		Original:    original,
+		PackageType: packageType,
+		Method:      method,
+		StatusCode:  statusCode,
+		Message:     fmt.Sprintf(message, args...),
+		Response:    resp,
+	}
+}
+
+// Error returns a formatted string containing all available details (i.e., PackageType, Method,
+// StatusCode, Message, and original error (if any)).
+func (e DetailedError) Error() string {
+	if e.Original == nil {
+		return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode)
+	}
+	return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original)
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/error_test.go b/vendor/github.com/Azure/go-autorest/autorest/error_test.go
new file mode 100644
index 000000000..f4f5d75ef
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/error_test.go
@@ -0,0 +1,202 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +import ( + "fmt" + "net/http" + "reflect" + "regexp" + "testing" +) + +func TestNewErrorWithError_AssignsPackageType(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if e.PackageType != "packageType" { + t.Fatalf("autorest: Error failed to set package type -- expected %v, received %v", "packageType", e.PackageType) + } +} + +func TestNewErrorWithError_AssignsMethod(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if e.Method != "method" { + t.Fatalf("autorest: Error failed to set method -- expected %v, received %v", "method", e.Method) + } +} + +func TestNewErrorWithError_AssignsMessage(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if e.Message != "message" { + t.Fatalf("autorest: Error failed to set message -- expected %v, received %v", "message", e.Message) + } +} + +func TestNewErrorWithError_AssignsUndefinedStatusCodeIfRespNil(t *testing.T) { + e := NewErrorWithError(nil, "packageType", "method", nil, "message") + if e.StatusCode != UndefinedStatusCode { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", UndefinedStatusCode, e.StatusCode) + } +} + +func TestNewErrorWithError_AssignsStatusCode(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", &http.Response{ + StatusCode: http.StatusBadRequest, + Status: http.StatusText(http.StatusBadRequest)}, "message") + + if e.StatusCode != http.StatusBadRequest { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", http.StatusBadRequest, e.StatusCode) + } +} + +func TestNewErrorWithError_AcceptsArgs(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message %s", "arg") + + if matched, _ := regexp.MatchString(`.*arg.*`, e.Message); !matched { + t.Fatalf("autorest: Error failed to apply message arguments -- expected %v, received %v", + `.*arg.*`, e.Message) + } +} + +func TestNewErrorWithError_AssignsError(t *testing.T) { + err := fmt.Errorf("original") + e := NewErrorWithError(err, "packageType", "method", nil, "message") + + if e.Original != err { + t.Fatalf("autorest: Error failed to set error -- expected %v, received %v", err, e.Original) + } +} + +func TestNewErrorWithResponse_ContainsStatusCode(t *testing.T) { + e := NewErrorWithResponse("packageType", "method", &http.Response{ + StatusCode: http.StatusBadRequest, + Status: http.StatusText(http.StatusBadRequest)}, "message") + + if e.StatusCode != http.StatusBadRequest { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", http.StatusBadRequest, e.StatusCode) + } +} + +func TestNewErrorWithResponse_nilResponse_ReportsUndefinedStatusCode(t *testing.T) { + e := NewErrorWithResponse("packageType", "method", nil, "message") + + if e.StatusCode != UndefinedStatusCode { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", UndefinedStatusCode, e.StatusCode) + } +} + +func TestNewErrorWithResponse_Forwards(t *testing.T) { + e1 := NewError("packageType", "method", "message %s", "arg") + e2 := NewErrorWithResponse("packageType", "method", nil, "message %s", "arg") + + if !reflect.DeepEqual(e1, e2) { + t.Fatal("autorest: NewError did not return an error equivelent to NewErrorWithError") + } +} + +func TestNewErrorWithError_Forwards(t *testing.T) { + e1 := NewError("packageType", "method", "message %s", "arg") + 
e2 := NewErrorWithError(nil, "packageType", "method", nil, "message %s", "arg") + + if !reflect.DeepEqual(e1, e2) { + t.Fatal("autorest: NewError did not return an error equivelent to NewErrorWithError") + } +} + +func TestNewErrorWithError_DoesNotWrapADetailedError(t *testing.T) { + e1 := NewError("packageType1", "method1", "message1 %s", "arg1") + e2 := NewErrorWithError(e1, "packageType2", "method2", nil, "message2 %s", "arg2") + + if !reflect.DeepEqual(e1, e2) { + t.Fatalf("autorest: NewErrorWithError incorrectly wrapped a DetailedError -- expected %v, received %v", e1, e2) + } +} + +func TestNewErrorWithError_WrapsAnError(t *testing.T) { + e1 := fmt.Errorf("Inner Error") + var e2 interface{} = NewErrorWithError(e1, "packageType", "method", nil, "message") + + if _, ok := e2.(DetailedError); !ok { + t.Fatalf("autorest: NewErrorWithError failed to wrap a standard error -- received %T", e2) + } +} + +func TestDetailedError(t *testing.T) { + err := fmt.Errorf("original") + e := NewErrorWithError(err, "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*original.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#Error failed to return original error message -- expected %v, received %v", + `.*original.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsPackageType(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*packageType.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include PackageType -- expected %v, received %v", + `.*packageType.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsMethod(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*method.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Method -- expected %v, received %v", + `.*method.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsMessage(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*message.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Message -- expected %v, received %v", + `.*message.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsStatusCode(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", &http.Response{ + StatusCode: http.StatusBadRequest, + Status: http.StatusText(http.StatusBadRequest)}, "message") + + if matched, _ := regexp.MatchString(`.*400.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Status Code -- expected %v, received %v", + `.*400.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsOriginal(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*original.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Original error -- expected %v, received %v", + `.*original.*`, e.Error()) + } +} + +func TestDetailedErrorSkipsOriginal(t *testing.T) { + e := NewError("packageType", "method", "message") + + if matched, _ := regexp.MatchString(`.*Original.*`, e.Error()); matched { + t.Fatalf("autorest: Error#String included missing Original error -- unexpected %v, received %v", + `.*Original.*`, e.Error()) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/mocks/helpers.go 
b/vendor/github.com/Azure/go-autorest/autorest/mocks/helpers.go new file mode 100644 index 000000000..7b1f00d3f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/mocks/helpers.go @@ -0,0 +1,151 @@ +package mocks + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" + "time" +) + +const ( + // TestAuthorizationHeader is a faux HTTP Authorization header value + TestAuthorizationHeader = "BEARER SECRETTOKEN" + + // TestBadURL is a malformed URL + TestBadURL = " " + + // TestDelay is the Retry-After delay used in tests. + TestDelay = 0 * time.Second + + // TestHeader is the header used in tests. + TestHeader = "x-test-header" + + // TestURL is the URL used in tests. + TestURL = "https://microsoft.com/a/b/c/" + + // TestAzureAsyncURL is a URL used in Azure asynchronous tests + TestAzureAsyncURL = "https://microsoft.com/a/b/c/async" + + // TestLocationURL is a URL used in Azure asynchronous tests + TestLocationURL = "https://microsoft.com/a/b/c/location" +) + +const ( + headerLocation = "Location" + headerRetryAfter = "Retry-After" +) + +// NewRequest instantiates a new request. +func NewRequest() *http.Request { + return NewRequestWithContent("") +} + +// NewRequestWithContent instantiates a new request using the passed string for the body content. +func NewRequestWithContent(c string) *http.Request { + r, _ := http.NewRequest("GET", "https://microsoft.com/a/b/c/", NewBody(c)) + return r +} + +// NewRequestWithCloseBody instantiates a new request. +func NewRequestWithCloseBody() *http.Request { + return NewRequestWithCloseBodyContent("request body") +} + +// NewRequestWithCloseBodyContent instantiates a new request using the passed string for the body content. +func NewRequestWithCloseBodyContent(c string) *http.Request { + r, _ := http.NewRequest("GET", "https://microsoft.com/a/b/c/", NewBodyClose(c)) + return r +} + +// NewRequestForURL instantiates a new request using the passed URL. +func NewRequestForURL(u string) *http.Request { + r, err := http.NewRequest("GET", u, NewBody("")) + if err != nil { + panic(fmt.Sprintf("mocks: ERROR (%v) parsing testing URL %s", err, u)) + } + return r +} + +// NewResponse instantiates a new response. +func NewResponse() *http.Response { + return NewResponseWithContent("") +} + +// NewResponseWithContent instantiates a new response with the passed string as the body content. +func NewResponseWithContent(c string) *http.Response { + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Body: NewBody(c), + Request: NewRequest(), + } +} + +// NewResponseWithStatus instantiates a new response using the passed string and integer as the +// status and status code. 
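+//
+// For example (illustrative values):
+//
+//	resp := NewResponseWithStatus("400 Bad Request", http.StatusBadRequest)
+//	// resp.Status == "400 Bad Request" and resp.StatusCode == 400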
+func NewResponseWithStatus(s string, c int) *http.Response { + resp := NewResponse() + resp.Status = s + resp.StatusCode = c + return resp +} + +// NewResponseWithBodyAndStatus instantiates a new response using the specified mock body, +// status and status code +func NewResponseWithBodyAndStatus(body *Body, c int, s string) *http.Response { + resp := NewResponse() + resp.Body = body + resp.Status = s + resp.StatusCode = c + return resp +} + +// SetResponseHeader adds a header to the passed response. +func SetResponseHeader(resp *http.Response, h string, v string) { + if resp.Header == nil { + resp.Header = make(http.Header) + } + resp.Header.Set(h, v) +} + +// SetResponseHeaderValues adds a header containing all the passed string values. +func SetResponseHeaderValues(resp *http.Response, h string, values []string) { + if resp.Header == nil { + resp.Header = make(http.Header) + } + for _, v := range values { + resp.Header.Add(h, v) + } +} + +// SetAcceptedHeaders adds the headers usually associated with a 202 Accepted response. +func SetAcceptedHeaders(resp *http.Response) { + SetLocationHeader(resp, TestURL) + SetRetryHeader(resp, TestDelay) +} + +// SetLocationHeader adds the Location header. +func SetLocationHeader(resp *http.Response, location string) { + SetResponseHeader(resp, http.CanonicalHeaderKey(headerLocation), location) +} + +// SetRetryHeader adds the Retry-After header. +func SetRetryHeader(resp *http.Response, delay time.Duration) { + SetResponseHeader(resp, http.CanonicalHeaderKey(headerRetryAfter), fmt.Sprintf("%v", delay.Seconds())) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/mocks/mocks.go b/vendor/github.com/Azure/go-autorest/autorest/mocks/mocks.go new file mode 100644 index 000000000..85a049763 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/mocks/mocks.go @@ -0,0 +1,176 @@ +/* +Package mocks provides mocks and helpers used in testing. +*/ +package mocks + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "io" + "net/http" +) + +// Body implements acceptable body over a string. +type Body struct { + s string + b []byte + isOpen bool + closeAttempts int +} + +// NewBody creates a new instance of Body. +func NewBody(s string) *Body { + return (&Body{s: s}).reset() +} + +// NewBodyClose creates a new instance of Body. +func NewBodyClose(s string) *Body { + return &Body{s: s} +} + +// Read reads into the passed byte slice and returns the bytes read. +func (body *Body) Read(b []byte) (n int, err error) { + if !body.IsOpen() { + return 0, fmt.Errorf("ERROR: Body has been closed") + } + if len(body.b) == 0 { + return 0, io.EOF + } + n = copy(b, body.b) + body.b = body.b[n:] + return n, nil +} + +// Close closes the body. +func (body *Body) Close() error { + if body.isOpen { + body.isOpen = false + body.closeAttempts++ + } + return nil +} + +// CloseAttempts returns the number of times Close was called. 
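+//
+// For example (illustrative; only a Close on an open body is counted):
+//
+//	b := NewBody("payload")
+//	b.Close()
+//	b.Close()
+//	// b.CloseAttempts() == 1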
+func (body *Body) CloseAttempts() int { + return body.closeAttempts +} + +// IsOpen returns true if the Body has not been closed, false otherwise. +func (body *Body) IsOpen() bool { + return body.isOpen +} + +func (body *Body) reset() *Body { + body.isOpen = true + body.b = []byte(body.s) + return body +} + +// Sender implements a simple null sender. +type Sender struct { + attempts int + responses []*http.Response + repeatResponse []int + err error + repeatError int + emitErrorAfter int +} + +// NewSender creates a new instance of Sender. +func NewSender() *Sender { + return &Sender{} +} + +// Do accepts the passed request and, based on settings, emits a response and possible error. +func (c *Sender) Do(r *http.Request) (resp *http.Response, err error) { + c.attempts++ + + if len(c.responses) > 0 { + resp = c.responses[0] + if resp != nil { + if b, ok := resp.Body.(*Body); ok { + b.reset() + } + } + c.repeatResponse[0]-- + if c.repeatResponse[0] == 0 { + c.responses = c.responses[1:] + c.repeatResponse = c.repeatResponse[1:] + } + } else { + resp = NewResponse() + } + if resp != nil { + resp.Request = r + } + + if c.emitErrorAfter > 0 { + c.emitErrorAfter-- + } else if c.err != nil { + err = c.err + c.repeatError-- + if c.repeatError == 0 { + c.err = nil + } + } + + return +} + +// AppendResponse adds the passed http.Response to the response stack. +func (c *Sender) AppendResponse(resp *http.Response) { + c.AppendAndRepeatResponse(resp, 1) +} + +// AppendAndRepeatResponse adds the passed http.Response to the response stack along with a +// repeat count. A negative repeat count will return the response for all remaining calls to Do. +func (c *Sender) AppendAndRepeatResponse(resp *http.Response, repeat int) { + if c.responses == nil { + c.responses = []*http.Response{resp} + c.repeatResponse = []int{repeat} + } else { + c.responses = append(c.responses, resp) + c.repeatResponse = append(c.repeatResponse, repeat) + } +} + +// Attempts returns the number of times Do was called. +func (c *Sender) Attempts() int { + return c.attempts +} + +// SetError sets the error Do should return. +func (c *Sender) SetError(err error) { + c.SetAndRepeatError(err, 1) +} + +// SetAndRepeatError sets the error Do should return and how many calls to Do will return the error. +// A negative repeat value will return the error for all remaining calls to Do. +func (c *Sender) SetAndRepeatError(err error, repeat int) { + c.err = err + c.repeatError = repeat +} + +// SetEmitErrorAfter sets the number of attempts to be made before errors are emitted. +func (c *Sender) SetEmitErrorAfter(ea int) { + c.emitErrorAfter = ea +} + +// T is a simple testing struct. +type T struct { + Name string `json:"name" xml:"Name"` + Age int `json:"age" xml:"Age"` +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 000000000..2290c4010 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,442 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+const (
+	mimeTypeJSON     = "application/json"
+	mimeTypeFormPost = "application/x-www-form-urlencoded"
+
+	headerAuthorization = "Authorization"
+	headerContentType   = "Content-Type"
+	headerUserAgent     = "User-Agent"
+)
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
+// must not share or hold per-invocation state, since Preparers may be shared and re-used.
+type Preparer interface {
+	Prepare(*http.Request) (*http.Request, error)
+}
+
+// PreparerFunc is a method that implements the Preparer interface.
+type PreparerFunc func(*http.Request) (*http.Request, error)
+
+// Prepare implements the Preparer interface on PreparerFunc.
+func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
+	return pf(r)
+}
+
+// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then affect the result.
+type PrepareDecorator func(Preparer) Preparer
+
+// CreatePreparer creates, decorates, and returns a Preparer.
+// Without decorators, the returned Preparer returns the passed http.Request unmodified.
+// Preparers are safe to share and re-use.
+func CreatePreparer(decorators ...PrepareDecorator) Preparer {
+	return DecoratePreparer(
+		Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
+		decorators...)
+}
+
+// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
+// applies to the Preparer. Decorators are applied in the order received, but their effect upon the
+// request depends on whether they are a pre-decorator (change the http.Request and then pass it
+// along) or a post-decorator (pass the http.Request along and alter it on return).
+func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
+	for _, decorate := range decorators {
+		p = decorate(p)
+	}
+	return p
+}
+
+// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
+// It creates a Preparer from the decorators which it then applies to the passed http.Request.
+func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) {
+	if r == nil {
+		return nil, NewError("autorest", "Prepare", "Invoked without an http.Request")
+	}
+	return CreatePreparer(decorators...).Prepare(r)
+}
+
+// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed
+// http.Request.
+func WithNothing() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			return p.Prepare(r)
+		})
+	}
+}
+
+// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to
+// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before
+// adding the header.
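+//
+// For example (illustrative header name and value):
+//
+//	r, _ := Prepare(&http.Request{}, WithHeader("x-ms-version", "2017-01-01"))
+//	// r.Header.Get("X-Ms-Version") == "2017-01-01"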
+func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. +func AsPatch() PrepareDecorator { return WithMethod("PATCH") } + +// AsPost returns a PrepareDecorator that sets the HTTP method to POST. +func AsPost() PrepareDecorator { return WithMethod("POST") } + +// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. +func AsPut() PrepareDecorator { return WithMethod("PUT") } + +// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed +// from the supplied baseUrl. 
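+//
+// A URL without a scheme is rejected; for example (illustrative):
+//
+//	_, err := Prepare(&http.Request{}, WithBaseURL("microsoft.com/a/b/c/"))
+//	// err reports "autorest: No scheme detected in URL microsoft.com/a/b/c/"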
+func WithBaseURL(baseURL string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var u *url.URL
+				if u, err = url.Parse(baseURL); err != nil {
+					return r, err
+				}
+				if u.Scheme == "" {
+					err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
+				}
+				if err == nil {
+					r.URL = u
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(urlParameters)
+	for key, value := range parameters {
+		baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
+	}
+	return WithBaseURL(baseURL)
+}
+
+// WithFormData returns a PrepareDecorator that URL-encodes the passed url.Values (e.g.,
+// bar=baz&foo=quux) into the http.Request body.
+func WithFormData(v url.Values) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				s := v.Encode()
+				r.ContentLength = int64(len(s))
+				r.Body = ioutil.NopCloser(strings.NewReader(s))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithMultiPartFormData returns a PrepareDecorator that writes the passed form parameters into
+// the http.Request body as multipart/form-data.
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var body bytes.Buffer
+				writer := multipart.NewWriter(&body)
+				for key, value := range formDataParameters {
+					if rc, ok := value.(io.ReadCloser); ok {
+						var fd io.Writer
+						if fd, err = writer.CreateFormFile(key, key); err != nil {
+							return r, err
+						}
+						if _, err = io.Copy(fd, rc); err != nil {
+							return r, err
+						}
+					} else {
+						if err = writer.WriteField(key, ensureValueString(value)); err != nil {
+							return r, err
+						}
+					}
+				}
+				if err = writer.Close(); err != nil {
+					return r, err
+				}
+				if r.Header == nil {
+					r.Header = make(http.Header)
+				}
+				r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
+				r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+				r.ContentLength = int64(body.Len())
+				return r, err
+			}
+			return r, err
+		})
+	}
+}
+
+// WithFile returns a PrepareDecorator that sends the contents of the passed io.ReadCloser in the
+// request body.
+func WithFile(f io.ReadCloser) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				b, err := ioutil.ReadAll(f)
+				if err != nil {
+					return r, err
+				}
+				r.Body = ioutil.NopCloser(bytes.NewReader(b))
+				r.ContentLength = int64(len(b))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request
+// and sets the Content-Length header.
+func WithBool(v bool) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the
+// request and sets the Content-Length header.
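+//
+// The numeric decorators all render via fmt and delegate to WithString; for
+// example (illustrative):
+//
+//	r, _ := Prepare(&http.Request{}, WithFloat32(2.5))
+//	// r.Body reads as "2.5" and r.ContentLength == 3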
+func WithFloat32(v float32) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the
+// request and sets the Content-Length header.
+func WithFloat64(v float64) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request
+// and sets the Content-Length header.
+func WithInt32(v int32) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request
+// and sets the Content-Length header.
+func WithInt64(v int64) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithString returns a PrepareDecorator that encodes the passed string into the body of the request
+// and sets the Content-Length header.
+func WithString(v string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				r.ContentLength = int64(len(v))
+				r.Body = ioutil.NopCloser(strings.NewReader(v))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the
+// request and sets the Content-Length header.
+func WithJSON(v interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				b, err := json.Marshal(v)
+				if err == nil {
+					r.ContentLength = int64(len(b))
+					r.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithPath returns a PrepareDecorator that appends the supplied path to the existing request URL
+// path, inserting a separating "/" if needed (see parseURL below).
+func WithPath(path string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithPath", "Invoked with a nil URL")
+				}
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The
+// values will be escaped (aka URL encoded) before insertion into the path.
+func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+	parameters := escapeValueStrings(ensureValueStrings(pathParameters))
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL")
+				}
+				for key, value := range parameters {
+					path = strings.Replace(path, "{"+key+"}", value, -1)
+				}
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map.
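+//
+// For example (illustrative):
+//
+//	r, _ := Prepare(&http.Request{},
+//		WithBaseURL("https://microsoft.com/"),
+//		WithPathParameters("/{id}/status", map[string]interface{}{"id": "42"}))
+//	// r.URL.String() == "https://microsoft.com/42/status"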
+func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(pathParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +func parseURL(u *url.URL, path string) (*url.URL, error) { + p := strings.TrimRight(u.String(), "/") + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return url.Parse(p + path) +} + +// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters +// given in the supplied map (i.e., key=value). +func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(queryParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") + } + v := r.URL.Query() + for key, value := range parameters { + v.Add(key, value) + } + r.URL.RawQuery = createQuery(v) + } + return r, err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go b/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go new file mode 100644 index 000000000..9ba608a8f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go @@ -0,0 +1,766 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +// PrepareDecorators wrap and invoke a Preparer. Most often, the decorator invokes the passed +// Preparer and decorates the response. +func ExamplePrepareDecorator() { + path := "a/b/c/" + pd := func() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, fmt.Errorf("ERROR: URL is not set") + } + r.URL.Path += path + } + return r, err + }) + } + } + + r, _ := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + pd()) + + fmt.Printf("Path is %s\n", r.URL) + // Output: Path is https://microsoft.com/a/b/c/ +} + +// PrepareDecorators may also modify and then invoke the Preparer. 
+func ExamplePrepareDecorator_pre() { + pd := func() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Header.Add(http.CanonicalHeaderKey("ContentType"), "application/json") + return p.Prepare(r) + }) + } + } + + r, _ := Prepare(&http.Request{Header: http.Header{}}, + pd()) + + fmt.Printf("ContentType is %s\n", r.Header.Get("ContentType")) + // Output: ContentType is application/json +} + +// Create a sequence of three Preparers that build up the URL path. +func ExampleCreatePreparer() { + p := CreatePreparer( + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + r, err := p.Prepare(&http.Request{}) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c +} + +// Create and apply separate Preparers +func ExampleCreatePreparer_multiple() { + params := map[string]interface{}{ + "param1": "a", + "param2": "c", + } + + p1 := CreatePreparer(WithBaseURL("https://microsoft.com/")) + p2 := CreatePreparer(WithPathParameters("/{param1}/b/{param2}/", params)) + + r, err := p1.Prepare(&http.Request{}) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } + + r, err = p2.Prepare(r) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +// Create and chain separate Preparers +func ExampleCreatePreparer_chain() { + params := map[string]interface{}{ + "param1": "a", + "param2": "c", + } + + p := CreatePreparer(WithBaseURL("https://microsoft.com/")) + p = DecoratePreparer(p, WithPathParameters("/{param1}/b/{param2}/", params)) + + r, err := p.Prepare(&http.Request{}) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +// Create and prepare an http.Request in one call +func ExamplePrepare() { + r, err := Prepare(&http.Request{}, + AsGet(), + WithBaseURL("https://microsoft.com/"), + WithPath("a/b/c/")) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("%s %s", r.Method, r.URL) + } + // Output: GET https://microsoft.com/a/b/c/ +} + +// Create a request for a supplied base URL and path +func ExampleWithBaseURL() { + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/a/b/c/")) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +func ExampleWithBaseURL_second() { + _, err := Prepare(&http.Request{}, WithBaseURL(":")) + fmt.Println(err) + // Output: parse :: missing protocol scheme +} + +func ExampleWithCustomBaseURL() { + r, err := Prepare(&http.Request{}, + WithCustomBaseURL("https://{account}.{service}.core.windows.net/", + map[string]interface{}{ + "account": "myaccount", + "service": "blob", + })) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://myaccount.blob.core.windows.net/ +} + +func ExampleWithCustomBaseURL_second() { + _, err := Prepare(&http.Request{}, + WithCustomBaseURL(":", map[string]interface{}{})) + fmt.Println(err) + // Output: parse :: missing protocol scheme +} + +// Create a request with a custom HTTP header +func ExampleWithHeader() { + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/a/b/c/"), + WithHeader("x-foo", "bar")) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("Header %s=%s\n", "x-foo", 
r.Header.Get("x-foo"))
+	}
+	// Output: Header x-foo=bar
+}
+
+// Create a request whose Body is the URL-encoded form data
+func ExampleWithFormData() {
+	v := url.Values{}
+	v.Add("name", "Rob Pike")
+	v.Add("age", "42")
+
+	r, err := Prepare(&http.Request{},
+		WithFormData(v))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	}
+
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Printf("Request Body contains %s\n", string(b))
+	}
+	// Output: Request Body contains age=42&name=Rob+Pike
+}
+
+// Create a request whose Body is the JSON encoding of a structure
+func ExampleWithJSON() {
+	t := mocks.T{Name: "Rob Pike", Age: 42}
+
+	r, err := Prepare(&http.Request{},
+		WithJSON(&t))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	}
+
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Printf("Request Body contains %s\n", string(b))
+	}
+	// Output: Request Body contains {"name":"Rob Pike","age":42}
+}
+
+// Create a request from a path with escaped parameters
+func ExampleWithEscapedPathParameters() {
+	params := map[string]interface{}{
+		"param1": "a b c",
+		"param2": "d e f",
+	}
+	r, err := Prepare(&http.Request{},
+		WithBaseURL("https://microsoft.com/"),
+		WithEscapedPathParameters("/{param1}/b/{param2}/", params))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Println(r.URL)
+	}
+	// Output: https://microsoft.com/a+b+c/b/d+e+f/
+}
+
+// Create a request from a path with parameters
+func ExampleWithPathParameters() {
+	params := map[string]interface{}{
+		"param1": "a",
+		"param2": "c",
+	}
+	r, err := Prepare(&http.Request{},
+		WithBaseURL("https://microsoft.com/"),
+		WithPathParameters("/{param1}/b/{param2}/", params))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Println(r.URL)
+	}
+	// Output: https://microsoft.com/a/b/c/
+}
+
+// Create a request with query parameters
+func ExampleWithQueryParameters() {
+	params := map[string]interface{}{
+		"q1": "value1",
+		"q2": "value2",
+	}
+	r, err := Prepare(&http.Request{},
+		WithBaseURL("https://microsoft.com/"),
+		WithPath("/a/b/c/"),
+		WithQueryParameters(params))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Println(r.URL)
+	}
+	// Output: https://microsoft.com/a/b/c/?q1=value1&q2=value2
+}
+
+func TestWithCustomBaseURL(t *testing.T) {
+	r, err := Prepare(&http.Request{}, WithCustomBaseURL("https://{account}.{service}.core.windows.net/",
+		map[string]interface{}{
+			"account": "myaccount",
+			"service": "blob",
+		}))
+	if err != nil {
+		t.Fatalf("autorest: WithCustomBaseURL should not fail")
+	}
+	if r.URL.String() != "https://myaccount.blob.core.windows.net/" {
+		t.Fatalf("autorest: WithCustomBaseURL expected https://myaccount.blob.core.windows.net/, got %s", r.URL)
+	}
+}
+
+func TestWithCustomBaseURLwithInvalidURL(t *testing.T) {
+	_, err := Prepare(&http.Request{}, WithCustomBaseURL("hello/{account}.{service}.core.windows.net/",
+		map[string]interface{}{
+			"account": "myaccount",
+			"service": "blob",
+		}))
+	if err == nil {
+		t.Fatalf("autorest: WithCustomBaseURL should fail for a URL parse error")
+	}
+}
+
+func TestWithPathWithInvalidPath(t *testing.T) {
+	p := "path%2*end"
+	if _, err := Prepare(&http.Request{}, WithBaseURL("https://microsoft.com/"), WithPath(p)); err == nil {
+		t.Fatalf("autorest: WithPath should fail for invalid URL escape error for path '%v' ", p)
+	}
+
+}
+
+func TestWithPathParametersWithInvalidPath(t *testing.T) {
+	p := "path%2*end"
+
m := map[string]interface{}{ + "path1": p, + } + if _, err := Prepare(&http.Request{}, WithBaseURL("https://microsoft.com/"), WithPathParameters("/{path1}/", m)); err == nil { + t.Fatalf("autorest: WithPath should fail for invalid URL escape for path '%v' ", p) + } + +} + +func TestCreatePreparerDoesNotModify(t *testing.T) { + r1 := &http.Request{} + p := CreatePreparer() + r2, err := p.Prepare(r1) + if err != nil { + t.Fatalf("autorest: CreatePreparer failed (%v)", err) + } + if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: CreatePreparer without decorators modified the request") + } +} + +func TestCreatePreparerRunsDecoratorsInOrder(t *testing.T) { + p := CreatePreparer(WithBaseURL("https://microsoft.com/"), WithPath("1"), WithPath("2"), WithPath("3")) + r, err := p.Prepare(&http.Request{}) + if err != nil { + t.Fatalf("autorest: CreatePreparer failed (%v)", err) + } + if r.URL.String() != "https:/1/2/3" && r.URL.Host != "microsoft.com" { + t.Fatalf("autorest: CreatePreparer failed to run decorators in order") + } +} + +func TestAsContentType(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), AsContentType("application/text")) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerContentType) != "application/text" { + t.Fatalf("autorest: AsContentType failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) + } +} + +func TestAsFormURLEncoded(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), AsFormURLEncoded()) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerContentType) != mimeTypeFormPost { + t.Fatalf("autorest: AsFormURLEncoded failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) + } +} + +func TestAsJSON(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), AsJSON()) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerContentType) != mimeTypeJSON { + t.Fatalf("autorest: AsJSON failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) + } +} + +func TestWithNothing(t *testing.T) { + r1 := mocks.NewRequest() + r2, err := Prepare(r1, WithNothing()) + if err != nil { + t.Fatalf("autorest: WithNothing returned an unexpected error (%v)", err) + } + + if !reflect.DeepEqual(r1, r2) { + t.Fatal("azure: WithNothing modified the passed HTTP Request") + } +} + +func TestWithBearerAuthorization(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), WithBearerAuthorization("SOME-TOKEN")) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerAuthorization) != "Bearer SOME-TOKEN" { + t.Fatalf("autorest: WithBearerAuthorization failed to add header (%s=%s)", headerAuthorization, r.Header.Get(headerAuthorization)) + } +} + +func TestWithUserAgent(t *testing.T) { + ua := "User Agent Go" + r, err := Prepare(mocks.NewRequest(), WithUserAgent(ua)) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.UserAgent() != ua || r.Header.Get(headerUserAgent) != ua { + t.Fatalf("autorest: WithUserAgent failed to add header (%s=%s)", headerUserAgent, r.Header.Get(headerUserAgent)) + } +} + +func TestWithMethod(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), WithMethod("HEAD")) + if r.Method != "HEAD" { + t.Fatal("autorest: WithMethod failed to set HTTP method header") + } +} + +func TestAsDelete(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsDelete()) + if r.Method != "DELETE" { + t.Fatal("autorest: AsDelete failed to set HTTP method header to DELETE") + } +} + +func TestAsGet(t *testing.T) { + r, 
+	r, _ := Prepare(mocks.NewRequest(), AsGet())
+	if r.Method != "GET" {
+		t.Fatal("autorest: AsGet failed to set HTTP method header to GET")
+	}
+}
+
+func TestAsHead(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), AsHead())
+	if r.Method != "HEAD" {
+		t.Fatal("autorest: AsHead failed to set HTTP method header to HEAD")
+	}
+}
+
+func TestAsOptions(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), AsOptions())
+	if r.Method != "OPTIONS" {
+		t.Fatal("autorest: AsOptions failed to set HTTP method header to OPTIONS")
+	}
+}
+
+func TestAsPatch(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), AsPatch())
+	if r.Method != "PATCH" {
+		t.Fatal("autorest: AsPatch failed to set HTTP method header to PATCH")
+	}
+}
+
+func TestAsPost(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), AsPost())
+	if r.Method != "POST" {
+		t.Fatal("autorest: AsPost failed to set HTTP method header to POST")
+	}
+}
+
+func TestAsPut(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), AsPut())
+	if r.Method != "PUT" {
+		t.Fatal("autorest: AsPut failed to set HTTP method header to PUT")
+	}
+}
+
+func TestPrepareWithNullRequest(t *testing.T) {
+	_, err := Prepare(nil)
+	if err == nil {
+		t.Fatal("autorest: Prepare failed to return an error when given a null http.Request")
+	}
+}
+
+func TestWithFormDataSetsContentLength(t *testing.T) {
+	v := url.Values{}
+	v.Add("name", "Rob Pike")
+	v.Add("age", "42")
+
+	r, err := Prepare(&http.Request{},
+		WithFormData(v))
+	if err != nil {
+		t.Fatalf("autorest: WithFormData failed with error (%v)", err)
+	}
+
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Fatalf("autorest: WithFormData failed with error (%v)", err)
+	}
+
+	expected := "name=Rob+Pike&age=42"
+	if !(string(b) == expected || string(b) == "age=42&name=Rob+Pike") {
+		t.Fatalf("autorest: WithFormData failed to return correct string got (%v), expected (%v)", string(b), expected)
+	}
+
+	if r.ContentLength != int64(len(b)) {
+		t.Fatalf("autorest: WithFormData set Content-Length to %v, expected %v", r.ContentLength, len(b))
+	}
+}
+
+func TestWithMultiPartFormDataSetsContentLength(t *testing.T) {
+	v := map[string]interface{}{
+		"file": ioutil.NopCloser(strings.NewReader("Hello Gopher")),
+		"age":  "42",
+	}
+
+	r, err := Prepare(&http.Request{},
+		WithMultiPartFormData(v))
+	if err != nil {
+		t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err)
+	}
+
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err)
+	}
+
+	if r.ContentLength != int64(len(b)) {
+		t.Fatalf("autorest: WithMultiPartFormData set Content-Length to %v, expected %v", r.ContentLength, len(b))
+	}
+}
+
+func TestWithMultiPartFormDataWithNoFile(t *testing.T) {
+	v := map[string]interface{}{
+		"file": "no file",
+		"age":  "42",
+	}
+
+	r, err := Prepare(&http.Request{},
+		WithMultiPartFormData(v))
+	if err != nil {
+		t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err)
+	}
+
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err)
+	}
+
+	if r.ContentLength != int64(len(b)) {
+		t.Fatalf("autorest: WithMultiPartFormData set Content-Length to %v, expected %v", r.ContentLength, len(b))
+	}
+}
+
+func TestWithFile(t *testing.T) {
+	r, err := Prepare(&http.Request{},
+		WithFile(ioutil.NopCloser(strings.NewReader("Hello Gopher"))))
+	if err != nil {
+		t.Fatalf("autorest: WithFile failed with error (%v)", err)
+	}
+
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Fatalf("autorest: WithFile failed with error (%v)", err)
+	}
+	if r.ContentLength != int64(len(b)) {
+		t.Fatalf("autorest: WithFile set Content-Length to %v, expected %v", r.ContentLength, len(b))
+	}
+}
+
+func TestWithBool_SetsTheBody(t *testing.T) {
+	r, err := Prepare(&http.Request{},
+		WithBool(false))
+	if err != nil {
+		t.Fatalf("autorest: WithBool failed with error (%v)", err)
+	}
+
+	s, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Fatalf("autorest: WithBool failed with error (%v)", err)
+	}
+
+	if r.ContentLength != int64(len(fmt.Sprintf("%v", false))) {
+		t.Fatalf("autorest: WithBool set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", false))))
+	}
+
+	v, err := strconv.ParseBool(string(s))
+	if err != nil || v {
+		t.Fatalf("autorest: WithBool incorrectly encoded the boolean as %v", s)
+	}
+}
+
+func TestWithFloat32_SetsTheBody(t *testing.T) {
+	r, err := Prepare(&http.Request{},
+		WithFloat32(42.0))
+	if err != nil {
+		t.Fatalf("autorest: WithFloat32 failed with error (%v)", err)
+	}
+
+	s, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Fatalf("autorest: WithFloat32 failed with error (%v)", err)
+	}
+
+	if r.ContentLength != int64(len(fmt.Sprintf("%v", 42.0))) {
+		t.Fatalf("autorest: WithFloat32 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42.0))))
+	}
+
+	v, err := strconv.ParseFloat(string(s), 32)
+	if err != nil || float32(v) != float32(42.0) {
+		t.Fatalf("autorest: WithFloat32 incorrectly encoded the float32 as %v", s)
+	}
+}
+
+func TestWithFloat64_SetsTheBody(t *testing.T) {
+	r, err := Prepare(&http.Request{},
+		WithFloat64(42.0))
+	if err != nil {
+		t.Fatalf("autorest: WithFloat64 failed with error (%v)", err)
+	}
+
+	s, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Fatalf("autorest: WithFloat64 failed with error (%v)", err)
+	}
+
+	if r.ContentLength != int64(len(fmt.Sprintf("%v", 42.0))) {
+		t.Fatalf("autorest: WithFloat64 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42.0))))
+	}
+
+	v, err := strconv.ParseFloat(string(s), 64)
+	if err != nil || v != float64(42.0) {
+		t.Fatalf("autorest: WithFloat64 incorrectly encoded the float64 as %v", s)
+	}
+}
+
+func TestWithInt32_SetsTheBody(t *testing.T) {
+	r, err := Prepare(&http.Request{},
+		WithInt32(42))
+	if err != nil {
+		t.Fatalf("autorest: WithInt32 failed with error (%v)", err)
+	}
+
+	s, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Fatalf("autorest: WithInt32 failed with error (%v)", err)
+	}
+
+	if r.ContentLength != int64(len(fmt.Sprintf("%v", 42))) {
+		t.Fatalf("autorest: WithInt32 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42))))
+	}
+
+	v, err := strconv.ParseInt(string(s), 10, 32)
+	if err != nil || int32(v) != int32(42) {
+		t.Fatalf("autorest: WithInt32 incorrectly encoded the int32 as %v", s)
+	}
+}
+
+func TestWithInt64_SetsTheBody(t *testing.T) {
+	r, err := Prepare(&http.Request{},
+		WithInt64(42))
+	if err != nil {
+		t.Fatalf("autorest: WithInt64 failed with error (%v)", err)
+	}
+
+	s, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Fatalf("autorest: WithInt64 failed with error (%v)", err)
+	}
+
+	if r.ContentLength != int64(len(fmt.Sprintf("%v", 42))) {
+		t.Fatalf("autorest: WithInt64 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42))))
+	}
+
+	v, err := strconv.ParseInt(string(s), 10, 64)
+	if err != nil || v != int64(42) {
+		t.Fatalf("autorest: WithInt64 incorrectly encoded the int64 as %v", s)
+	}
+}
+
+func TestWithString_SetsTheBody(t *testing.T) {
+	r, err := Prepare(&http.Request{},
+		WithString("value"))
+	if err != nil {
+		t.Fatalf("autorest: WithString failed with error (%v)", err)
+	}
+
+	s, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Fatalf("autorest: WithString failed with error (%v)", err)
+	}
+
+	if r.ContentLength != int64(len("value")) {
+		t.Fatalf("autorest: WithString set Content-Length to %v, expected %v", r.ContentLength, int64(len("value")))
+	}
+
+	if string(s) != "value" {
+		t.Fatalf("autorest: WithString incorrectly encoded the string as %v", s)
+	}
+}
+
+func TestWithJSONSetsContentLength(t *testing.T) {
+	r, err := Prepare(&http.Request{},
+		WithJSON(&mocks.T{Name: "Rob Pike", Age: 42}))
+	if err != nil {
+		t.Fatalf("autorest: WithJSON failed with error (%v)", err)
+	}
+
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Fatalf("autorest: WithJSON failed with error (%v)", err)
+	}
+
+	if r.ContentLength != int64(len(b)) {
+		t.Fatalf("autorest: WithJSON set Content-Length to %v, expected %v", r.ContentLength, len(b))
+	}
+}
+
+func TestWithHeaderAllocatesHeaders(t *testing.T) {
+	r, err := Prepare(mocks.NewRequest(), WithHeader("x-foo", "bar"))
+	if err != nil {
+		t.Fatalf("autorest: WithHeader failed (%v)", err)
+	}
+	if r.Header.Get("x-foo") != "bar" {
+		t.Fatalf("autorest: WithHeader failed to add header (%s=%s)", "x-foo", r.Header.Get("x-foo"))
+	}
+}
+
+func TestWithPathCatchesNilURL(t *testing.T) {
+	_, err := Prepare(&http.Request{}, WithPath("a"))
+	if err == nil {
+		t.Fatalf("autorest: WithPath failed to catch a nil URL")
+	}
+}
+
+func TestWithEscapedPathParametersCatchesNilURL(t *testing.T) {
+	_, err := Prepare(&http.Request{}, WithEscapedPathParameters("", map[string]interface{}{"foo": "bar"}))
+	if err == nil {
+		t.Fatalf("autorest: WithEscapedPathParameters failed to catch a nil URL")
+	}
+}
+
+func TestWithPathParametersCatchesNilURL(t *testing.T) {
+	_, err := Prepare(&http.Request{}, WithPathParameters("", map[string]interface{}{"foo": "bar"}))
+	if err == nil {
+		t.Fatalf("autorest: WithPathParameters failed to catch a nil URL")
+	}
+}
+
+func TestWithQueryParametersCatchesNilURL(t *testing.T) {
+	_, err := Prepare(&http.Request{}, WithQueryParameters(map[string]interface{}{"foo": "bar"}))
+	if err == nil {
+		t.Fatalf("autorest: WithQueryParameters failed to catch a nil URL")
+	}
+}
+
+func TestModifyingExistingRequest(t *testing.T) {
+	r, err := Prepare(mocks.NewRequestForURL("https://bing.com"), WithPath("search"), WithQueryParameters(map[string]interface{}{"q": "golang"}))
+	if err != nil {
+		t.Fatalf("autorest: Preparing an existing request returned an error (%v)", err)
+	}
+	if r.URL.String() != "https:/search?q=golang" && r.URL.Host != "bing.com" {
+		t.Fatalf("autorest: Preparing an existing request failed (%s)", r.URL)
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go
new file mode 100644
index 000000000..a908a0adb
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go
@@ -0,0 +1,250 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+)
+
+// Responder is the interface that wraps the Respond method.
+//
+// Respond accepts and reacts to an http.Response. Implementations must not share or hold state,
+// since Responders may be shared and re-used.
+type Responder interface {
+	Respond(*http.Response) error
+}
+
+// ResponderFunc is a method that implements the Responder interface.
+type ResponderFunc func(*http.Response) error
+
+// Respond implements the Responder interface on ResponderFunc.
+func (rf ResponderFunc) Respond(r *http.Response) error {
+	return rf(r)
+}
+
+// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
+// the http.Response and pass it along or, first, pass the http.Response along then react.
+type RespondDecorator func(Responder) Responder
+
+// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
+// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
+// and re-use: it depends on the applied decorators. For example, a standard decorator that closes
+// the response body is fine to share, whereas a decorator that reads the body into a passed struct
+// is not.
+//
+// To prevent memory leaks, ensure that at least one Responder closes the response body.
+func CreateResponder(decorators ...RespondDecorator) Responder {
+	return DecorateResponder(
+		Responder(ResponderFunc(func(r *http.Response) error { return nil })),
+		decorators...)
+}
+
+// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
+// applies to the Responder. Decorators are applied in the order received, but their effect upon the
+// response depends on whether they are a pre-decorator (react to the http.Response and then pass it
+// along) or a post-decorator (pass the http.Response along and then react).
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
+	for _, decorate := range decorators {
+		r = decorate(r)
+	}
+	return r
+}
+
+// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
+// It creates a Responder from the decorators, which it then applies to the passed http.Response.
+func Respond(r *http.Response, decorators ...RespondDecorator) error {
+	if r == nil {
+		return nil
+	}
+	return CreateResponder(decorators...).Respond(r)
+}
+
+// ByIgnoring returns a RespondDecorator that ignores the passed http.Response, passing it unexamined
+// to the next RespondDecorator.
+func ByIgnoring() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			return r.Respond(resp)
+		})
+	}
+}
+
+// ByCopying returns a RespondDecorator that copies the contents of the http.Response Body into the
+// passed bytes.Buffer as the Body is read.
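+//
+// An illustrative usage sketch (editorial addition, not part of the original change; resp is an
+// assumed *http.Response and v an assumed target struct):
+//
+//	var buf bytes.Buffer
+//	err := Respond(resp,
+//		ByCopying(&buf),          // tee the body into buf as it is read
+//		ByUnmarshallingJSON(&v),  // consume the body
+//		ByClosing())
+//	// buf now holds the raw bytes that were read from resp.Body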
+func ByCopying(b *bytes.Buffer) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				resp.Body = TeeReadCloser(resp.Body, b)
+			}
+			return err
+		})
+	}
+}
+
+// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which
+// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
+// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
+// within the set.
+func ByDiscardingBody() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+					return fmt.Errorf("Error discarding the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
+// closes the response body. Since the passed Responder is invoked prior to closing the response
+// body, the decorator may occur anywhere within the set.
+func ByClosing() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
+// it closes the response if the passed Responder returns an error and the response body exists.
+func ByClosingIfError() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err != nil && resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingJSON(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				// Some responses might include a BOM; remove it for successful unmarshalling
+				b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else if len(strings.Trim(string(b), " ")) > 0 {
+					errInner = json.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the
+// response Body into the value pointed to by v.
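+//
+// A minimal usage sketch (editorial addition; resp and v are assumed, mirroring the JSON variant):
+//
+//	err := Respond(resp,
+//		WithErrorUnlessStatusCode(http.StatusOK),
+//		ByUnmarshallingXML(&v),
+//		ByClosing())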
+func ByUnmarshallingXML(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					errInner = xml.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
+// StatusCode is among the set passed. On error, the response body is fully read into a buffer,
+// presented in the returned error, and restored to the response body for later inspection.
+func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+				if resp.Body != nil {
+					defer resp.Body.Close()
+					b, _ := ioutil.ReadAll(resp.Body)
+					derr.ServiceError = b
+					resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+				err = derr
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
+// anything other than HTTP 200.
+func WithErrorUnlessOK() RespondDecorator {
+	return WithErrorUnlessStatusCode(http.StatusOK)
+}
+
+// ExtractHeader extracts all values of the specified header from the http.Response. It returns a
+// nil slice if the passed http.Response is nil or the header does not exist.
+func ExtractHeader(header string, resp *http.Response) []string {
+	if resp != nil && resp.Header != nil {
+		return resp.Header[http.CanonicalHeaderKey(header)]
+	}
+	return nil
+}
+
+// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
+// returns an empty string if the passed http.Response is nil or the header does not exist.
+func ExtractHeaderValue(header string, resp *http.Response) string {
+	h := ExtractHeader(header, resp)
+	if len(h) > 0 {
+		return h[0]
+	}
+	return ""
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder_test.go b/vendor/github.com/Azure/go-autorest/autorest/responder_test.go
new file mode 100644
index 000000000..4a57b1e3b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/responder_test.go
@@ -0,0 +1,665 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/Azure/go-autorest/autorest/mocks"
+)
+
+func ExampleWithErrorUnlessOK() {
+	r := mocks.NewResponse()
+	r.Request = mocks.NewRequest()
+
+	// Respond and leave the response body open (for a subsequent responder to close)
+	err := Respond(r,
+		WithErrorUnlessOK(),
+		ByDiscardingBody(),
+		ByClosingIfError())
+
+	if err == nil {
+		fmt.Printf("%s of %s returned HTTP 200", r.Request.Method, r.Request.URL)
+
+		// Complete handling the response and close the body
+		Respond(r,
+			ByDiscardingBody(),
+			ByClosing())
+	}
+	// Output: GET of https://microsoft.com/a/b/c/ returned HTTP 200
+}
+
+func ExampleByUnmarshallingJSON() {
+	c := `
+	{
+		"name" : "Rob Pike",
+		"age" : 42
+	}
+	`
+
+	type V struct {
+		Name string `json:"name"`
+		Age  int    `json:"age"`
+	}
+
+	v := &V{}
+
+	Respond(mocks.NewResponseWithContent(c),
+		ByUnmarshallingJSON(v),
+		ByClosing())
+
+	fmt.Printf("%s is %d years old\n", v.Name, v.Age)
+	// Output: Rob Pike is 42 years old
+}
+
+// NOTE: the XML element names below were reconstructed from the struct's xml tags; they were lost
+// in the original text, and the root element name is an assumption.
+func ExampleByUnmarshallingXML() {
+	c := `
+	<Person>
+		<Name>Rob Pike</Name>
+		<Age>42</Age>
+	</Person>`
+
+	type V struct {
+		Name string `xml:"Name"`
+		Age  int    `xml:"Age"`
+	}
+
+	v := &V{}
+
+	Respond(mocks.NewResponseWithContent(c),
+		ByUnmarshallingXML(v),
+		ByClosing())
+
+	fmt.Printf("%s is %d years old\n", v.Name, v.Age)
+	// Output: Rob Pike is 42 years old
+}
+
+func TestCreateResponderDoesNotModify(t *testing.T) {
+	r1 := mocks.NewResponse()
+	r2 := mocks.NewResponse()
+	p := CreateResponder()
+	err := p.Respond(r1)
+	if err != nil {
+		t.Fatalf("autorest: CreateResponder failed (%v)", err)
+	}
+	if !reflect.DeepEqual(r1, r2) {
+		t.Fatalf("autorest: CreateResponder without decorators modified the response")
+	}
+}
+
+func TestCreateResponderRunsDecoratorsInOrder(t *testing.T) {
+	s := ""
+
+	d := func(n int) RespondDecorator {
+		return func(r Responder) Responder {
+			return ResponderFunc(func(resp *http.Response) error {
+				err := r.Respond(resp)
+				if err == nil {
+					s += fmt.Sprintf("%d", n)
+				}
+				return err
+			})
+		}
+	}
+
+	p := CreateResponder(d(1), d(2), d(3))
+	err := p.Respond(&http.Response{})
+	if err != nil {
+		t.Fatalf("autorest: Respond failed (%v)", err)
+	}
+
+	if s != "123" {
+		t.Fatalf("autorest: CreateResponder invoked decorators in an incorrect order; expected '123', received '%s'", s)
+	}
+}
+
+func TestByIgnoring(t *testing.T) {
+	r := mocks.NewResponse()
+
+	Respond(r,
+		(func() RespondDecorator {
+			return func(r Responder) Responder {
+				return ResponderFunc(func(r2 *http.Response) error {
+					r1 := mocks.NewResponse()
+					if !reflect.DeepEqual(r1, r2) {
+						t.Fatalf("autorest: ByIgnoring modified the HTTP Response -- received %v, expected %v", r2, r1)
+					}
+					return nil
+				})
+			}
+		})(),
+		ByIgnoring(),
+		ByClosing())
+}
+
+func TestByCopying_Copies(t *testing.T) {
+	r := mocks.NewResponseWithContent(jsonT)
+	b := &bytes.Buffer{}
+
+	err := Respond(r,
+		ByCopying(b),
+		ByUnmarshallingJSON(&mocks.T{}),
+		ByClosing())
+	if err != nil {
+		t.Fatalf("autorest: ByCopying returned an unexpected error -- %v", err)
+	}
+	if b.String() != jsonT {
+		t.Fatalf("autorest: ByCopying failed to copy the bytes read")
+	}
+}
+
+func TestByCopying_ReturnsNestedErrors(t *testing.T) {
+	r := mocks.NewResponseWithContent(jsonT)
+
+	r.Body.Close()
+	err := Respond(r,
+		ByCopying(&bytes.Buffer{}),
+		ByUnmarshallingJSON(&mocks.T{}),
+		ByClosing())
+	if err == nil {
+		t.Fatalf("autorest: ByCopying failed to return the expected error")
+	}
+}
+
+func TestByCopying_AcceptsNilResponse(t *testing.T) {
+	r := mocks.NewResponse()
+
+	Respond(r,
+		(func() RespondDecorator {
+			return func(r Responder) Responder {
+				return ResponderFunc(func(resp *http.Response) error {
+					resp.Body.Close()
+					r.Respond(nil)
+					return nil
+				})
+			}
+		})(),
+		ByCopying(&bytes.Buffer{}))
+}
+
+func TestByCopying_AcceptsNilBody(t *testing.T) {
+	r := mocks.NewResponse()
+
+	Respond(r,
+		(func() RespondDecorator {
+			return func(r Responder) Responder {
+				return ResponderFunc(func(resp *http.Response) error {
+					resp.Body.Close()
+					resp.Body = nil
+					r.Respond(resp)
+					return nil
+				})
+			}
+		})(),
+		ByCopying(&bytes.Buffer{}))
+}
+
+func TestByClosing(t *testing.T) {
+	r := mocks.NewResponse()
+	err := Respond(r, ByClosing())
+	if err != nil {
+		t.Fatalf("autorest: ByClosing failed (%v)", err)
+	}
+	if r.Body.(*mocks.Body).IsOpen() {
+		t.Fatalf("autorest: ByClosing did not close the response body")
+	}
+}
+
+func TestByClosingAcceptsNilResponse(t *testing.T) {
+	r := mocks.NewResponse()
+
+	Respond(r,
+		(func() RespondDecorator {
+			return func(r Responder) Responder {
+				return ResponderFunc(func(resp *http.Response) error {
+					resp.Body.Close()
+					r.Respond(nil)
+					return nil
+				})
+			}
+		})(),
+		ByClosing())
+}
+
+func TestByClosingAcceptsNilBody(t *testing.T) {
+	r := mocks.NewResponse()
+
+	Respond(r,
+		(func() RespondDecorator {
+			return func(r Responder) Responder {
+				return ResponderFunc(func(resp *http.Response) error {
+					resp.Body.Close()
+					resp.Body = nil
+					r.Respond(resp)
+					return nil
+				})
+			}
+		})(),
+		ByClosing())
+}
+
+func TestByClosingClosesEvenAfterErrors(t *testing.T) {
+	var e error
+
+	r := mocks.NewResponse()
+	Respond(r,
+		withErrorRespondDecorator(&e),
+		ByClosing())
+
+	if r.Body.(*mocks.Body).IsOpen() {
+		t.Fatalf("autorest: ByClosing did not close the response body after an error occurred")
+	}
+}
+
+func TestByClosingClosesReturnsNestedErrors(t *testing.T) {
+	var e error
+
+	r := mocks.NewResponse()
+	err := Respond(r,
+		withErrorRespondDecorator(&e),
+		ByClosing())
+
+	if err == nil || !reflect.DeepEqual(e, err) {
+		t.Fatalf("autorest: ByClosing failed to return a nested error")
+	}
+}
+
+func TestByClosingIfErrorAcceptsNilResponse(t *testing.T) {
+	var e error
+
+	r := mocks.NewResponse()
+
+	Respond(r,
+		withErrorRespondDecorator(&e),
+		(func() RespondDecorator {
+			return func(r Responder) Responder {
+				return ResponderFunc(func(resp *http.Response) error {
+					resp.Body.Close()
+					r.Respond(nil)
+					return nil
+				})
+			}
+		})(),
+		ByClosingIfError())
+}
+
+func TestByClosingIfErrorAcceptsNilBody(t *testing.T) {
+	var e error
+
+	r := mocks.NewResponse()
+
+	Respond(r,
+		withErrorRespondDecorator(&e),
+		(func() RespondDecorator {
+			return func(r Responder) Responder {
+				return ResponderFunc(func(resp *http.Response) error {
+					resp.Body.Close()
+					resp.Body = nil
+					r.Respond(resp)
+					return nil
+				})
+			}
+		})(),
+		ByClosingIfError())
+}
+
+func TestByClosingIfErrorClosesIfAnErrorOccurs(t *testing.T) {
+	var e error
+
+	r := mocks.NewResponse()
+	Respond(r,
+		withErrorRespondDecorator(&e),
+		ByClosingIfError())
+
+	if r.Body.(*mocks.Body).IsOpen() {
+		t.Fatalf("autorest: ByClosingIfError did not close the response body after an error occurred")
+	}
+}
+
+func TestByClosingIfErrorDoesNotCloseIfNoErrorOccurs(t *testing.T) {
+	r := mocks.NewResponse()
+	Respond(r,
+		ByClosingIfError())
+
+	if !r.Body.(*mocks.Body).IsOpen() {
+		t.Fatalf("autorest: ByClosingIfError closed the response body even though no error occurred")
+	}
+}
+
+func TestByDiscardingBody(t
*testing.T) { + r := mocks.NewResponse() + err := Respond(r, + ByDiscardingBody()) + if err != nil { + t.Fatalf("autorest: ByDiscardingBody failed (%v)", err) + } + buf, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: Reading result of ByDiscardingBody failed (%v)", err) + } + + if len(buf) != 0 { + t.Logf("autorest: Body was not empty after calling ByDiscardingBody.") + t.Fail() + } +} + +func TestByDiscardingBodyAcceptsNilResponse(t *testing.T) { + var e error + + r := mocks.NewResponse() + + Respond(r, + withErrorRespondDecorator(&e), + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + r.Respond(nil) + return nil + }) + } + })(), + ByDiscardingBody()) +} + +func TestByDiscardingBodyAcceptsNilBody(t *testing.T) { + var e error + + r := mocks.NewResponse() + + Respond(r, + withErrorRespondDecorator(&e), + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + resp.Body = nil + r.Respond(resp) + return nil + }) + } + })(), + ByDiscardingBody()) +} + +func TestByUnmarshallingJSON(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByUnmarshallingJSON failed (%v)", err) + } + if v.Name != "Rob Pike" || v.Age != 42 { + t.Fatalf("autorest: ByUnmarshallingJSON failed to properly unmarshal") + } +} + +func TestByUnmarshallingJSON_HandlesReadErrors(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + r.Body.(*mocks.Body).Close() + + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err == nil { + t.Fatalf("autorest: ByUnmarshallingJSON failed to receive / respond to read error") + } +} + +func TestByUnmarshallingJSONIncludesJSONInErrors(t *testing.T) { + v := &mocks.T{} + j := jsonT[0 : len(jsonT)-2] + r := mocks.NewResponseWithContent(j) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err == nil || !strings.Contains(err.Error(), j) { + t.Fatalf("autorest: ByUnmarshallingJSON failed to return JSON in error (%v)", err) + } +} + +func TestByUnmarshallingJSONEmptyInput(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(``) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByUnmarshallingJSON failed to return nil in case of empty JSON (%v)", err) + } +} + +func TestByUnmarshallingXML(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(xmlT) + err := Respond(r, + ByUnmarshallingXML(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByUnmarshallingXML failed (%v)", err) + } + if v.Name != "Rob Pike" || v.Age != 42 { + t.Fatalf("autorest: ByUnmarshallingXML failed to properly unmarshal") + } +} + +func TestByUnmarshallingXML_HandlesReadErrors(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(xmlT) + r.Body.(*mocks.Body).Close() + + err := Respond(r, + ByUnmarshallingXML(v), + ByClosing()) + if err == nil { + t.Fatalf("autorest: ByUnmarshallingXML failed to receive / respond to read error") + } +} + +func TestByUnmarshallingXMLIncludesXMLInErrors(t *testing.T) { + v := &mocks.T{} + x := xmlT[0 : len(xmlT)-2] + r := mocks.NewResponseWithContent(x) + err := Respond(r, + ByUnmarshallingXML(v), + ByClosing()) + if err == nil || !strings.Contains(err.Error(), x) { + t.Fatalf("autorest: ByUnmarshallingXML failed 
to return XML in error (%v)", err)
+	}
+}
+
+func TestRespondAcceptsNullResponse(t *testing.T) {
+	err := Respond(nil)
+	if err != nil {
+		t.Fatalf("autorest: Respond returned an unexpected error when given a null Response (%v)", err)
+	}
+}
+
+func TestWithErrorUnlessStatusCodeOKResponse(t *testing.T) {
+	v := &mocks.T{}
+	r := mocks.NewResponseWithContent(jsonT)
+	err := Respond(r,
+		WithErrorUnlessStatusCode(http.StatusOK),
+		ByUnmarshallingJSON(v),
+		ByClosing())
+
+	if err != nil {
+		t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) failed on okay response. (%v)", err)
+	}
+
+	if v.Name != "Rob Pike" || v.Age != 42 {
+		t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) corrupted the response body of okay response.")
+	}
+}
+
+func TestWithErrorUnlessStatusCodeErrorResponse(t *testing.T) {
+	v := &mocks.T{}
+	e := &mocks.T{}
+	r := mocks.NewResponseWithContent(jsonT)
+	r.Status = "400 BadRequest"
+	r.StatusCode = http.StatusBadRequest
+
+	err := Respond(r,
+		WithErrorUnlessStatusCode(http.StatusOK),
+		ByUnmarshallingJSON(v),
+		ByClosing())
+
+	if err == nil {
+		t.Fatal("autorest: WithErrorUnlessStatusCode(http.StatusOK) did not return error, on a response to a bad request.")
+	}
+
+	var errorRespBody []byte
+	if derr, ok := err.(DetailedError); !ok {
+		t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) got wrong error type : %T, expected: DetailedError, on a response to a bad request.", err)
+	} else {
+		errorRespBody = derr.ServiceError
+	}
+
+	if errorRespBody == nil {
+		t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) ServiceError not returned in DetailedError on a response to a bad request.")
+	}
+
+	err = json.Unmarshal(errorRespBody, e)
+	if err != nil {
+		t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) cannot parse error returned in ServiceError into json. %v", err)
+	}
+
+	expected := &mocks.T{Name: "Rob Pike", Age: 42}
+	if !reflect.DeepEqual(e, expected) {
+		t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) wrong value from parsed ServiceError: got=%#v expected=%#v", e, expected)
+	}
+}
+
+func TestWithErrorUnlessStatusCode(t *testing.T) {
+	r := mocks.NewResponse()
+	r.Request = mocks.NewRequest()
+	r.Status = "400 BadRequest"
+	r.StatusCode = http.StatusBadRequest
+
+	err := Respond(r,
+		WithErrorUnlessStatusCode(http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError),
+		ByClosingIfError())
+
+	if err != nil {
+		t.Fatalf("autorest: WithErrorUnlessStatusCode returned an error (%v) for an acceptable status code (%s)", err, r.Status)
+	}
+}
+
+func TestWithErrorUnlessStatusCodeEmitsErrorForUnacceptableStatusCode(t *testing.T) {
+	r := mocks.NewResponse()
+	r.Request = mocks.NewRequest()
+	r.Status = "400 BadRequest"
+	r.StatusCode = http.StatusBadRequest
+
+	err := Respond(r,
+		WithErrorUnlessStatusCode(http.StatusOK, http.StatusUnauthorized, http.StatusInternalServerError),
+		ByClosingIfError())
+
+	if err == nil {
+		t.Fatalf("autorest: WithErrorUnlessStatusCode failed to return an error for an unacceptable status code (%s)", r.Status)
+	}
+}
+
+func TestWithErrorUnlessOK(t *testing.T) {
+	r := mocks.NewResponse()
+	r.Request = mocks.NewRequest()
+
+	err := Respond(r,
+		WithErrorUnlessOK(),
+		ByClosingIfError())
+
+	if err != nil {
+		t.Fatalf("autorest: WithErrorUnlessOK returned an error for OK status code (%v)", err)
+	}
+}
+
+func TestWithErrorUnlessOKEmitsErrorIfNotOK(t *testing.T) {
+	r := mocks.NewResponse()
+	r.Request = mocks.NewRequest()
+	r.Status = "400 BadRequest"
+	r.StatusCode = http.StatusBadRequest
+
+	err := Respond(r,
+		WithErrorUnlessOK(),
+		ByClosingIfError())
+
+	if err == nil {
+		t.Fatalf("autorest: WithErrorUnlessOK failed to return an error for a non-OK status code (%v)", err)
+	}
+}
+
+func TestExtractHeader(t *testing.T) {
+	r := mocks.NewResponse()
+	v := []string{"v1", "v2", "v3"}
+	mocks.SetResponseHeaderValues(r, mocks.TestHeader, v)
+
+	if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) {
+		t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v",
+			mocks.TestHeader, v, mocks.TestHeader, ExtractHeader(mocks.TestHeader, r))
+	}
+}
+
+func TestExtractHeaderHandlesMissingHeader(t *testing.T) {
+	var v []string
+	r := mocks.NewResponse()
+
+	if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) {
+		t.Fatalf("autorest: ExtractHeader failed to handle a missing header -- expected %v, received %v",
+			v, ExtractHeader(mocks.TestHeader, r))
+	}
+}
+
+func TestExtractHeaderValue(t *testing.T) {
+	r := mocks.NewResponse()
+	v := "v1"
+	mocks.SetResponseHeader(r, mocks.TestHeader, v)
+
+	if ExtractHeaderValue(mocks.TestHeader, r) != v {
+		t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v",
+			mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r))
+	}
+}
+
+func TestExtractHeaderValueHandlesMissingHeader(t *testing.T) {
+	r := mocks.NewResponse()
+	v := ""
+
+	if ExtractHeaderValue(mocks.TestHeader, r) != v {
+		t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v",
+			mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r))
+	}
+}
+
+func TestExtractHeaderValueRetrievesFirstValue(t *testing.T) {
+	r := mocks.NewResponse()
+	v := []string{"v1", "v2", "v3"}
+	mocks.SetResponseHeaderValues(r, mocks.TestHeader, v)
+
+	if ExtractHeaderValue(mocks.TestHeader, r) != v[0] {
+		t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v",
+			mocks.TestHeader, v[0], mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r))
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
new file mode 100644
index 000000000..fa11dbed7
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
@@ -0,0 +1,52 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// NewRetriableRequest returns a wrapper around an HTTP request that supports retry logic.
+func NewRetriableRequest(req *http.Request) *RetriableRequest {
+	return &RetriableRequest{req: req}
+}
+
+// Request returns the wrapped HTTP request.
+func (rr *RetriableRequest) Request() *http.Request {
+	return rr.req
+}
+
+func (rr *RetriableRequest) prepareFromByteReader() (err error) {
+	// fall back to making a copy (only do this once)
+	b := []byte{}
+	if rr.req.ContentLength > 0 {
+		b = make([]byte, rr.req.ContentLength)
+		_, err = io.ReadFull(rr.req.Body, b)
+		if err != nil {
+			return err
+		}
+	} else {
+		b, err = ioutil.ReadAll(rr.req.Body)
+		if err != nil {
+			return err
+		}
+	}
+	rr.br = bytes.NewReader(b)
+	rr.req.Body = ioutil.NopCloser(rr.br)
+	return err
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
new file mode 100644
index 000000000..7143cc61b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
@@ -0,0 +1,54 @@
+// +build !go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
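+//
+// A sketch of the intended retry pattern (editorial illustration; "client" is any Sender and
+// "attempts" is caller-chosen -- this mirrors what DoRetryForAttempts does internally):
+//
+//	rr := NewRetriableRequest(req)
+//	for attempt := 0; attempt < attempts; attempt++ {
+//		if err = rr.Prepare(); err != nil {
+//			break // the body could not be preserved for replay
+//		}
+//		if resp, err = client.Do(rr.Request()); err == nil {
+//			break // success; stop retrying
+//		}
+//	}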
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.br != nil {
+			_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
new file mode 100644
index 000000000..ae15c6bf9
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -0,0 +1,66 @@
+// +build go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	rc  io.ReadCloser
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.rc != nil {
+			rr.req.Body = rr.rc
+		} else if rr.br != nil {
+			_, err = rr.br.Seek(0, io.SeekStart)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.req.GetBody != nil {
+			// this will allow us to preserve the body without having to
+			// make a copy. note we need to do this on each iteration
+			rr.rc, err = rr.req.GetBody()
+			if err != nil {
+				return err
+			}
+		} else if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.GetBody = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
new file mode 100644
index 000000000..7264c32f2
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -0,0 +1,307 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"log"
+	"math"
+	"net/http"
+	"strconv"
+	"time"
+)
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(&http.Client{}, decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return SendWithSender(&http.Client{}, r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return DecorateSender(s, decorators...).Do(r)
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender. The delay may be terminated by closing the optional channel on the
+// http.Request. If canceled, no further Senders are invoked.
+func AfterDelay(d time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			if !DelayForBackoff(d, 0, r.Cancel) {
+				return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
+			}
+			return s.Do(r)
+		})
+	}
+}
+
+// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
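+//
+// For example (editorial sketch; client and req are assumed), AsIs can stand in where a decorator
+// is required but no modification is wanted:
+//
+//	resp, err := SendWithSender(client, req,
+//		AsIs(),
+//		DoCloseIfError())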
+func AsIs() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return s.Do(r) + }) + } +} + +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which +// it closes the response if the passed Sender returns an error and the response body exists. +func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByDiscardingBody(), ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequest(resp, r.Cancel) + + for err == nil && ResponseHasStatusCode(resp, codes...) { + Respond(resp, + ByDiscardingBody(), + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. 
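+//
+// For example (editorial sketch; client and req are assumed), retry up to five times with an
+// exponential backoff starting at 250ms:
+//
+//	resp, err := SendWithSender(client, req,
+//		DoRetryForAttempts(5, 250*time.Millisecond),
+//		DoCloseIfError())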
+func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + attempts++ + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err != nil || !ResponseHasStatusCode(resp, codes...) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Cancel) + if !delayed { + DelayForBackoff(backoff, attempt, r.Cancel) + } + } + return resp, err + }) + } +} + +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in +// responses with status code 429 +func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { + retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) + if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { + select { + case <-time.After(time.Duration(retryAfter) * time.Second): + return true + case <-cancel: + return false + } + } + return false +} + +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal +// to or greater than the specified duration, exponentially backing off between requests using the +// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the +// optional channel on the http.Request. +func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + end := time.Now().Add(d) + for attempt := 0; time.Now().Before(end); attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// WithLogging returns a SendDecorator that implements simple before and after logging of the +// request. 
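+//
+// For example (editorial sketch; client and req are assumed):
+//
+//	logger := log.New(os.Stderr, "autorest: ", log.LstdFlags)
+//	resp, err := SendWithSender(client, req, WithLogging(logger))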
+func WithLogging(logger *log.Logger) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			logger.Printf("Sending %s %s", r.Method, r.URL)
+			resp, err := s.Do(r)
+			if err != nil {
+				logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
+			} else {
+				logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of the
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be
+// set to zero for no delay. The delay may be canceled by closing the passed channel. If terminated
+// early, it returns false.
+// Note: Passing attempt 1 will result in doubling the "backoff" duration. Treat this as a
+// zero-based attempt count.
+func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+	select {
+	case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second):
+		return true
+	case <-cancel:
+		return false
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender_test.go b/vendor/github.com/Azure/go-autorest/autorest/sender_test.go
new file mode 100644
index 000000000..a72731d41
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender_test.go
@@ -0,0 +1,811 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +import ( + "bytes" + "fmt" + "log" + "net/http" + "os" + "reflect" + "sync" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func ExampleSendWithSender() { + r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(r) + + client := mocks.NewSender() + client.AppendAndRepeatResponse(r, 10) + + logger := log.New(os.Stdout, "autorest: ", 0) + na := NullAuthorizer{} + + req, _ := Prepare(&http.Request{}, + AsGet(), + WithBaseURL("https://microsoft.com/a/b/c/"), + na.WithAuthorization()) + + r, _ = SendWithSender(client, req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusAccepted), + DoCloseIfError(), + DoRetryForAttempts(5, time.Duration(0))) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + // Output: + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted +} + +func ExampleDoRetryForAttempts() { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), 10) + + // Retry with backoff -- ensure returned Bodies are closed + r, _ := SendWithSender(client, mocks.NewRequest(), + DoCloseIfError(), + DoRetryForAttempts(5, time.Duration(0))) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + fmt.Printf("Retry stopped after %d attempts", client.Attempts()) + // Output: Retry stopped after 5 attempts +} + +func ExampleDoErrorIfStatusCode() { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 NoContent", http.StatusNoContent), 10) + + // Chain decorators to retry the request, up to five times, if the status code is 204 + r, _ := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusNoContent), + DoCloseIfError(), + DoRetryForAttempts(5, time.Duration(0))) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + fmt.Printf("Retry stopped after %d attempts with code %s", client.Attempts(), r.Status) + // Output: Retry stopped after 5 attempts with code 204 NoContent +} + +func TestSendWithSenderRunsDecoratorsInOrder(t *testing.T) { + client := mocks.NewSender() + s := "" + + r, err := SendWithSender(client, mocks.NewRequest(), + withMessage(&s, "a"), + withMessage(&s, "b"), + withMessage(&s, "c")) + if err != nil { + t.Fatalf("autorest: SendWithSender returned an error (%v)", err) + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if s != "abc" { + t.Fatalf("autorest: SendWithSender invoke decorators out of order; expected 'abc', received '%s'", s) + } +} + +func TestCreateSender(t *testing.T) { + f := false + + s := CreateSender( + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return nil, nil + }) + } + })()) + s.Do(&http.Request{}) + + if !f { + t.Fatal("autorest: CreateSender failed to apply supplied decorator") + } +} + +func TestSend(t *testing.T) { + f := false + + Send(&http.Request{}, + (func() SendDecorator { + return func(s Sender) Sender { + return 
SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return nil, nil + }) + } + })()) + + if !f { + t.Fatal("autorest: Send failed to apply supplied decorator") + } +} + +func TestAfterDelayWaits(t *testing.T) { + client := mocks.NewSender() + + d := 2 * time.Second + + tt := time.Now() + r, _ := SendWithSender(client, mocks.NewRequest(), + AfterDelay(d)) + s := time.Since(tt) + if s < d { + t.Fatal("autorest: AfterDelay failed to wait for at least the specified duration") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestAfterDelay_Cancels(t *testing.T) { + client := mocks.NewSender() + cancel := make(chan struct{}) + delay := 5 * time.Second + + var wg sync.WaitGroup + wg.Add(1) + tt := time.Now() + go func() { + req := mocks.NewRequest() + req.Cancel = cancel + wg.Done() + SendWithSender(client, req, + AfterDelay(delay)) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if time.Since(tt) >= delay { + t.Fatal("autorest: AfterDelay failed to cancel") + } +} + +func TestAfterDelayDoesNotWaitTooLong(t *testing.T) { + client := mocks.NewSender() + + d := 5 * time.Millisecond + start := time.Now() + r, _ := SendWithSender(client, mocks.NewRequest(), + AfterDelay(d)) + + if time.Since(start) > (5 * d) { + t.Fatal("autorest: AfterDelay waited too long (exceeded 5 times specified duration)") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestAsIs(t *testing.T) { + client := mocks.NewSender() + + r1 := mocks.NewResponse() + client.AppendResponse(r1) + + r2, err := SendWithSender(client, mocks.NewRequest(), + AsIs()) + if err != nil { + t.Fatalf("autorest: AsIs returned an unexpected error (%v)", err) + } else if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: AsIs modified the response -- received %v, expected %v", r2, r1) + } + + Respond(r1, + ByDiscardingBody(), + ByClosing()) + Respond(r2, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoCloseIfError(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusBadRequest), + DoCloseIfError()) + + if r.Body.(*mocks.Body).IsOpen() { + t.Fatal("autorest: Expected DoCloseIfError to close response body -- it was left open") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoCloseIfErrorAcceptsNilResponse(t *testing.T) { + client := mocks.NewSender() + + SendWithSender(client, mocks.NewRequest(), + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + resp.Body.Close() + } + return nil, fmt.Errorf("Faux Error") + }) + } + })(), + DoCloseIfError()) +} + +func TestDoCloseIfErrorAcceptsNilBody(t *testing.T) { + client := mocks.NewSender() + + SendWithSender(client, mocks.NewRequest(), + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + resp.Body.Close() + } + resp.Body = nil + return resp, fmt.Errorf("Faux Error") + }) + } + })(), + DoCloseIfError()) +} + +func TestDoErrorIfStatusCode(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusBadRequest), + DoCloseIfError()) + if err 
== nil {
+		t.Fatal("autorest: DoErrorIfStatusCode failed to emit an error for passed code")
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+}
+
+func TestDoErrorIfStatusCodeIgnoresStatusCodes(t *testing.T) {
+	client := mocks.NewSender()
+	client.AppendResponse(newAcceptedResponse())
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoErrorIfStatusCode(http.StatusBadRequest),
+		DoCloseIfError())
+	if err != nil {
+		t.Fatal("autorest: DoErrorIfStatusCode failed to ignore a status code")
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+}
+
+func TestDoErrorUnlessStatusCode(t *testing.T) {
+	client := mocks.NewSender()
+	client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest))
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoErrorUnlessStatusCode(http.StatusAccepted),
+		DoCloseIfError())
+	if err == nil {
+		t.Fatal("autorest: DoErrorUnlessStatusCode failed to emit an error for an unknown status code")
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+}
+
+func TestDoErrorUnlessStatusCodeIgnoresStatusCodes(t *testing.T) {
+	client := mocks.NewSender()
+	client.AppendResponse(newAcceptedResponse())
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoErrorUnlessStatusCode(http.StatusAccepted),
+		DoCloseIfError())
+	if err != nil {
+		t.Fatal("autorest: DoErrorUnlessStatusCode emitted an error for a known status code")
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+}
+
+func TestDoRetryForAttemptsStopsAfterSuccess(t *testing.T) {
+	client := mocks.NewSender()
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForAttempts(5, time.Duration(0)))
+	if client.Attempts() != 1 {
+		t.Fatalf("autorest: DoRetryForAttempts failed to stop after success -- expected attempts %v, actual %v",
+			1, client.Attempts())
+	}
+	if err != nil {
+		t.Fatalf("autorest: DoRetryForAttempts returned an unexpected error (%v)", err)
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+}
+
+func TestDoRetryForAttemptsStopsAfterAttempts(t *testing.T) {
+	client := mocks.NewSender()
+	client.SetAndRepeatError(fmt.Errorf("Faux Error"), 10)
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForAttempts(5, time.Duration(0)),
+		DoCloseIfError())
+	if err == nil {
+		t.Fatal("autorest: Mock client failed to emit errors")
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+
+	if client.Attempts() != 5 {
+		t.Fatal("autorest: DoRetryForAttempts failed to stop after specified number of attempts")
+	}
+}
+
+func TestDoRetryForAttemptsReturnsResponse(t *testing.T) {
+	client := mocks.NewSender()
+	client.SetError(fmt.Errorf("Faux Error"))
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForAttempts(1, time.Duration(0)))
+	if err == nil {
+		t.Fatal("autorest: Mock client failed to emit errors")
+	}
+
+	if r == nil {
+		t.Fatal("autorest: DoRetryForAttempts failed to return the underlying response")
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+}
+
+func TestDoRetryForDurationStopsAfterSuccess(t *testing.T) {
+	client := mocks.NewSender()
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForDuration(10*time.Millisecond, time.Duration(0)))
+	if client.Attempts() != 1 {
+		t.Fatalf("autorest: DoRetryForDuration failed to stop after success -- expected attempts %v, actual %v",
+			1, client.Attempts())
+	}
+	if err != nil {
+		t.Fatalf("autorest: DoRetryForDuration returned an unexpected error (%v)", err)
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+}
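+
+// Editorial sketch, not upstream code: *http.Client satisfies the Sender
+// interface, so the decorators exercised above can wrap a real client too.
+// The URL is a placeholder.
+func exampleRetryRealClient() {
+	req, _ := http.NewRequest(http.MethodGet, "https://example.com/", nil)
+	resp, err := SendWithSender(http.DefaultClient, req,
+		DoRetryForAttempts(3, 500*time.Millisecond), // retry transport errors with exponential backoff
+		DoCloseIfError())                            // close the response body if an error occurred
+	fmt.Println(resp, err)
+}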
+func TestDoRetryForDurationStopsAfterDuration(t *testing.T) {
+	client := mocks.NewSender()
+	client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1)
+
+	d := 5 * time.Millisecond
+	start := time.Now()
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForDuration(d, time.Duration(0)),
+		DoCloseIfError())
+	if err == nil {
+		t.Fatal("autorest: Mock client failed to emit errors")
+	}
+
+	if time.Since(start) < d {
+		t.Fatal("autorest: DoRetryForDuration stopped too soon")
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+}
+
+func TestDoRetryForDurationStopsWithinReason(t *testing.T) {
+	client := mocks.NewSender()
+	client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1)
+
+	d := 5 * time.Second
+	start := time.Now()
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForDuration(d, time.Duration(0)),
+		DoCloseIfError())
+	if err == nil {
+		t.Fatal("autorest: Mock client failed to emit errors")
+	}
+
+	if time.Since(start) > (5 * d) {
+		t.Fatal("autorest: DoRetryForDuration failed to stop soon enough (exceeded 5 times the specified duration)")
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+}
+
+func TestDoRetryForDurationReturnsResponse(t *testing.T) {
+	client := mocks.NewSender()
+	client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1)
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForDuration(10*time.Millisecond, time.Duration(0)),
+		DoCloseIfError())
+	if err == nil {
+		t.Fatal("autorest: Mock client failed to emit errors")
+	}
+
+	if r == nil {
+		t.Fatal("autorest: DoRetryForDuration failed to return the underlying response")
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+}
+
+func TestDelayForBackoff(t *testing.T) {
+	d := 2 * time.Second
+	start := time.Now()
+	DelayForBackoff(d, 0, nil)
+	if time.Since(start) < d {
+		t.Fatal("autorest: DelayForBackoff did not delay as long as expected")
+	}
+}
+
+func TestDelayForBackoff_Cancels(t *testing.T) {
+	cancel := make(chan struct{})
+	delay := 5 * time.Second
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	start := time.Now()
+	go func() {
+		wg.Done()
+		DelayForBackoff(delay, 0, cancel)
+	}()
+	wg.Wait()
+	close(cancel)
+	time.Sleep(5 * time.Millisecond)
+	if time.Since(start) >= delay {
+		t.Fatal("autorest: DelayForBackoff failed to cancel")
+	}
+}
+
+func TestDelayForBackoffWithinReason(t *testing.T) {
+	d := 5 * time.Second
+	maxCoefficient := 2
+	start := time.Now()
+	DelayForBackoff(d, 0, nil)
+	if time.Since(start) > (time.Duration(maxCoefficient) * d) {
+		t.Fatalf("autorest: DelayForBackoff delayed too long (exceeded %d times the specified duration)", maxCoefficient)
+	}
+}
+
+func TestDoPollForStatusCodes_IgnoresUnspecifiedStatusCodes(t *testing.T) {
+	client := mocks.NewSender()
+
+	r, _ := SendWithSender(client, mocks.NewRequest(),
+		DoPollForStatusCodes(time.Duration(0), time.Duration(0)))
+
+	if client.Attempts() != 1 {
+		t.Fatalf("autorest: Sender#DoPollForStatusCodes polled for unspecified status code")
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+}
+
+func TestDoPollForStatusCodes_PollsForSpecifiedStatusCodes(t *testing.T) {
+	client := mocks.NewSender()
+	client.AppendResponse(newAcceptedResponse())
+
+	r, _ := SendWithSender(client, mocks.NewRequest(),
+		DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted))
+
+	if client.Attempts() != 2 {
+		t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to poll for specified status code")
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+}
+
+func
TestDoPollForStatusCodes_CanBeCanceled(t *testing.T) { + cancel := make(chan struct{}) + delay := 5 * time.Second + + r := mocks.NewResponse() + mocks.SetAcceptedHeaders(r) + client := mocks.NewSender() + client.AppendAndRepeatResponse(r, 100) + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + go func() { + wg.Done() + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + Respond(r, + ByDiscardingBody(), + ByClosing()) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if time.Since(start) >= delay { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to cancel") + } +} + +func TestDoPollForStatusCodes_ClosesAllNonreturnedResponseBodiesWhenPolling(t *testing.T) { + resp := newAcceptedResponse() + + client := mocks.NewSender() + client.AppendAndRepeatResponse(resp, 2) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if resp.Body.(*mocks.Body).IsOpen() || resp.Body.(*mocks.Body).CloseAttempts() < 2 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes did not close unreturned response bodies") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoPollForStatusCodes_LeavesLastResponseBodyOpen(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if !r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: Sender#DoPollForStatusCodes did not leave open the body of the last response") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoPollForStatusCodes_StopsPollingAfterAnError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(newAcceptedResponse(), 5) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(1) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if client.Attempts() > 2 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to stop polling after receiving an error") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoPollForStatusCodes_ReturnsPollingError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(newAcceptedResponse(), 5) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(1) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if err == nil { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to return error from polling") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestWithLogging_Logs(t *testing.T) { + buf := &bytes.Buffer{} + logger := log.New(buf, "autorest: ", 0) + client := mocks.NewSender() + + r, _ := SendWithSender(client, &http.Request{}, + WithLogging(logger)) + + if buf.String() == "" { + t.Fatal("autorest: Sender#WithLogging failed to log the request") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestWithLogging_HandlesMissingResponse(t *testing.T) { + buf := &bytes.Buffer{} + logger := log.New(buf, "autorest: ", 0) + client := mocks.NewSender() + client.AppendResponse(nil) + client.SetError(fmt.Errorf("Faux Error")) + + r, err := SendWithSender(client, &http.Request{}, + WithLogging(logger)) + + if r != 
nil || err == nil { + t.Fatal("autorest: Sender#WithLogging returned a valid response -- expecting nil") + } + if buf.String() == "" { + t.Fatal("autorest: Sender#WithLogging failed to log the request for a nil response") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoRetryForStatusCodesWithSuccess(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("408 Request Timeout", http.StatusRequestTimeout), 2) + client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK)) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(5, time.Duration(2*time.Second), http.StatusRequestTimeout), + ) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if client.Attempts() != 3 { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: StatusCode %v in %v attempts; Want: StatusCode 200 OK in 2 attempts -- ", + r.Status, client.Attempts()-1) + } +} + +func TestDoRetryForStatusCodesWithNoSuccess(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("504 Gateway Timeout", http.StatusGatewayTimeout), 5) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(2, time.Duration(2*time.Second), http.StatusGatewayTimeout), + ) + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if client.Attempts() != 3 { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: failed stop after %v retry attempts; Want: Stop after 2 retry attempts", + client.Attempts()-1) + } +} + +func TestDoRetryForStatusCodes_CodeNotInRetryList(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 No Content", http.StatusNoContent), 1) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(6, time.Duration(2*time.Second), http.StatusGatewayTimeout), + ) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if client.Attempts() != 1 || r.Status != "204 No Content" { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: Retry attempts %v for StatusCode %v; Want: 0 attempts for StatusCode 204", + client.Attempts(), r.Status) + } +} + +func TestDoRetryForStatusCodes_RequestBodyReadError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 No Content", http.StatusNoContent), 2) + + r, err := SendWithSender(client, mocks.NewRequestWithCloseBody(), + DoRetryForStatusCodes(6, time.Duration(2*time.Second), http.StatusGatewayTimeout), + ) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if err == nil || client.Attempts() != 0 { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: Not failed for request body read error; Want: Failed for body read error - %v", err) + } +} + +func newAcceptedResponse() *http.Response { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + return resp +} + +func TestDelayWithRetryAfterWithSuccess(t *testing.T) { + after, retries := 5, 2 + totalSecs := after * retries + + client := mocks.NewSender() + resp := mocks.NewResponseWithStatus("429 Too many requests", http.StatusTooManyRequests) + mocks.SetResponseHeader(resp, "Retry-After", fmt.Sprintf("%v", after)) + client.AppendAndRepeatResponse(resp, retries) + client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK)) + + d := time.Second * time.Duration(totalSecs) + start := time.Now() + r, _ := SendWithSender(client, mocks.NewRequest(), + 
DoRetryForStatusCodes(5, time.Duration(time.Second), http.StatusTooManyRequests),
+	)
+
+	if time.Since(start) < d {
+		t.Fatal("autorest: DelayWithRetryAfter stopped too soon")
+	}
+
+	Respond(r,
+		ByDiscardingBody(),
+		ByClosing())
+
+	if client.Attempts() != 3 {
+		t.Fatalf("autorest: Sender#DelayWithRetryAfter -- Got: StatusCode %v in %v attempts; Want: StatusCode 200 OK in 2 attempts -- ",
+			r.Status, client.Attempts()-1)
+	}
+}
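+
+// Editorial sketch, not upstream code: DoRetryForStatusCodes consults the
+// Retry-After header (via DelayWithRetryAfter) before falling back to
+// exponential backoff, so a 429 carrying "Retry-After: 1" waits one second
+// regardless of the attempt count. Mock values are illustrative.
+func exampleRetryAfterFlow() {
+	client := mocks.NewSender()
+	resp := mocks.NewResponseWithStatus("429 Too many requests", http.StatusTooManyRequests)
+	mocks.SetResponseHeader(resp, "Retry-After", "1")
+	client.AppendResponse(resp)
+	client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK))
+
+	r, _ := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForStatusCodes(3, time.Second, http.StatusTooManyRequests))
+	fmt.Println(r.Status) // "200 OK" after a single header-driven delay
+}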
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go
new file mode 100644
index 000000000..fdda2ce1a
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go
@@ -0,0 +1,147 @@
+/*
+Package to provides helpers to ease working with pointer values of marshalled structures.
+*/
+package to
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// String returns a string value for the passed string pointer. It returns the empty string if the
+// pointer is nil.
+func String(s *string) string {
+	if s != nil {
+		return *s
+	}
+	return ""
+}
+
+// StringPtr returns a pointer to the passed string.
+func StringPtr(s string) *string {
+	return &s
+}
+
+// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil
+// slice if the pointer is nil.
+func StringSlice(s *[]string) []string {
+	if s != nil {
+		return *s
+	}
+	return nil
+}
+
+// StringSlicePtr returns a pointer to the passed string slice.
+func StringSlicePtr(s []string) *[]string {
+	return &s
+}
+
+// StringMap returns a map of strings built from the map of string pointers. The empty string is
+// used for nil pointers.
+func StringMap(msp map[string]*string) map[string]string {
+	ms := make(map[string]string, len(msp))
+	for k, sp := range msp {
+		if sp != nil {
+			ms[k] = *sp
+		} else {
+			ms[k] = ""
+		}
+	}
+	return ms
+}
+
+// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings.
+func StringMapPtr(ms map[string]string) *map[string]*string {
+	msp := make(map[string]*string, len(ms))
+	for k, s := range ms {
+		msp[k] = StringPtr(s)
+	}
+	return &msp
+}
+
+// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil.
+func Bool(b *bool) bool {
+	if b != nil {
+		return *b
+	}
+	return false
+}
+
+// BoolPtr returns a pointer to the passed bool.
+func BoolPtr(b bool) *bool {
+	return &b
+}
+
+// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil.
+func Int(i *int) int {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// IntPtr returns a pointer to the passed int.
+func IntPtr(i int) *int {
+	return &i
+}
+
+// Int32 returns an int32 value for the passed int32 pointer. It returns 0 if the pointer is nil.
+func Int32(i *int32) int32 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int32Ptr returns a pointer to the passed int32.
+func Int32Ptr(i int32) *int32 {
+	return &i
+}
+
+// Int64 returns an int64 value for the passed int64 pointer. It returns 0 if the pointer is nil.
+func Int64(i *int64) int64 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int64Ptr returns a pointer to the passed int64.
+func Int64Ptr(i int64) *int64 {
+	return &i
+}
+
+// Float32 returns a float32 value for the passed float32 pointer. It returns 0.0 if the pointer
+// is nil.
+func Float32(i *float32) float32 {
+	if i != nil {
+		return *i
+	}
+	return 0.0
+}
+
+// Float32Ptr returns a pointer to the passed float32.
+func Float32Ptr(i float32) *float32 {
+	return &i
+}
+
+// Float64 returns a float64 value for the passed float64 pointer. It returns 0.0 if the pointer
+// is nil.
+func Float64(i *float64) float64 {
+	if i != nil {
+		return *i
+	}
+	return 0.0
+}
+
+// Float64Ptr returns a pointer to the passed float64.
+func Float64Ptr(i float64) *float64 {
+	return &i
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert_test.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert_test.go
new file mode 100644
index 000000000..f8177a163
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/convert_test.go
@@ -0,0 +1,234 @@
+package to
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
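+
+// Editorial note: a minimal sketch of the pointer round-trips the helpers
+// above provide (handy for optional SDK fields; values illustrative):
+//
+//	name := StringPtr("rg-1") // *string for an optional field
+//	_ = String(name)          // "rg-1"
+//	_ = String(nil)           // "" -- nil collapses to the zero value
+//	tags := StringMapPtr(map[string]string{"env": "dev"})
+//	_ = StringMap(*tags)      // map[env:dev]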
+ +import ( + "reflect" + "testing" +) + +func TestString(t *testing.T) { + v := "" + if String(&v) != v { + t.Fatalf("to: String failed to return the correct string -- expected %v, received %v", + v, String(&v)) + } +} + +func TestStringHandlesNil(t *testing.T) { + if String(nil) != "" { + t.Fatalf("to: String failed to correctly convert nil -- expected %v, received %v", + "", String(nil)) + } +} + +func TestStringPtr(t *testing.T) { + v := "" + if *StringPtr(v) != v { + t.Fatalf("to: StringPtr failed to return the correct string -- expected %v, received %v", + v, *StringPtr(v)) + } +} + +func TestStringSlice(t *testing.T) { + v := []string{} + if out := StringSlice(&v); !reflect.DeepEqual(out, v) { + t.Fatalf("to: StringSlice failed to return the correct slice -- expected %v, received %v", + v, out) + } +} + +func TestStringSliceHandlesNil(t *testing.T) { + if out := StringSlice(nil); out != nil { + t.Fatalf("to: StringSlice failed to correctly convert nil -- expected %v, received %v", + nil, out) + } +} + +func TestStringSlicePtr(t *testing.T) { + v := []string{"a", "b"} + if out := StringSlicePtr(v); !reflect.DeepEqual(*out, v) { + t.Fatalf("to: StringSlicePtr failed to return the correct slice -- expected %v, received %v", + v, *out) + } +} + +func TestStringMap(t *testing.T) { + msp := map[string]*string{"foo": StringPtr("foo"), "bar": StringPtr("bar"), "baz": StringPtr("baz")} + for k, v := range StringMap(msp) { + if *msp[k] != v { + t.Fatalf("to: StringMap incorrectly converted an entry -- expected [%s]%v, received[%s]%v", + k, v, k, *msp[k]) + } + } +} + +func TestStringMapHandlesNil(t *testing.T) { + msp := map[string]*string{"foo": StringPtr("foo"), "bar": nil, "baz": StringPtr("baz")} + for k, v := range StringMap(msp) { + if msp[k] == nil && v != "" { + t.Fatalf("to: StringMap incorrectly converted a nil entry -- expected [%s]%v, received[%s]%v", + k, v, k, *msp[k]) + } + } +} + +func TestStringMapPtr(t *testing.T) { + ms := map[string]string{"foo": "foo", "bar": "bar", "baz": "baz"} + for k, msp := range *StringMapPtr(ms) { + if ms[k] != *msp { + t.Fatalf("to: StringMapPtr incorrectly converted an entry -- expected [%s]%v, received[%s]%v", + k, ms[k], k, *msp) + } + } +} + +func TestBool(t *testing.T) { + v := false + if Bool(&v) != v { + t.Fatalf("to: Bool failed to return the correct string -- expected %v, received %v", + v, Bool(&v)) + } +} + +func TestBoolHandlesNil(t *testing.T) { + if Bool(nil) != false { + t.Fatalf("to: Bool failed to correctly convert nil -- expected %v, received %v", + false, Bool(nil)) + } +} + +func TestBoolPtr(t *testing.T) { + v := false + if *BoolPtr(v) != v { + t.Fatalf("to: BoolPtr failed to return the correct string -- expected %v, received %v", + v, *BoolPtr(v)) + } +} + +func TestInt(t *testing.T) { + v := 0 + if Int(&v) != v { + t.Fatalf("to: Int failed to return the correct string -- expected %v, received %v", + v, Int(&v)) + } +} + +func TestIntHandlesNil(t *testing.T) { + if Int(nil) != 0 { + t.Fatalf("to: Int failed to correctly convert nil -- expected %v, received %v", + 0, Int(nil)) + } +} + +func TestIntPtr(t *testing.T) { + v := 0 + if *IntPtr(v) != v { + t.Fatalf("to: IntPtr failed to return the correct string -- expected %v, received %v", + v, *IntPtr(v)) + } +} + +func TestInt32(t *testing.T) { + v := int32(0) + if Int32(&v) != v { + t.Fatalf("to: Int32 failed to return the correct string -- expected %v, received %v", + v, Int32(&v)) + } +} + +func TestInt32HandlesNil(t *testing.T) { + if Int32(nil) != int32(0) { + 
t.Fatalf("to: Int32 failed to correctly convert nil -- expected %v, received %v", + 0, Int32(nil)) + } +} + +func TestInt32Ptr(t *testing.T) { + v := int32(0) + if *Int32Ptr(v) != v { + t.Fatalf("to: Int32Ptr failed to return the correct string -- expected %v, received %v", + v, *Int32Ptr(v)) + } +} + +func TestInt64(t *testing.T) { + v := int64(0) + if Int64(&v) != v { + t.Fatalf("to: Int64 failed to return the correct string -- expected %v, received %v", + v, Int64(&v)) + } +} + +func TestInt64HandlesNil(t *testing.T) { + if Int64(nil) != int64(0) { + t.Fatalf("to: Int64 failed to correctly convert nil -- expected %v, received %v", + 0, Int64(nil)) + } +} + +func TestInt64Ptr(t *testing.T) { + v := int64(0) + if *Int64Ptr(v) != v { + t.Fatalf("to: Int64Ptr failed to return the correct string -- expected %v, received %v", + v, *Int64Ptr(v)) + } +} + +func TestFloat32(t *testing.T) { + v := float32(0) + if Float32(&v) != v { + t.Fatalf("to: Float32 failed to return the correct string -- expected %v, received %v", + v, Float32(&v)) + } +} + +func TestFloat32HandlesNil(t *testing.T) { + if Float32(nil) != float32(0) { + t.Fatalf("to: Float32 failed to correctly convert nil -- expected %v, received %v", + 0, Float32(nil)) + } +} + +func TestFloat32Ptr(t *testing.T) { + v := float32(0) + if *Float32Ptr(v) != v { + t.Fatalf("to: Float32Ptr failed to return the correct string -- expected %v, received %v", + v, *Float32Ptr(v)) + } +} + +func TestFloat64(t *testing.T) { + v := float64(0) + if Float64(&v) != v { + t.Fatalf("to: Float64 failed to return the correct string -- expected %v, received %v", + v, Float64(&v)) + } +} + +func TestFloat64HandlesNil(t *testing.T) { + if Float64(nil) != float64(0) { + t.Fatalf("to: Float64 failed to correctly convert nil -- expected %v, received %v", + 0, Float64(nil)) + } +} + +func TestFloat64Ptr(t *testing.T) { + v := float64(0) + if *Float64Ptr(v) != v { + t.Fatalf("to: Float64Ptr failed to return the correct string -- expected %v, received %v", + v, *Float64Ptr(v)) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go new file mode 100644 index 000000000..dfdc6efdf --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -0,0 +1,192 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/url" + "reflect" + "sort" + "strings" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. +func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// MapToValues method converts map[string]interface{} to url.Values. +func MapToValues(m map[string]interface{}) url.Values { + v := url.Values{} + for key, value := range m { + x := reflect.ValueOf(value) + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + for i := 0; i < x.Len(); i++ { + v.Add(key, ensureValueString(x.Index(i))) + } + } else { + v.Add(key, ensureValueString(value)) + } + } + return v +} + +// String method converts interface v to string. If interface is a list, it +// joins list elements using separator. +func String(v interface{}, sep ...string) string { + if len(sep) > 0 { + return ensureValueString(strings.Join(v.([]string), sep[0])) + } + return ensureValueString(v) +} + +// Encode method encodes url path and query parameters. 
+func Encode(location string, v interface{}, sep ...string) string {
+	s := String(v, sep...)
+	switch strings.ToLower(location) {
+	case "path":
+		return pathEscape(s)
+	case "query":
+		return queryEscape(s)
+	default:
+		return s
+	}
+}
+
+func pathEscape(s string) string {
+	return strings.Replace(url.QueryEscape(s), "+", "%20", -1)
+}
+
+func queryEscape(s string) string {
+	return url.QueryEscape(s)
+}
+
+// createQuery works like the Encode method of the "net/url" package, except that it does not
+// re-encode the query parameter values, because they already arrive encoded. It formats the
+// values map in query format (bar=foo&a=b).
+func createQuery(v url.Values) string {
+	var buf bytes.Buffer
+	keys := make([]string, 0, len(v))
+	for k := range v {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		vs := v[k]
+		prefix := url.QueryEscape(k) + "="
+		for _, v := range vs {
+			if buf.Len() > 0 {
+				buf.WriteByte('&')
+			}
+			buf.WriteString(prefix)
+			buf.WriteString(v)
+		}
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_test.go b/vendor/github.com/Azure/go-autorest/autorest/utility_test.go
new file mode 100644
index 000000000..1cda8758f
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility_test.go
@@ -0,0 +1,382 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
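+
+// Editorial note: a minimal sketch of the query helpers exercised below.
+// MapToValues flattens slice values into repeated url.Values entries, and
+// Encode escapes per location ("path" keeps spaces as %20, "query" uses '+'):
+//
+//	v := MapToValues(map[string]interface{}{"tag": []string{"a", "b"}, "n": 2})
+//	_ = v.Encode()             // "n=2&tag=a&tag=b"
+//	_ = Encode("path", "a b")  // "a%20b"
+//	_ = Encode("query", "a b") // "a+b"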
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"net/url"
+	"reflect"
+	"sort"
+	"strings"
+	"testing"
+
+	"github.com/Azure/go-autorest/autorest/mocks"
+)
+
+const (
+	jsonT = `
+    {
+      "name":"Rob Pike",
+      "age":42
+    }`
+	xmlT = `
+    <t>
+      <name>Rob Pike</name>
+      <age>42</age>
+    </t>`
+)
+
+func TestNewDecoderCreatesJSONDecoder(t *testing.T) {
+	d := NewDecoder(EncodedAsJSON, strings.NewReader(jsonT))
+	_, ok := d.(*json.Decoder)
+	if d == nil || !ok {
+		t.Fatal("autorest: NewDecoder failed to create a JSON decoder when requested")
+	}
+}
+
+func TestNewDecoderCreatesXMLDecoder(t *testing.T) {
+	d := NewDecoder(EncodedAsXML, strings.NewReader(xmlT))
+	_, ok := d.(*xml.Decoder)
+	if d == nil || !ok {
+		t.Fatal("autorest: NewDecoder failed to create an XML decoder when requested")
+	}
+}
+
+func TestNewDecoderReturnsNilForUnknownEncoding(t *testing.T) {
+	d := NewDecoder("unknown", strings.NewReader(xmlT))
+	if d != nil {
+		t.Fatal("autorest: NewDecoder created a decoder for an unknown encoding")
+	}
+}
+
+func TestCopyAndDecodeDecodesJSON(t *testing.T) {
+	_, err := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT), &mocks.T{})
+	if err != nil {
+		t.Fatalf("autorest: CopyAndDecode returned an error with valid JSON - %v", err)
+	}
+}
+
+func TestCopyAndDecodeDecodesXML(t *testing.T) {
+	_, err := CopyAndDecode(EncodedAsXML, strings.NewReader(xmlT), &mocks.T{})
+	if err != nil {
+		t.Fatalf("autorest: CopyAndDecode returned an error with valid XML - %v", err)
+	}
+}
+
+func TestCopyAndDecodeReturnsJSONDecodingErrors(t *testing.T) {
+	_, err := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT[0:len(jsonT)-2]), &mocks.T{})
+	if err == nil {
+		t.Fatalf("autorest: CopyAndDecode failed to return an error with invalid JSON")
+	}
+}
+
+func TestCopyAndDecodeReturnsXMLDecodingErrors(t *testing.T) {
+	_, err := CopyAndDecode(EncodedAsXML, strings.NewReader(xmlT[0:len(xmlT)-2]), &mocks.T{})
+	if err == nil {
+		t.Fatalf("autorest: CopyAndDecode failed to return an error with invalid XML")
+	}
+}
+
+func TestCopyAndDecodeAlwaysReturnsACopy(t *testing.T) {
+	b, _ := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT), &mocks.T{})
+	if b.String() != jsonT {
+		t.Fatalf("autorest: CopyAndDecode failed to return a valid copy of the data - %v", b.String())
+	}
+}
+
+func TestTeeReadCloser_Copies(t *testing.T) {
+	v := &mocks.T{}
+	r := mocks.NewResponseWithContent(jsonT)
+	b := &bytes.Buffer{}
+
+	r.Body = TeeReadCloser(r.Body, b)
+
+	err := Respond(r,
+		ByUnmarshallingJSON(v),
+		ByClosing())
+	if err != nil {
+		t.Fatalf("autorest: TeeReadCloser returned an unexpected error -- %v", err)
+	}
+	if b.String() != jsonT {
+		t.Fatalf("autorest: TeeReadCloser failed to copy the bytes read")
+	}
+}
+
+func TestTeeReadCloser_PassesReadErrors(t *testing.T) {
+	v := &mocks.T{}
+	r := mocks.NewResponseWithContent(jsonT)
+
+	r.Body.(*mocks.Body).Close()
+	r.Body = TeeReadCloser(r.Body, &bytes.Buffer{})
+
+	err := Respond(r,
+		ByUnmarshallingJSON(v),
+		ByClosing())
+	if err == nil {
+		t.Fatalf("autorest: TeeReadCloser failed to return the expected error")
+	}
+}
+
+func TestTeeReadCloser_ClosesWrappedReader(t *testing.T) {
+	v := &mocks.T{}
+	r := mocks.NewResponseWithContent(jsonT)
+
+	b := r.Body.(*mocks.Body)
+	r.Body = TeeReadCloser(r.Body, &bytes.Buffer{})
+	err := Respond(r,
+		ByUnmarshallingJSON(v),
+		ByClosing())
+	if err != nil {
+		t.Fatalf("autorest: TeeReadCloser returned an unexpected error -- %v", err)
+	}
+	if b.IsOpen() {
+		t.Fatalf("autorest: TeeReadCloser failed to close the nested io.ReadCloser")
+	}
+}
+
+func TestContainsIntFindsValue(t *testing.T) {
+	ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+	v := 5
+	if !containsInt(ints, v) {
+		t.Fatalf("autorest: containsInt failed to find %v in %v", v, ints)
+	}
+}
+
+func TestContainsIntDoesNotFindValue(t *testing.T) {
+	ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+	v := 42
+	if containsInt(ints, v) {
+		t.Fatalf("autorest: containsInt unexpectedly found %v in %v", v, ints)
+	}
+}
+
+func TestContainsIntAcceptsEmptyList(t *testing.T) {
+	ints := make([]int, 10)
+	if containsInt(ints, 42) {
+		t.Fatalf("autorest: containsInt failed to handle an empty list")
+	}
+}
+
+func TestContainsIntAcceptsNilList(t *testing.T) {
+	var ints []int
+	if containsInt(ints, 42) {
+		t.Fatalf("autorest: containsInt failed to handle a nil list")
+	}
+}
+
+func TestEscapeStrings(t *testing.T) {
+	m := map[string]string{
+		"string": "a long string with = odd characters",
+		"int":    "42",
+		"nil":    "",
+	}
+	r := map[string]string{
+		"string": "a+long+string+with+%3D+odd+characters",
+		"int":    "42",
+		"nil":    "",
+	}
+	v := escapeValueStrings(m)
+	if !reflect.DeepEqual(v, r) {
+		t.Fatalf("autorest: escapeValueStrings returned %v\n", v)
+	}
+}
+
+func TestEnsureStrings(t *testing.T) {
+	m := map[string]interface{}{
+		"string": "string",
+		"int":    42,
+		"nil":    nil,
+		"bytes":  []byte{255, 254, 253},
+	}
+	r := map[string]string{
+		"string": "string",
+		"int":    "42",
+		"nil":    "",
+		"bytes":  string([]byte{255, 254, 253}),
+	}
+	v := ensureValueStrings(m)
+	if !reflect.DeepEqual(v, r) {
+		t.Fatalf("autorest: ensureValueStrings returned %v\n", v)
+	}
+}
+
+func ExampleString() {
+	m := []string{
+		"string1",
+		"string2",
+		"string3",
+	}
+
+	fmt.Println(String(m, ","))
+	// Output: string1,string2,string3
+}
+
+func TestStringWithValidString(t *testing.T) {
+	i := 123
+	if String(i) != "123" {
+		t.Fatal("autorest: String method failed to convert integer 123 to string")
+	}
+}
+
+func TestEncodeWithValidPath(t *testing.T) {
+	s := Encode("Path", "Hello Gopher")
+	if s != "Hello%20Gopher" {
+		t.Fatalf("autorest: Encode method failed for valid path encoding. Got: %v; Want: %v", s, "Hello%20Gopher")
+	}
+}
+
+func TestEncodeWithValidQuery(t *testing.T) {
+	s := Encode("Query", "Hello Gopher")
+	if s != "Hello+Gopher" {
+		t.Fatalf("autorest: Encode method failed for valid query encoding. Got: '%v'; Want: 'Hello+Gopher'", s)
+	}
+}
+
+func TestEncodeWithValidNotPathQuery(t *testing.T) {
+	s := Encode("Host", "Hello Gopher")
+	if s != "Hello Gopher" {
+		t.Fatalf("autorest: Encode method failed for parameter not query or path. 
Got: '%v'; Want: 'Hello Gopher'", s) + } +} + +func TestMapToValues(t *testing.T) { + m := map[string]interface{}{ + "a": "a", + "b": 2, + } + v := url.Values{} + v.Add("a", "a") + v.Add("b", "2") + if !isEqual(v, MapToValues(m)) { + t.Fatalf("autorest: MapToValues method failed to return correct values - expected(%v) got(%v)", v, MapToValues(m)) + } +} + +func TestMapToValuesWithArrayValues(t *testing.T) { + m := map[string]interface{}{ + "a": []string{"a", "b"}, + "b": 2, + "c": []int{3, 4}, + } + v := url.Values{} + v.Add("a", "a") + v.Add("a", "b") + v.Add("b", "2") + v.Add("c", "3") + v.Add("c", "4") + + if !isEqual(v, MapToValues(m)) { + t.Fatalf("autorest: MapToValues method failed to return correct values - expected(%v) got(%v)", v, MapToValues(m)) + } +} + +func isEqual(v, u url.Values) bool { + for key, value := range v { + if len(u[key]) == 0 { + return false + } + sort.Strings(value) + sort.Strings(u[key]) + for i := range value { + if value[i] != u[key][i] { + return false + } + } + u.Del(key) + } + if len(u) > 0 { + return false + } + return true +} + +func doEnsureBodyClosed(t *testing.T) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if resp != nil && resp.Body != nil && resp.Body.(*mocks.Body).IsOpen() { + t.Fatal("autorest: Expected Body to be closed -- it was left open") + } + return resp, err + }) + } +} + +type mockAuthorizer struct{} + +func (ma mockAuthorizer) WithAuthorization() PrepareDecorator { + return WithHeader(headerAuthorization, mocks.TestAuthorizationHeader) +} + +type mockFailingAuthorizer struct{} + +func (mfa mockFailingAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return r, fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error") + }) + } +} + +type mockInspector struct { + wasInvoked bool +} + +func (mi *mockInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + mi.wasInvoked = true + return p.Prepare(r) + }) + } +} + +func (mi *mockInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + mi.wasInvoked = true + return r.Respond(resp) + }) + } +} + +func withMessage(output *string, msg string) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil { + *output += msg + } + return resp, err + }) + } +} + +func withErrorRespondDecorator(e *error) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil { + return err + } + *e = fmt.Errorf("autorest: Faux Respond Error") + return *e + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utils/auth.go b/vendor/github.com/Azure/go-autorest/autorest/utils/auth.go new file mode 100644 index 000000000..6dc2c3b07 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utils/auth.go @@ -0,0 +1,56 @@ +package utils + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "os" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" +) + +// GetAuthorizer gets an Azure Service Principal authorizer. +// This func assumes "AZURE_TENANT_ID", "AZURE_CLIENT_ID", +// "AZURE_CLIENT_SECRET" are set as environment variables. +func GetAuthorizer(env azure.Environment) (*autorest.BearerAuthorizer, error) { + tenantID := GetEnvVarOrExit("AZURE_TENANT_ID") + + oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, tenantID) + if err != nil { + return nil, err + } + + clientID := GetEnvVarOrExit("AZURE_CLIENT_ID") + clientSecret := GetEnvVarOrExit("AZURE_CLIENT_SECRET") + + spToken, err := adal.NewServicePrincipalToken(*oauthConfig, clientID, clientSecret, env.ResourceManagerEndpoint) + if err != nil { + return nil, err + } + + return autorest.NewBearerAuthorizer(spToken), nil +} + +// GetEnvVarOrExit returns the value of specified environment variable or terminates if it's not defined. +func GetEnvVarOrExit(varName string) string { + value := os.Getenv(varName) + if value == "" { + fmt.Printf("Missing environment variable %s\n", varName) + os.Exit(1) + } + return value +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utils/commit.go b/vendor/github.com/Azure/go-autorest/autorest/utils/commit.go new file mode 100644 index 000000000..9bc4e3e04 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utils/commit.go @@ -0,0 +1,32 @@ +package utils + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "os/exec" +) + +// GetCommit returns git HEAD (short) +func GetCommit() string { + cmd := exec.Command("git", "rev-parse", "HEAD") + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return "" + } + return string(out.Bytes()[:7]) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go new file mode 100644 index 000000000..3fe62c930 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go @@ -0,0 +1,395 @@ +/* +Package validation provides methods for validating parameter value using reflection. +*/ +package validation + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "reflect" + "regexp" + "strings" +) + +// Constraint stores constraint name, target field name +// Rule and chain validations. +type Constraint struct { + + // Target field name for validation. + Target string + + // Constraint name e.g. minLength, MaxLength, Pattern, etc. + Name string + + // Rule for constraint e.g. greater than 10, less than 5 etc. + Rule interface{} + + // Chain Validations for struct type + Chain []Constraint +} + +// Validation stores parameter-wise validation. +type Validation struct { + TargetValue interface{} + Constraints []Constraint +} + +// Constraint list +const ( + Empty = "Empty" + Null = "Null" + ReadOnly = "ReadOnly" + Pattern = "Pattern" + MaxLength = "MaxLength" + MinLength = "MinLength" + MaxItems = "MaxItems" + MinItems = "MinItems" + MultipleOf = "MultipleOf" + UniqueItems = "UniqueItems" + InclusiveMaximum = "InclusiveMaximum" + ExclusiveMaximum = "ExclusiveMaximum" + ExclusiveMinimum = "ExclusiveMinimum" + InclusiveMinimum = "InclusiveMinimum" +) + +// Validate method validates constraints on parameter +// passed in validation array. +func Validate(m []Validation) error { + for _, item := range m { + v := reflect.ValueOf(item.TargetValue) + for _, constraint := range item.Constraints { + var err error + switch v.Kind() { + case reflect.Ptr: + err = validatePtr(v, constraint) + case reflect.String: + err = validateString(v, constraint) + case reflect.Struct: + err = validateStruct(v, constraint) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + err = validateInt(v, constraint) + case reflect.Float32, reflect.Float64: + err = validateFloat(v, constraint) + case reflect.Array, reflect.Slice, reflect.Map: + err = validateArrayMap(v, constraint) + default: + err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) + } + + if err != nil { + return err + } + } + } + return nil +} + +func validateStruct(x reflect.Value, v Constraint, name ...string) error { + //Get field name from target name which is in format a.b.c + s := strings.Split(v.Target, ".") + f := x.FieldByName(s[len(s)-1]) + if isZero(f) { + return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) + } + + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(f), + Constraints: []Constraint{v}, + }, + }) +} + +func validatePtr(x reflect.Value, v Constraint) error { + if v.Name == ReadOnly { + if !x.IsNil() { + return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") + } + return nil + } + if x.IsNil() { + return checkNil(x, v) + } + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x.Elem()), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateInt(x reflect.Value, v Constraint) error { + i := x.Int() + r, ok := v.Rule.(int) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case MultipleOf: + if i%int64(r) != 0 { + return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) + } 
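+	// Example (illustrative): Constraint{Target: "count", Name: MultipleOf, Rule: 5}
+	// accepts 15 (15 % 5 == 0) and rejects 7 with "value must be a multiple of 5".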
+	case ExclusiveMinimum:
+		if i <= int64(r) {
+			return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
+		}
+	case ExclusiveMaximum:
+		if i >= int64(r) {
+			return createError(x, v, fmt.Sprintf("value must be less than %v", r))
+		}
+	case InclusiveMinimum:
+		if i < int64(r) {
+			return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
+		}
+	case InclusiveMaximum:
+		if i > int64(r) {
+			return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
+		}
+	default:
+		return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name))
+	}
+	return nil
+}
+
+func validateFloat(x reflect.Value, v Constraint) error {
+	f := x.Float()
+	r, ok := v.Rule.(float64)
+	if !ok {
+		return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule))
+	}
+	switch v.Name {
+	case ExclusiveMinimum:
+		if f <= r {
+			return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
+		}
+	case ExclusiveMaximum:
+		if f >= r {
+			return createError(x, v, fmt.Sprintf("value must be less than %v", r))
+		}
+	case InclusiveMinimum:
+		if f < r {
+			return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
+		}
+	case InclusiveMaximum:
+		if f > r {
+			return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
+		}
+	default:
+		return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name))
+	}
+	return nil
+}
+
+func validateString(x reflect.Value, v Constraint) error {
+	s := x.String()
+	switch v.Name {
+	case Empty:
+		if len(s) == 0 {
+			return checkEmpty(x, v)
+		}
+	case Pattern:
+		reg, err := regexp.Compile(v.Rule.(string))
+		if err != nil {
+			return createError(x, v, err.Error())
+		}
+		if !reg.MatchString(s) {
+			return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule))
+		}
+	case MaxLength:
+		if _, ok := v.Rule.(int); !ok {
+			return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule))
+		}
+		if len(s) > v.Rule.(int) {
+			return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule))
+		}
+	case MinLength:
+		if _, ok := v.Rule.(int); !ok {
+			return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule))
+		}
+		if len(s) < v.Rule.(int) {
+			return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule))
+		}
+	case ReadOnly:
+		if len(s) > 0 {
+			return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request")
+		}
+	default:
+		return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name))
+	}
+
+	if v.Chain != nil {
+		return Validate([]Validation{
+			{
+				TargetValue: getInterfaceValue(x),
+				Constraints: v.Chain,
+			},
+		})
+	}
+	return nil
+}
+
+func validateArrayMap(x reflect.Value, v Constraint) error {
+	switch v.Name {
+	case Null:
+		if x.IsNil() {
+			return checkNil(x, v)
+		}
+	case Empty:
+		if x.IsNil() || x.Len() == 0 {
+			return checkEmpty(x, v)
+		}
+	case MaxItems:
+		if _, ok := v.Rule.(int); !ok {
+			return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule))
+		}
+		if x.Len() > v.Rule.(int) {
+			return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len()))
+		}
+	case MinItems:
+		if _, ok := v.Rule.(int); !ok {
+			return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule))
+		}
+		if x.Len() < v.Rule.(int) {
+			return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len()))
+		}
+	case UniqueItems:
+		if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
+			if !checkForUniqueInArray(x) {
+				return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x))
+			}
+		} else if x.Kind() == reflect.Map {
+			if !checkForUniqueInMap(x) {
+				return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x))
+			}
+		} else {
+			return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind()))
+		}
+	case ReadOnly:
+		if x.Len() != 0 {
+			return createError(x, v, "readonly parameter; must send as nil or empty in request")
+		}
+	case Pattern:
+		reg, err := regexp.Compile(v.Rule.(string))
+		if err != nil {
+			return createError(x, v, err.Error())
+		}
+		keys := x.MapKeys()
+		for _, k := range keys {
+			if !reg.MatchString(k.String()) {
+				return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule))
+			}
+		}
+	default:
+		return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name))
+	}
+
+	if v.Chain != nil {
+		return Validate([]Validation{
+			{
+				TargetValue: getInterfaceValue(x),
+				Constraints: v.Chain,
+			},
+		})
+	}
+	return nil
+}
+
+func checkNil(x reflect.Value, v Constraint) error {
+	if _, ok := v.Rule.(bool); !ok {
+		return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule))
+	}
+	if v.Rule.(bool) {
+		return createError(x, v, "value can not be null; required parameter")
+	}
+	return nil
+}
+
+func checkEmpty(x reflect.Value, v Constraint) error {
+	if _, ok := v.Rule.(bool); !ok {
+		return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule))
+	}
+	if v.Rule.(bool) {
+		return createError(x, v, "value can not be null or empty; required parameter")
+	}
+	return nil
+}
+
+// checkForUniqueInArray reports whether all items in the array or slice are
+// unique; nil and empty collections are treated as not unique.
+func checkForUniqueInArray(x reflect.Value) bool {
+	if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
+		return false
+	}
+	arrOfInterface := make([]interface{}, x.Len())
+
+	for i := 0; i < x.Len(); i++ {
+		arrOfInterface[i] = x.Index(i).Interface()
+	}
+
+	m := make(map[interface{}]bool)
+	for _, val := range arrOfInterface {
+		if m[val] {
+			return false
+		}
+		m[val] = true
+	}
+	return true
+}
+
+// checkForUniqueInMap reports whether all values in the map are unique; nil
+// and empty maps are treated as not unique.
+func checkForUniqueInMap(x reflect.Value) bool {
+	if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
+		return false
+	}
+	mapOfInterface := make(map[interface{}]interface{}, x.Len())
+
+	keys := x.MapKeys()
+	for _, k := range keys {
+		mapOfInterface[k.Interface()] = x.MapIndex(k).Interface()
+	}
+
+	m := make(map[interface{}]bool)
+	for _, val := range mapOfInterface {
+		if m[val] {
+			return false
+		}
+		m[val] = true
+	}
+	return true
+}
+
+func getInterfaceValue(x reflect.Value) interface{} {
+	if x.Kind() == reflect.Invalid {
+		return nil
+	}
+	return x.Interface()
+}
+
+func isZero(x interface{}) bool {
+	return x == reflect.Zero(reflect.TypeOf(x)).Interface()
+}
+
+func createError(x reflect.Value, v Constraint, err string) error {
+	return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s",
+		v.Target, v.Name, getInterfaceValue(x), err)
+}
+
+// NewErrorWithValidationError appends the package type and method name to a
+// validation error.
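+//
+// Illustrative call pattern (hypothetical caller; "example.WidgetClient" and
+// "Create" are placeholder names, not part of this package):
+//
+//	if err := Validate(validations); err != nil {
+//		return NewErrorWithValidationError(err, "example.WidgetClient", "Create")
+//	}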
+func NewErrorWithValidationError(err error, packageType, method string) error { + return fmt.Errorf("%s#%s: Invalid input: %v", packageType, method, err) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation_test.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation_test.go new file mode 100644 index 000000000..c86649d8f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation_test.go @@ -0,0 +1,2433 @@ +package validation + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCheckForUniqueInArrayTrue(t *testing.T) { + require.Equal(t, checkForUniqueInArray(reflect.ValueOf([]int{1, 2, 3})), true) +} + +func TestCheckForUniqueInArrayFalse(t *testing.T) { + require.Equal(t, checkForUniqueInArray(reflect.ValueOf([]int{1, 2, 3, 3})), false) +} + +func TestCheckForUniqueInArrayEmpty(t *testing.T) { + require.Equal(t, checkForUniqueInArray(reflect.ValueOf([]int{})), false) +} + +func TestCheckForUniqueInMapTrue(t *testing.T) { + require.Equal(t, checkForUniqueInMap(reflect.ValueOf(map[string]int{"one": 1, "two": 2})), true) +} + +func TestCheckForUniqueInMapFalse(t *testing.T) { + require.Equal(t, checkForUniqueInMap(reflect.ValueOf(map[int]string{1: "one", 2: "one"})), false) +} + +func TestCheckForUniqueInMapEmpty(t *testing.T) { + require.Equal(t, checkForUniqueInMap(reflect.ValueOf(map[int]string{})), false) +} + +func TestCheckEmpty_WithValueEmptyRuleTrue(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), v, "value can not be null or empty; required parameter") + require.Equal(t, checkEmpty(reflect.ValueOf(x), v).Error(), expected.Error()) +} + +func TestCheckEmpty_WithEmptyStringRuleFalse(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "str", + Name: Empty, + Rule: false, + Chain: nil, + } + require.Nil(t, checkEmpty(reflect.ValueOf(x), v)) +} + +func TestCheckEmpty_IncorrectRule(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "str", + Name: Empty, + Rule: 10, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + require.Equal(t, checkEmpty(reflect.ValueOf(x), v).Error(), expected.Error()) +} + +func TestCheckEmpty_WithErrorArray(t *testing.T) { + var x interface{} = []string{} + v := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), v, "value can not be null or empty; required parameter") + require.Equal(t, checkEmpty(reflect.ValueOf(x), v).Error(), expected.Error()) +} + +func TestCheckNil_WithNilValueRuleTrue(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "x", + Name: Null, + Rule: true, + Chain: []Constraint{ + {"x", 
MaxItems, 4, nil}, + }, + } + expected := createError(reflect.ValueOf(x), v, "value can not be null; required parameter") + require.Equal(t, checkNil(reflect.ValueOf(x), v).Error(), expected.Error()) +} + +func TestCheckNil_WithNilValueRuleFalse(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "x", + Name: Null, + Rule: false, + Chain: []Constraint{ + {"x", MaxItems, 4, nil}, + }, + } + require.Nil(t, checkNil(reflect.ValueOf(x), v)) +} + +func TestCheckNil_IncorrectRule(t *testing.T) { + var x interface{} + c := Constraint{ + Target: "str", + Name: Null, + Rule: 10, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", c.Name, c.Rule)) + require.Equal(t, checkNil(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_WithNilValueRuleTrue(t *testing.T) { + var a []string + var x interface{} = a + c := Constraint{ + Target: "arr", + Name: Null, + Rule: true, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, "value can not be null; required parameter") + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c), expected) +} + +func TestValidateArrayMap_WithNilValueRuleFalse(t *testing.T) { + var x interface{} = []string{} + c := Constraint{ + Target: "arr", + Name: Null, + Rule: false, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_WithValueRuleNullTrue(t *testing.T) { + var x interface{} = []string{"1", "2"} + c := Constraint{ + Target: "arr", + Name: Null, + Rule: false, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_WithEmptyValueRuleTrue(t *testing.T) { + var x interface{} = []string{} + c := Constraint{ + Target: "arr", + Name: Empty, + Rule: true, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, "value can not be null or empty; required parameter") + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c), expected) +} + +func TestValidateArrayMap_WithEmptyValueRuleFalse(t *testing.T) { + var x interface{} = []string{} + c := Constraint{ + Target: "arr", + Name: Empty, + Rule: false, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_WithEmptyRuleEmptyTrue(t *testing.T) { + var x interface{} = []string{"1", "2"} + c := Constraint{ + Target: "arr", + Name: Empty, + Rule: false, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_MaxItemsIncorrectRule(t *testing.T) { + var x interface{} = []string{"1", "2"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: false, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("rule must be integer for %v constraint; got: %v", c.Name, c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_MaxItemsNoError(t *testing.T) { + var x interface{} = []string{"1", "2"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_MaxItemsWithError(t *testing.T) { + var x interface{} = []string{"1", "2", "3"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("maximum item limit is %v; got: 3", c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), 
c).Error(), expected.Error()) +} + +func TestValidateArrayMap_MaxItemsWithEmpty(t *testing.T) { + var x interface{} = []string{} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_MinItemsIncorrectRule(t *testing.T) { + var x interface{} = []int{1, 2} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: false, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("rule must be integer for %v constraint; got: %v", c.Name, c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_MinItemsNoError1(t *testing.T) { + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf([]int{1, 2}), c)) +} + +func TestValidateArrayMap_MinItemsNoError2(t *testing.T) { + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf([]int{1, 2, 3}), c)) +} + +func TestValidateArrayMap_MinItemsWithError(t *testing.T) { + var x interface{} = []int{1} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("minimum item limit is %v; got: 1", c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_MinItemsWithEmpty(t *testing.T) { + var x interface{} = []int{} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("minimum item limit is %v; got: 0", c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_Map_MaxItemsIncorrectRule(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: false, + Chain: nil, + } + require.Equal(t, strings.Contains(validateArrayMap(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateArrayMap_Map_MaxItemsNoError(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Map_MaxItemsWithError(t *testing.T) { + a := map[int]string{1: "1", 2: "2", 3: "3"} + var x interface{} = a + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Equal(t, strings.Contains(validateArrayMap(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("maximum item limit is %v; got: %v", c.Rule, len(a))), true) +} + +func TestValidateArrayMap_Map_MaxItemsWithEmpty(t *testing.T) { + a := map[int]string{} + var x interface{} = a + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Map_MinItemsIncorrectRule(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2"} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: false, + Chain: nil, + } + require.Equal(t, strings.Contains(validateArrayMap(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func 
TestValidateArrayMap_Map_MinItemsNoError1(t *testing.T) {
+	var x interface{} = map[int]string{1: "1", 2: "2"}
+	require.Nil(t, validateArrayMap(reflect.ValueOf(x),
+		Constraint{
+			Target: "arr",
+			Name:   MinItems,
+			Rule:   2,
+			Chain:  nil,
+		}))
+}
+
+func TestValidateArrayMap_Map_MinItemsNoError2(t *testing.T) {
+	var x interface{} = map[int]string{1: "1", 2: "2", 3: "3"}
+	c := Constraint{
+		Target: "arr",
+		Name:   MinItems,
+		Rule:   2,
+		Chain:  nil,
+	}
+	require.Nil(t, validateArrayMap(reflect.ValueOf(x), c))
+}
+
+func TestValidateArrayMap_Map_MinItemsWithError(t *testing.T) {
+	a := map[int]string{1: "1"}
+	var x interface{} = a
+	c := Constraint{
+		Target: "arr",
+		Name:   MinItems,
+		Rule:   2,
+		Chain:  nil,
+	}
+	expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("minimum item limit is %v; got: %v", c.Rule, len(a)))
+	require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error())
+}
+
+func TestValidateArrayMap_Map_MinItemsWithEmpty(t *testing.T) {
+	a := map[int]string{}
+	var x interface{} = a
+	c := Constraint{
+		Target: "arr",
+		Name:   MinItems,
+		Rule:   2,
+		Chain:  nil,
+	}
+	expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("minimum item limit is %v; got: %v", c.Rule, len(a)))
+	require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error())
+}
+
+func TestValidateArrayMap_Map_UniqueItemsTrue(t *testing.T) {
+	var x interface{} = map[float64]int{1.2: 1, 1.4: 2}
+	c := Constraint{
+		Target: "str",
+		Name:   UniqueItems,
+		Rule:   true,
+		Chain:  nil,
+	}
+	require.Nil(t, validateArrayMap(reflect.ValueOf(x), c))
+}
+
+func TestValidateArrayMap_Map_UniqueItemsFalse(t *testing.T) {
+	var x interface{} = map[string]string{"1": "1", "2": "2", "3": "1"}
+	c := Constraint{
+		Target: "str",
+		Name:   UniqueItems,
+		Rule:   true,
+		Chain:  nil,
+	}
+	z := validateArrayMap(reflect.ValueOf(x), c)
+	require.Equal(t, strings.Contains(z.Error(),
+		fmt.Sprintf("all items in parameter %q must be unique", c.Target)), true)
+}
+
+func TestValidateArrayMap_Map_UniqueItemsEmpty(t *testing.T) {
+	// An empty map is treated as not unique, so this must return an error.
+	var x interface{} = map[int]float64{}
+	c := Constraint{
+		Target: "str",
+		Name:   UniqueItems,
+		Rule:   true,
+		Chain:  nil,
+	}
+	z := validateArrayMap(reflect.ValueOf(x), c)
+	require.Equal(t, strings.Contains(z.Error(),
+		fmt.Sprintf("all items in parameter %q must be unique", c.Target)), true)
+}
+
+func TestValidateArrayMap_Map_UniqueItemsNil(t *testing.T) {
+	var a map[int]float64
+	var x interface{} = a
+	c := Constraint{
+		Target: "str",
+		Name:   UniqueItems,
+		Rule:   true,
+		Chain:  nil,
+	}
+	z := validateArrayMap(reflect.ValueOf(x), c)
+	require.Equal(t, strings.Contains(z.Error(),
+		fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, x)), true)
+}
+
+func TestValidateArrayMap_Array_UniqueItemsTrue(t *testing.T) {
+	var x interface{} = []int{1, 2}
+	c := Constraint{
+		Target: "str",
+		Name:   UniqueItems,
+
Rule: true, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Array_UniqueItemsFalse(t *testing.T) { + var x interface{} = []string{"1", "2", "1"} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, x)), true) +} + +func TestValidateArrayMap_Array_UniqueItemsEmpty(t *testing.T) { + // Consider Empty array as not unique returns false + var x interface{} = []float64{} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, x)), true) +} + +func TestValidateArrayMap_Array_UniqueItemsNil(t *testing.T) { + // Consider nil array as not unique returns false + var a []float64 + var x interface{} = a + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, x)), true) +} + +func TestValidateArrayMap_Array_UniqueItemsInvalidType(t *testing.T) { + var x interface{} = "hello" + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", c.Name, reflect.ValueOf(x).Kind())), true) +} + +func TestValidateArrayMap_Array_UniqueItemsInvalidConstraint(t *testing.T) { + var x interface{} = "hello" + c := Constraint{ + Target: "str", + Name: "sdad", + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("constraint %v is not applicable to array, slice and map type", c.Name)), true) +} + +func TestValidateArrayMap_ValidateChainConstraint1(t *testing.T) { + a := []int{1, 2, 3, 4} + var x interface{} = a + c := Constraint{ + Target: "str", + Name: Null, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("maximum item limit is %v; got: %v", (c.Chain)[0].Rule, len(a))), true) +} + +func TestValidateArrayMap_ValidateChainConstraint2(t *testing.T) { + a := []int{1, 2, 3, 4} + var x interface{} = a + c := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("maximum item limit is %v; got: %v", (c.Chain)[0].Rule, len(a))), true) +} + +func TestValidateArrayMap_ValidateChainConstraint3(t *testing.T) { + var a []string + var x interface{} = a + c := Constraint{ + Target: "str", + Name: Null, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("value can not be null; required parameter")), true) +} + +func TestValidateArrayMap_ValidateChainConstraint4(t *testing.T) { + var x interface{} = []int{} + c := Constraint{ + Target: "str", + Name: Empty, + Rule: true, 
+ Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("value can not be null or empty; required parameter")), true) +} + +func TestValidateArrayMap_ValidateChainConstraintNilNotRequired(t *testing.T) { + var a []int + var x interface{} = a + c := Constraint{ + Target: "str", + Name: Null, + Rule: false, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_ValidateChainConstraintEmptyNotRequired(t *testing.T) { + var x interface{} = map[string]int{} + c := Constraint{ + Target: "str", + Name: Empty, + Rule: false, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_ReadOnlyWithError(t *testing.T) { + var x interface{} = []int{1, 2} + c := Constraint{ + Target: "str", + Name: ReadOnly, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("readonly parameter; must send as nil or empty in request")), true) +} + +func TestValidateArrayMap_ReadOnlyWithoutError(t *testing.T) { + var x interface{} = []int{} + c := Constraint{ + Target: "str", + Name: ReadOnly, + Rule: true, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateString_ReadOnly(t *testing.T) { + var x interface{} = "Hello Gopher" + c := Constraint{ + Target: "str", + Name: ReadOnly, + Rule: true, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("readonly parameter; must send as nil or empty in request")), true) +} + +func TestValidateString_EmptyTrue(t *testing.T) { + // Empty true means parameter is required but Empty returns error + c := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(""), c).Error(), + fmt.Sprintf("value can not be null or empty; required parameter")), true) +} + +func TestValidateString_EmptyFalse(t *testing.T) { + // Empty false means parameter is not required and Empty return nil + var x interface{} + c := Constraint{ + Target: "str", + Name: Empty, + Rule: false, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf(x), c)) +} + +func TestValidateString_MaxLengthInvalid(t *testing.T) { + // Empty true means parameter is required but Empty returns error + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: MaxLength, + Rule: 4, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value length must be less than or equal to %v", c.Rule)), true) +} + +func TestValidateString_MaxLengthValid(t *testing.T) { + // Empty false means parameter is not required and Empty return nil + c := Constraint{ + Target: "str", + Name: MaxLength, + Rule: 7, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf("Hello"), c)) +} + +func TestValidateString_MaxLengthRuleInvalid(t *testing.T) { + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: MaxLength, + Rule: true, // must be int for maxLength + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer value for %v constraint; got: %v", c.Name, 
c.Rule)), true) +} + +func TestValidateString_MinLengthInvalid(t *testing.T) { + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: MinLength, + Rule: 10, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value length must be greater than or equal to %v", c.Rule)), true) +} + +func TestValidateString_MinLengthValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: MinLength, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf("Hello"), c)) +} + +func TestValidateString_MinLengthRuleInvalid(t *testing.T) { + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: MinLength, + Rule: true, // must be int for minLength + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer value for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateString_PatternInvalidPattern(t *testing.T) { + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: Pattern, + Rule: `^[[:alnum:$`, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + "error parsing regexp: missing closing ]"), true) +} + +func TestValidateString_PatternMatch1(t *testing.T) { + c := Constraint{ + Target: "str", + Name: Pattern, + Rule: `^http://\w+$`, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf("http://masd"), c)) +} + +func TestValidateString_PatternMatch2(t *testing.T) { + c := Constraint{ + Target: "str", + Name: Pattern, + Rule: `^[a-zA-Z0-9]+$`, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf("asdadad2323sad"), c)) +} + +func TestValidateString_PatternNotMatch(t *testing.T) { + var x interface{} = "asdad@@ad2323sad" + c := Constraint{ + Target: "str", + Name: Pattern, + Rule: `^[a-zA-Z0-9]+$`, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value doesn't match pattern %v", c.Rule)), true) +} + +func TestValidateString_InvalidConstraint(t *testing.T) { + var x interface{} = "asdad@@ad2323sad" + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: "^[a-zA-Z0-9]+$", + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("constraint %s is not applicable to string type", c.Name)), true) +} + +func TestValidateFloat_InvalidConstraint(t *testing.T) { + var x interface{} = 1.4 + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: 3.0, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("constraint %v is not applicable for type float", c.Name)), true) +} + +func TestValidateFloat_InvalidRuleValue(t *testing.T) { + var x interface{} = 1.4 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 3, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be float value for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateFloat_ExclusiveMinimumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1.0, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_ExclusiveMinimumConstraintInvalid(t *testing.T) { + var x interface{} = 1.4 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1.5, + 
Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be greater than %v", c.Rule)), true) +} + +func TestValidateFloat_ExclusiveMinimumConstraintBoundary(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1.42, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be greater than %v", c.Rule)), true) +} + +func TestValidateFloat_exclusiveMaximumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 2.0, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_exclusiveMaximumConstraintInvalid(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 1.2, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be less than %v", c.Rule)), true) +} + +func TestValidateFloat_exclusiveMaximumConstraintBoundary(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 1.42, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be less than %v", c.Rule)), true) +} + +func TestValidateFloat_inclusiveMaximumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 2.0, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_inclusiveMaximumConstraintInvalid(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 1.2, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be less than or equal to %v", c.Rule)), true) + +} + +func TestValidateFloat_inclusiveMaximumConstraintBoundary(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 1.42, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_InclusiveMinimumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1.0, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_InclusiveMinimumConstraintInvalid(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1.5, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be greater than or equal to %v", c.Rule)), true) + +} + +func TestValidateFloat_InclusiveMinimumConstraintBoundary(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1.42, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateInt_InvalidConstraint(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: 3, + Chain: nil, + } + require.Equal(t, strings.Contains(validateInt(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("constraint %s is not applicable for type integer", c.Name)), true) +} + +func TestValidateInt_InvalidRuleValue(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 3.4, + Chain: 
nil, + } + require.Equal(t, strings.Contains(validateInt(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer value for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateInt_ExclusiveMinimumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(3), c)) +} + +func TestValidateInt_ExclusiveMinimumConstraintInvalid(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 3, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be greater than %v", c.Rule)).Error()) +} + +func TestValidateInt_ExclusiveMinimumConstraintBoundary(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be greater than %v", c.Rule)).Error()) +} + +func TestValidateInt_exclusiveMaximumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_exclusiveMaximumConstraintInvalid(t *testing.T) { + var x interface{} = 2 + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 1, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be less than %v", c.Rule)).Error()) +} + +func TestValidateInt_exclusiveMaximumConstraintBoundary(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 1, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be less than %v", c.Rule)).Error()) +} + +func TestValidateInt_inclusiveMaximumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_inclusiveMaximumConstraintInvalid(t *testing.T) { + var x interface{} = 2 + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 1, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be less than or equal to %v", c.Rule)).Error()) +} + +func TestValidateInt_inclusiveMaximumConstraintBoundary(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 1, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_InclusiveMinimumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_InclusiveMinimumConstraintInvalid(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 2, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be greater than or equal to %v", c.Rule)).Error()) +} + +func TestValidateInt_InclusiveMinimumConstraintBoundary(t *testing.T) { + c := Constraint{ + Target: "str", + Name: 
InclusiveMinimum, + Rule: 1, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_MultipleOfWithoutError(t *testing.T) { + c := Constraint{ + Target: "str", + Name: MultipleOf, + Rule: 10, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(2300), c)) +} + +func TestValidateInt_MultipleOfWithError(t *testing.T) { + c := Constraint{ + Target: "str", + Name: MultipleOf, + Rule: 11, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(2300), c).Error(), + createError(reflect.ValueOf(2300), c, fmt.Sprintf("value must be a multiple of %v", c.Rule)).Error()) +} + +func TestValidatePointer_NilTrue(t *testing.T) { + var z *int + var x interface{} = z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, // Required property + Chain: nil, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, "value can not be null; required parameter").Error()) +} + +func TestValidatePointer_NilFalse(t *testing.T) { + var z *int + var x interface{} = z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: false, // not required property + Chain: nil, + } + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_NilReadonlyValid(t *testing.T) { + var z *int + var x interface{} = z + c := Constraint{ + Target: "ptr", + Name: ReadOnly, + Rule: true, + Chain: nil, + } + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_NilReadonlyInvalid(t *testing.T) { + z := 10 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: ReadOnly, + Rule: true, + Chain: nil, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(z), c, "readonly parameter; must send as nil or empty in request").Error()) +} + +func TestValidatePointer_IntValid(t *testing.T) { + z := 10 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: InclusiveMinimum, + Rule: 3, + Chain: nil, + } + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_IntInvalid(t *testing.T) { + z := 10 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: InclusiveMinimum, + Rule: 11, + Chain: nil, + }, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10), c.Chain[0], "value must be greater than or equal to 11").Error()) +} + +func TestValidatePointer_IntInvalidConstraint(t *testing.T) { + z := 10 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: MaxItems, + Rule: 3, + Chain: nil, + }, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10), c.Chain[0], + fmt.Sprintf("constraint %v is not applicable for type integer", MaxItems)).Error()) +} + +func TestValidatePointer_ValidInt64(t *testing.T) { + z := int64(10) + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: InclusiveMinimum, + Rule: 3, + Chain: nil, + }, + }} + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_InvalidConstraintInt64(t *testing.T) { + z := int64(10) + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: MaxItems, + Rule: 3, + Chain: nil, + }, + }, + } 
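+	// MaxItems applies only to arrays, slices and maps, so chaining it under a
+	// pointer to int64 must surface the "not applicable for type integer" error.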
+ require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10), c.Chain[0], + fmt.Sprintf("constraint %v is not applicable for type integer", MaxItems)).Error()) +} + +func TestValidatePointer_ValidFloat(t *testing.T) { + z := 10.1 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: InclusiveMinimum, + Rule: 3.0, + Chain: nil, + }}} + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_InvalidFloat(t *testing.T) { + z := 10.1 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: InclusiveMinimum, + Rule: 12.0, + Chain: nil, + }}, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10.1), c.Chain[0], + "value must be greater than or equal to 12").Error()) +} + +func TestValidatePointer_InvalidConstraintFloat(t *testing.T) { + z := 10.1 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: MaxItems, + Rule: 3.0, + Chain: nil, + }}, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10.1), c.Chain[0], + fmt.Sprintf("constraint %v is not applicable for type float", MaxItems)).Error()) +} + +func TestValidatePointer_StringValid(t *testing.T) { + z := "hello" + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: Pattern, + Rule: "^[a-z]+$", + Chain: nil, + }}} + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_StringInvalid(t *testing.T) { + z := "hello" + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: MaxLength, + Rule: 2, + Chain: nil, + }}} + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf("hello"), c.Chain[0], + "value length must be less than or equal to 2").Error()) +} + +func TestValidatePointer_ArrayValid(t *testing.T) { + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: UniqueItems, + Rule: "true", + Chain: nil, + }}} + require.Nil(t, validatePtr(reflect.ValueOf(&[]string{"1", "2"}), c)) +} + +func TestValidatePointer_ArrayInvalid(t *testing.T) { + z := []string{"1", "2", "2"} + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{{ + Target: "ptr", + Name: UniqueItems, + Rule: true, + Chain: nil, + }}, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(z), c.Chain[0], + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, z)).Error()) +} + +func TestValidatePointer_MapValid(t *testing.T) { + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: UniqueItems, + Rule: true, + Chain: nil, + }}} + require.Nil(t, validatePtr(reflect.ValueOf(&map[interface{}]string{1: "1", "1": "2"}), c)) +} + +func TestValidatePointer_MapInvalid(t *testing.T) { + z := map[interface{}]string{1: "1", "1": "2", 1.3: "2"} + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{{ + Target: "ptr", + Name: UniqueItems, + Rule: true, + Chain: nil, + }}, + } + 
require.Equal(t, strings.Contains(validatePtr(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("all items in parameter %q must be unique;", c.Target)), true) +} + +type Child struct { + I string +} +type Product struct { + C *Child + Str *string + Name string + Arr *[]string + M *map[string]string + Num *int32 +} + +type Sample struct { + M *map[string]*string + Name string +} + +func TestValidatePointer_StructWithError(t *testing.T) { + s := "hello" + var x interface{} = &Product{ + C: &Child{"100"}, + Str: &s, + Name: "Gopher", + } + c := Constraint{ + "p", Null, "True", + []Constraint{ + {"C", Null, true, + []Constraint{ + {"I", MaxLength, 2, nil}, + }}, + {"Str", MaxLength, 2, nil}, + {"Name", MaxLength, 5, nil}, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf("100"), c.Chain[0].Chain[0], + "value length must be less than or equal to 2").Error()) +} + +func TestValidatePointer_WithNilStruct(t *testing.T) { + var p *Product + var x interface{} = p + c := Constraint{ + "p", Null, true, + []Constraint{ + {"C", Null, true, + []Constraint{ + {"I", Empty, true, + []Constraint{ + {"I", MaxLength, 5, nil}, + }}, + }}, + {"Str", MaxLength, 2, nil}, + {"Name", MaxLength, 5, nil}, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, + fmt.Sprintf("value can not be null; required parameter")).Error()) +} + +func TestValidatePointer_StructWithNoError(t *testing.T) { + s := "hello" + var x interface{} = &Product{ + C: &Child{"100"}, + Str: &s, + Name: "Gopher", + } + c := Constraint{ + "p", Null, true, + []Constraint{ + {"C", Null, true, + []Constraint{ + {"I", Empty, true, + []Constraint{ + {"I", MaxLength, 5, nil}, + }}, + }}, + }, + } + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidateStruct_FieldNotExist(t *testing.T) { + s := "hello" + var x interface{} = Product{ + C: &Child{"100"}, + Str: &s, + Name: "Gopher", + } + c := Constraint{ + "C", Null, true, + []Constraint{ + {"Name", Empty, true, nil}, + }, + } + s = "Name" + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(Child{"100"}), c.Chain[0], + fmt.Sprintf("field %q doesn't exist", s)).Error()) +} + +func TestValidateStruct_WithChainConstraint(t *testing.T) { + s := "hello" + var x interface{} = Product{ + C: &Child{"100"}, + Str: &s, + Name: "Gopher", + } + c := Constraint{ + "C", Null, true, + []Constraint{ + {"I", Empty, true, + []Constraint{ + {"I", MaxLength, 2, nil}, + }}, + }, + } + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf("100"), c.Chain[0].Chain[0], "value length must be less than or equal to 2").Error()) +} + +func TestValidateStruct_WithoutChainConstraint(t *testing.T) { + s := "hello" + var x interface{} = Product{ + C: &Child{""}, + Str: &s, + Name: "Gopher", + } + c := Constraint{"C", Null, true, + []Constraint{ + {"I", Empty, true, nil}, // throw error for Empty + }} + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(""), c.Chain[0], "value can not be null or empty; required parameter").Error()) +} + +func TestValidateStruct_WithArrayNull(t *testing.T) { + s := "hello" + var x interface{} = Product{ + C: &Child{""}, + Str: &s, + Name: "Gopher", + Arr: nil, + } + c := Constraint{"Arr", Null, true, + []Constraint{ + {"Arr", MaxItems, 4, nil}, + {"Arr", MinItems, 2, nil}, + }, + } + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + 
createError(reflect.ValueOf(x.(Product).Arr), c, "value can not be null; required parameter").Error()) +} + +func TestValidateStruct_WithArrayEmptyError(t *testing.T) { + // arr := []string{} + var x interface{} = Product{ + Arr: &[]string{}, + } + c := Constraint{ + "Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", MinItems, 2, nil}, + }} + + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(*(x.(Product).Arr)), c.Chain[0], + fmt.Sprintf("value can not be null or empty; required parameter")).Error()) +} + +func TestValidateStruct_WithArrayEmptyWithoutError(t *testing.T) { + var x interface{} = Product{ + Arr: &[]string{}, + } + c := Constraint{ + "Arr", Null, true, + []Constraint{ + {"Arr", Empty, false, nil}, + {"Arr", MaxItems, 4, nil}, + }, + } + require.Nil(t, validateStruct(reflect.ValueOf(x), c)) +} + +func TestValidateStruct_ArrayWithError(t *testing.T) { + arr := []string{"1", "1"} + var x interface{} = Product{ + Arr: &arr, + } + c := Constraint{ + "Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", UniqueItems, true, nil}, + }, + } + s := "Arr" + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(*(x.(Product).Arr)), c.Chain[2], + fmt.Sprintf("all items in parameter %q must be unique; got:%v", s, *(x.(Product).Arr))).Error()) +} + +func TestValidateStruct_MapWithError(t *testing.T) { + m := map[string]string{ + "a": "hello", + "b": "hello", + } + var x interface{} = Product{ + M: &m, + } + c := Constraint{ + "M", Null, true, + []Constraint{ + {"M", Empty, true, nil}, + {"M", MaxItems, 4, nil}, + {"M", UniqueItems, true, nil}, + }, + } + + s := "M" + require.Equal(t, strings.Contains(validateStruct(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("all items in parameter %q must be unique;", s)), true) +} + +func TestValidateStruct_MapWithNoError(t *testing.T) { + m := map[string]string{} + var x interface{} = Product{ + M: &m, + } + c := Constraint{ + "M", Null, true, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MaxItems, 4, nil}, + }, + } + require.Nil(t, validateStruct(reflect.ValueOf(x), c)) +} + +func TestValidateStruct_MapNilNoError(t *testing.T) { + var m map[string]string + var x interface{} = Product{ + M: &m, + } + c := Constraint{ + "M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MaxItems, 4, nil}, + }, + } + require.Nil(t, validateStruct(reflect.ValueOf(x), c)) +} + +func TestValidate_MapValidationWithError(t *testing.T) { + var x1 interface{} = &Product{ + Arr: &[]string{"1", "2"}, + M: &map[string]string{"a": "hello"}, + } + s := "hello" + var x2 interface{} = &Sample{ + M: &map[string]*string{"a": &s}, + } + v := []Validation{ + {x1, + []Constraint{{"x1", Null, true, + []Constraint{ + {"Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", UniqueItems, true, nil}, + }, + }, + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + }, + }}}, + {x2, + []Constraint{ + {"x2", Null, true, + []Constraint{ + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 2, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + }, + }, + {"Name", Empty, true, nil}, + }}, + } + + z := Validate(v).Error() + require.Equal(t, strings.Contains(z, "minimum item limit is 2; got: 1"), true) + require.Equal(t, strings.Contains(z, 
"MinItems"), true) +} + +func TestValidate_MapValidationWithoutError(t *testing.T) { + var x1 interface{} = &Product{ + Arr: &[]string{"1", "2"}, + M: &map[string]string{"a": "hello"}, + } + s := "hello" + var x2 interface{} = &Sample{ + M: &map[string]*string{"a": &s}, + } + v := []Validation{ + {x1, + []Constraint{{"x1", Null, true, + []Constraint{ + {"Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", UniqueItems, true, nil}, + }, + }, + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + {"M", Pattern, "^[a-z]+$", nil}, + }, + }, + }, + }}}, + {x2, + []Constraint{ + {"x2", Null, true, + []Constraint{ + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + {"M", Pattern, "^[a-z]+$", nil}, + }, + }, + }, + }, + {"Name", Empty, true, nil}, + }}, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_UnknownType(t *testing.T) { + var c chan int + v := []Validation{ + {c, + []Constraint{{"c", Null, true, nil}}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(c), v[0].Constraints[0], + fmt.Sprintf("unknown type %v", reflect.ValueOf(c).Kind())).Error()) +} + +func TestValidate_example1(t *testing.T) { + var x1 interface{} = Product{ + Arr: &[]string{"1", "1"}, + M: &map[string]string{"a": "hello"}, + } + s := "hello" + var x2 interface{} = Sample{ + M: &map[string]*string{"a": &s}, + } + v := []Validation{ + {x1, + []Constraint{{"Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", UniqueItems, true, nil}, + }}, + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + }}, + {x2, + []Constraint{ + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + {"Name", Empty, true, nil}, + }}, + } + s = "Arr" + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf([]string{"1", "1"}), v[0].Constraints[0].Chain[2], + fmt.Sprintf("all items in parameter %q must be unique; got:%v", s, []string{"1", "1"})).Error()) +} + +func TestValidate_Int(t *testing.T) { + n := int32(100) + v := []Validation{ + {n, + []Constraint{ + {"n", MultipleOf, 10, nil}, + {"n", ExclusiveMinimum, 100, nil}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(n), v[0].Constraints[1], + "value must be greater than 100").Error()) +} + +func TestValidate_IntPointer(t *testing.T) { + n := int32(100) + p := &n + v := []Validation{ + {p, + []Constraint{ + {"p", Null, true, []Constraint{ + {"p", ExclusiveMinimum, 100, nil}, + }}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(n), v[0].Constraints[0].Chain[0], + "value must be greater than 100").Error()) + + // required paramter + p = nil + v = []Validation{ + {p, + []Constraint{ + {"p", Null, true, []Constraint{ + {"p", ExclusiveMinimum, 100, nil}, + }}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(v[0].TargetValue), v[0].Constraints[0], + "value can not be null; required parameter").Error()) + + // Not required + p = nil + v = []Validation{ + {p, + []Constraint{ + {"p", Null, false, []Constraint{ + {"p", ExclusiveMinimum, 100, nil}, + }}, + }, + }, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_IntStruct(t *testing.T) { + n := 
int32(100) + p := &Product{ + Num: &n, + } + + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"Num", Null, true, []Constraint{ + {"Num", ExclusiveMinimum, 100, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(n), v[0].Constraints[0].Chain[0].Chain[0], + "value must be greater than 100").Error()) + + // required paramter + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Num", Null, true, []Constraint{ + {"p.Num", ExclusiveMinimum, 100, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.Num), v[0].Constraints[0].Chain[0], + "value can not be null; required parameter").Error()) + + // Not required + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"Num", Null, false, []Constraint{ + {"Num", ExclusiveMinimum, 100, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) + + // Parent not required + p = nil + v = []Validation{ + {p, []Constraint{{"p", Null, false, + []Constraint{ + {"Num", Null, false, []Constraint{ + {"Num", ExclusiveMinimum, 100, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_String(t *testing.T) { + s := "hello" + v := []Validation{ + {s, + []Constraint{ + {"s", Empty, true, nil}, + {"s", Empty, true, + []Constraint{{"s", MaxLength, 3, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[1].Chain[0], + "value length must be less than or equal to 3").Error()) + + // required paramter + s = "" + v = []Validation{ + {s, + []Constraint{ + {"s", Empty, true, nil}, + {"s", Empty, true, + []Constraint{{"s", MaxLength, 3, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[1], + "value can not be null or empty; required parameter").Error()) + + // not required paramter + s = "" + v = []Validation{ + {s, + []Constraint{ + {"s", Empty, false, nil}, + {"s", Empty, false, + []Constraint{{"s", MaxLength, 3, nil}}}, + }, + }, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_StringStruct(t *testing.T) { + s := "hello" + p := &Product{ + Str: &s, + } + + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Str", Null, true, []Constraint{ + {"p.Str", Empty, true, nil}, + {"p.Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + // e := ValidationError{ + // Constraint: MaxLength, + // Target: "Str", + // TargetValue: s, + // Details: fmt.Sprintf("value length must be less than 3", s), + // } + // if z := Validate(v); !reflect.DeepEqual(e, z) { + // t.Fatalf("autorest/validation: Validate failed to return error \nexpect: %v\ngot: %v", e, z) + // } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[0].Chain[1], + "value length must be less than or equal to 3").Error()) + + // required paramter - can't be Empty + s = "" + p = &Product{ + Str: &s, + } + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"Str", Null, true, []Constraint{ + {"Str", Empty, true, nil}, + {"Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // required paramter - can't be null + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Str", 
Null, true, []Constraint{ + {"p.Str", Empty, true, nil}, + {"p.Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.Str), v[0].Constraints[0].Chain[0], + "value can not be null; required parameter").Error()) + + // Not required + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"Str", Null, false, []Constraint{ + {"Str", Empty, true, nil}, + {"Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) + + // Parent not required + p = nil + v = []Validation{ + {p, []Constraint{{"p", Null, false, + []Constraint{ + {"Str", Null, true, []Constraint{ + {"Str", Empty, true, nil}, + {"Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_Array(t *testing.T) { + s := []string{"hello"} + v := []Validation{ + {s, + []Constraint{ + {"s", Null, true, + []Constraint{ + {"s", Empty, true, nil}, + {"s", MinItems, 2, nil}, + }}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[1], + fmt.Sprintf("minimum item limit is 2; got: %v", len(s))).Error()) + + // Empty array + v = []Validation{ + {[]string{}, + []Constraint{ + {"s", Null, true, + []Constraint{ + {"s", Empty, true, nil}, + {"s", MinItems, 2, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf([]string{}), v[0].Constraints[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // null array + var s1 []string + v = []Validation{ + {s1, + []Constraint{ + {"s1", Null, true, + []Constraint{ + {"s1", Empty, true, nil}, + {"s1", MinItems, 2, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s1), v[0].Constraints[0], + "value can not be null; required parameter").Error()) + + // not required paramter + v = []Validation{ + {s1, + []Constraint{ + {"s1", Null, false, + []Constraint{ + {"s1", Empty, true, nil}, + {"s1", MinItems, 2, nil}}}, + }, + }, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_ArrayPointer(t *testing.T) { + s := []string{"hello"} + v := []Validation{ + {&s, + []Constraint{ + {"s", Null, true, + []Constraint{ + {"s", Empty, true, nil}, + {"s", MinItems, 2, nil}, + }}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[1], + fmt.Sprintf("minimum item limit is 2; got: %v", len(s))).Error()) + + // Empty array + v = []Validation{ + {&[]string{}, + []Constraint{ + {"s", Null, true, + []Constraint{ + {"s", Empty, true, nil}, + {"s", MinItems, 2, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf([]string{}), v[0].Constraints[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // null array + var s1 *[]string + v = []Validation{ + {s1, + []Constraint{ + {"s1", Null, true, + []Constraint{ + {"s1", Empty, true, nil}, + {"s1", MinItems, 2, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s1), v[0].Constraints[0], + "value can not be null; required parameter").Error()) + + // not required paramter + v = []Validation{ + {s1, + []Constraint{ + {"s1", Null, false, + []Constraint{ + {"s1", Empty, true, nil}, + {"s1", MinItems, 2, nil}}}, + }, + }, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_ArrayInStruct(t *testing.T) { + s := []string{"hello"} + p := &Product{ + Arr: &s, + } + + v := []Validation{ + {p, 
[]Constraint{{"p", Null, true, + []Constraint{ + {"p.Arr", Null, true, []Constraint{ + {"p.Arr", Empty, true, nil}, + {"p.Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[0].Chain[1], + fmt.Sprintf("minimum item limit is 2; got: %v", len(s))).Error()) + + // required paramter - can't be Empty + p = &Product{ + Arr: &[]string{}, + } + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Arr", Null, true, []Constraint{ + {"p.Arr", Empty, true, nil}, + {"p.Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf([]string{}), v[0].Constraints[0].Chain[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // required paramter - can't be null + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Arr", Null, true, []Constraint{ + {"p.Arr", Empty, true, nil}, + {"p.Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.Arr), v[0].Constraints[0].Chain[0], + "value can not be null; required parameter").Error()) + + // Not required + v = []Validation{ + {&Product{}, []Constraint{{"p", Null, true, + []Constraint{ + {"Arr", Null, false, []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) + + // Parent not required + p = nil + v = []Validation{ + {p, []Constraint{{"p", Null, false, + []Constraint{ + {"Arr", Null, true, []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_StructInStruct(t *testing.T) { + p := &Product{ + C: &Child{I: "hello"}, + } + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"C", Null, true, + []Constraint{{"I", MinLength, 7, nil}}}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.C.I), v[0].Constraints[0].Chain[0].Chain[0], + "value length must be greater than or equal to 7").Error()) + + // required paramter - can't be Empty + p = &Product{ + C: &Child{I: ""}, + } + + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"C", Null, true, + []Constraint{{"I", Empty, true, nil}}}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.C.I), v[0].Constraints[0].Chain[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // required paramter - can't be null + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"C", Null, true, + []Constraint{{"I", Empty, true, nil}}}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.C), v[0].Constraints[0].Chain[0], + "value can not be null; required parameter").Error()) + + // Not required + v = []Validation{ + {&Product{}, []Constraint{{"p", Null, true, + []Constraint{{"p.C", Null, false, + []Constraint{{"p.C.I", Empty, true, nil}}}, + }, + }}}, + } + require.Nil(t, Validate(v)) + + // Parent not required + p = nil + v = []Validation{ + {p, []Constraint{{"p", Null, false, + []Constraint{{"p.C", Null, false, + []Constraint{{"p.C.I", Empty, true, nil}}}, + }, + }}}, + } + require.Nil(t, Validate(v)) +} + +func TestNewErrorWithValidationError(t *testing.T) { + p := &Product{} + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"p.C", Null, 
+
+func TestNewErrorWithValidationError(t *testing.T) {
+	p := &Product{}
+	v := []Validation{
+		{p, []Constraint{{"p", Null, true,
+			[]Constraint{{"p.C", Null, true,
+				[]Constraint{{"p.C.I", Empty, true, nil}}},
+			},
+		}}},
+	}
+	err := createError(reflect.ValueOf(p.C), v[0].Constraints[0].Chain[0], "value can not be null; required parameter")
+	z := fmt.Sprintf("batch.AccountClient#Create: Invalid input: %s",
+		err.Error())
+	require.Equal(t, NewErrorWithValidationError(err, "batch.AccountClient", "Create").Error(), z)
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go
new file mode 100644
index 000000000..f588807db
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/version.go
@@ -0,0 +1,49 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"sync"
+)
+
+const (
+	major = 8
+	minor = 0
+	patch = 0
+	tag   = ""
+)
+
+var once sync.Once
+var version string
+
+// Version returns the semantic version (see http://semver.org).
+func Version() string {
+	once.Do(func() {
+		semver := fmt.Sprintf("%d.%d.%d", major, minor, patch)
+		verBuilder := bytes.NewBufferString(semver)
+		if tag != "" && tag != "-" {
+			updated := strings.TrimPrefix(tag, "-")
+			_, err := verBuilder.WriteString("-" + updated)
+			if err != nil {
+				// appending the tag failed; fall back to the bare semver
+				verBuilder = bytes.NewBufferString(semver)
+			}
+		}
+		version = verBuilder.String()
+	})
+	return version
+}
diff --git a/vendor/github.com/Azure/go-autorest/glide.lock b/vendor/github.com/Azure/go-autorest/glide.lock
new file mode 100644
index 000000000..695d58cff
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/glide.lock
@@ -0,0 +1,44 @@
+hash: 6e0121d946623e7e609280b1b18627e1c8a767fdece54cb97c4447c1167cbc46
+updated: 2017-08-31T13:58:01.034822883+01:00
+imports:
+- name: github.com/dgrijalva/jwt-go
+  version: 2268707a8f0843315e2004ee4f1d021dc08baedf
+  subpackages:
+  - .
+- name: github.com/dimchansky/utfbom
+  version: 6c6132ff69f0f6c088739067407b5d32c52e1d0f
+- name: github.com/mitchellh/go-homedir
+  version: b8bc1bf767474819792c23f32d8286a45736f1c6
+- name: golang.org/x/crypto
+  version: 81e90905daefcd6fd217b62423c0908922eadb30
+  repo: https://github.com/golang/crypto.git
+  vcs: git
+  subpackages:
+  - pkcs12
+  - pkcs12/internal/rc2
+- name: golang.org/x/net
+  version: 66aacef3dd8a676686c7ae3716979581e8b03c47
+  repo: https://github.com/golang/net.git
+  vcs: git
+  subpackages:
+  - .
+- name: golang.org/x/text
+  version: 21e35d45962262c8ee80f6cb048dcf95ad0e9d79
+  repo: https://github.com/golang/text.git
+  vcs: git
+  subpackages:
+  - .
+testImports: +- name: github.com/davecgh/go-spew + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/stretchr/testify + version: 890a5c3458b43e6104ff5da8dfa139d013d77544 + subpackages: + - assert + - require diff --git a/vendor/github.com/Azure/go-autorest/glide.yaml b/vendor/github.com/Azure/go-autorest/glide.yaml new file mode 100644 index 000000000..375dcfd1a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/glide.yaml @@ -0,0 +1,22 @@ +package: github.com/Azure/go-autorest +import: +- package: github.com/dgrijalva/jwt-go + subpackages: + - . +- package: golang.org/x/crypto + repo: https://github.com/golang/crypto.git + vcs: git + subpackages: + - pkcs12 +- package: golang.org/x/net + repo: https://github.com/golang/net.git + vcs: git + subpackages: + - . +- package: golang.org/x/text + repo: https://github.com/golang/text.git + vcs: git + subpackages: + - . +- package: github.com/mitchellh/go-homedir +- package: github.com/dimchansky/utfbom diff --git a/vendor/github.com/Azure/go-autorest/testdata/credsutf16be.json b/vendor/github.com/Azure/go-autorest/testdata/credsutf16be.json new file mode 100644 index 000000000..7fa689d55 Binary files /dev/null and b/vendor/github.com/Azure/go-autorest/testdata/credsutf16be.json differ diff --git a/vendor/github.com/Azure/go-autorest/testdata/credsutf16le.json b/vendor/github.com/Azure/go-autorest/testdata/credsutf16le.json new file mode 100644 index 000000000..7951923e3 Binary files /dev/null and b/vendor/github.com/Azure/go-autorest/testdata/credsutf16le.json differ diff --git a/vendor/github.com/Azure/go-autorest/testdata/credsutf8.json b/vendor/github.com/Azure/go-autorest/testdata/credsutf8.json new file mode 100644 index 000000000..7d96bcfcb --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/testdata/credsutf8.json @@ -0,0 +1,12 @@ +{ + "clientId": "client-id-123", + "clientSecret": "client-secret-456", + "subscriptionId": "sub-id-789", + "tenantId": "tenant-id-123", + "activeDirectoryEndpointUrl": "https://login.microsoftonline.com", + "resourceManagerEndpointUrl": "https://management.azure.com/", + "activeDirectoryGraphResourceId": "https://graph.windows.net/", + "sqlManagementEndpointUrl": "https://management.core.windows.net:8443/", + "galleryEndpointUrl": "https://gallery.azure.com/", + "managementEndpointUrl": "https://management.core.windows.net/" +} diff --git a/vendor/github.com/asaskevich/govalidator/.circleci/config.yml b/vendor/github.com/asaskevich/govalidator/.circleci/config.yml new file mode 100644 index 000000000..498793040 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/.circleci/config.yml @@ -0,0 +1,10 @@ +version: 2 +jobs: + build: + docker: + - image: circleci/golang:1.9 + working_directory: /go/src/github.com/asaskevich/govalidator + steps: + - checkout + - run: go get -v -t -d ./... + - run: go test -v ./... 
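The three `credsutf*.json` fixtures above all carry the same auth-file shape shown in the UTF-8 variant (the `dimchansky/utfbom` dependency pinned in glide.yaml is presumably what strips the byte-order marks from the UTF-16 variants). A hedged sketch of decoding that shape; the struct here is illustrative, not the library's own type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
)

// azureCreds mirrors the fields of testdata/credsutf8.json (illustrative only).
type azureCreds struct {
	ClientID       string `json:"clientId"`
	ClientSecret   string `json:"clientSecret"`
	SubscriptionID string `json:"subscriptionId"`
	TenantID       string `json:"tenantId"`
}

func main() {
	raw, err := ioutil.ReadFile("testdata/credsutf8.json")
	if err != nil {
		panic(err)
	}
	var c azureCreds
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.SubscriptionID) // prints sub-id-789 for the checked-in fixture
}
```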
diff --git a/vendor/github.com/asaskevich/govalidator/.github/ISSUE_TEMPLATE.md b/vendor/github.com/asaskevich/govalidator/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..12acb3e29
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,2 @@
+
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml
new file mode 100644
index 000000000..e29f8eef5
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+  - 1.1
+  - 1.2
+  - 1.3
+  - 1.4
+  - 1.5
+  - 1.6
+  - tip
+
+notifications:
+  email:
+    - bwatas@gmail.com
diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
new file mode 100644
index 000000000..f0f7e3a8a
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
@@ -0,0 +1,63 @@
+#### Support
+If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to do, there are some features and functions that need to be done:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Keep the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) up to date
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzz testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement a batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep in mind when you implement new features:
+- Code must be clear and readable, and names of variables/constants must clearly describe what they represent
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Financial contributions
+
+We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
+Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" into the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
+
+
+## Credits
+
+
+### Contributors
+
+Thank you to all the people who have already contributed to govalidator!
+
+
+
+### Backers
+
+Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Thank you to all our sponsors!
(please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE new file mode 100644 index 000000000..2f9a31fad --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Alex Saskevich + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md new file mode 100644 index 000000000..0e8793f71 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/README.md @@ -0,0 +1,496 @@ +govalidator +=========== +[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043) +[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) + +A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). + +#### Installation +Make sure that Go is installed on your computer. 
+Type the following command in your terminal:
+
+    go get github.com/asaskevich/govalidator
+
+or you can get a specific release of the package via `gopkg.in`:
+
+    go get gopkg.in/asaskevich/govalidator.v4
+
+After that, the package is ready to use.
+
+
+#### Import package in your project
+Add the following line to your `*.go` file:
+```go
+import "github.com/asaskevich/govalidator"
+```
+If you don't want to use the long `govalidator` name, you can alias the import like this:
+```go
+import (
+	valid "github.com/asaskevich/govalidator"
+)
+```
+
+#### Activate behavior to require all fields to have a validation tag by default
+`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
+
+`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to distinguish between `nil` and `zero value` states can use this. If disabled, both `nil` and `zero` values cause validation errors.
+
+```go
+import "github.com/asaskevich/govalidator"
+
+func init() {
+	govalidator.SetFieldsRequiredByDefault(true)
+}
+```
+
+Here's some code to explain it:
+```go
+// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+type exampleStruct struct {
+	Name  string ``
+	Email string `valid:"email"`
+}
+
+// this, however, will only fail when Email is empty or an invalid email address:
+type exampleStruct2 struct {
+	Name  string `valid:"-"`
+	Email string `valid:"email"`
+}
+
+// lastly, this will only fail when Email is an invalid email address but not when it's empty:
+type exampleStruct2 struct {
+	Name  string `valid:"-"`
+	Email string `valid:"email,optional"`
+}
+```
+
+#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
+##### Custom validator function signature
+A context was added as the second parameter; for structs this is the object being validated – this makes dependent validation possible.
+```go
+import "github.com/asaskevich/govalidator"
+
+// old signature
+func(i interface{}) bool
+
+// new signature
+func(i interface{}, o interface{}) bool
+```
+
+##### Adding a custom validator
+This was changed to prevent data races when accessing custom validators.
+```go
+import "github.com/asaskevich/govalidator"
+
+// before
+govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool {
+	// ...
+})) +``` + +#### List of functions: +```go +func Abs(value float64) float64 +func BlackList(str, chars string) string +func ByteLength(str string, params ...string) bool +func CamelCaseToUnderscore(str string) string +func Contains(str, substring string) bool +func Count(array []interface{}, iterator ConditionIterator) int +func Each(array []interface{}, iterator Iterator) +func ErrorByField(e error, field string) string +func ErrorsByField(e error) map[string]string +func Filter(array []interface{}, iterator ConditionIterator) []interface{} +func Find(array []interface{}, iterator ConditionIterator) interface{} +func GetLine(s string, index int) (string, error) +func GetLines(s string) []string +func InRange(value, left, right float64) bool +func IsASCII(str string) bool +func IsAlpha(str string) bool +func IsAlphanumeric(str string) bool +func IsBase64(str string) bool +func IsByteLength(str string, min, max int) bool +func IsCIDR(str string) bool +func IsCreditCard(str string) bool +func IsDNSName(str string) bool +func IsDataURI(str string) bool +func IsDialString(str string) bool +func IsDivisibleBy(str, num string) bool +func IsEmail(str string) bool +func IsFilePath(str string) (bool, int) +func IsFloat(str string) bool +func IsFullWidth(str string) bool +func IsHalfWidth(str string) bool +func IsHexadecimal(str string) bool +func IsHexcolor(str string) bool +func IsHost(str string) bool +func IsIP(str string) bool +func IsIPv4(str string) bool +func IsIPv6(str string) bool +func IsISBN(str string, version int) bool +func IsISBN10(str string) bool +func IsISBN13(str string) bool +func IsISO3166Alpha2(str string) bool +func IsISO3166Alpha3(str string) bool +func IsISO693Alpha2(str string) bool +func IsISO693Alpha3b(str string) bool +func IsISO4217(str string) bool +func IsIn(str string, params ...string) bool +func IsInt(str string) bool +func IsJSON(str string) bool +func IsLatitude(str string) bool +func IsLongitude(str string) bool +func IsLowerCase(str string) bool +func IsMAC(str string) bool +func IsMongoID(str string) bool +func IsMultibyte(str string) bool +func IsNatural(value float64) bool +func IsNegative(value float64) bool +func IsNonNegative(value float64) bool +func IsNonPositive(value float64) bool +func IsNull(str string) bool +func IsNumeric(str string) bool +func IsPort(str string) bool +func IsPositive(value float64) bool +func IsPrintableASCII(str string) bool +func IsRFC3339(str string) bool +func IsRFC3339WithoutZone(str string) bool +func IsRGBcolor(str string) bool +func IsRequestURI(rawurl string) bool +func IsRequestURL(rawurl string) bool +func IsSSN(str string) bool +func IsSemver(str string) bool +func IsTime(str string, format string) bool +func IsURL(str string) bool +func IsUTFDigit(str string) bool +func IsUTFLetter(str string) bool +func IsUTFLetterNumeric(str string) bool +func IsUTFNumeric(str string) bool +func IsUUID(str string) bool +func IsUUIDv3(str string) bool +func IsUUIDv4(str string) bool +func IsUUIDv5(str string) bool +func IsUpperCase(str string) bool +func IsVariableWidth(str string) bool +func IsWhole(value float64) bool +func LeftTrim(str, chars string) string +func Map(array []interface{}, iterator ResultIterator) []interface{} +func Matches(str, pattern string) bool +func NormalizeEmail(str string) (string, error) +func PadBoth(str string, padStr string, padLen int) string +func PadLeft(str string, padStr string, padLen int) string +func PadRight(str string, padStr string, padLen int) string +func Range(str string, params 
...string) bool
+func RemoveTags(s string) string
+func ReplacePattern(str, pattern, replace string) string
+func Reverse(s string) string
+func RightTrim(str, chars string) string
+func RuneLength(str string, params ...string) bool
+func SafeFileName(str string) string
+func SetFieldsRequiredByDefault(value bool)
+func Sign(value float64) float64
+func StringLength(str string, params ...string) bool
+func StringMatches(s string, params ...string) bool
+func StripLow(str string, keepNewLines bool) string
+func ToBoolean(str string) (bool, error)
+func ToFloat(str string) (float64, error)
+func ToInt(str string) (int64, error)
+func ToJSON(obj interface{}) (string, error)
+func ToString(obj interface{}) string
+func Trim(str, chars string) string
+func Truncate(str string, length int, ending string) string
+func UnderscoreToCamelCase(s string) string
+func ValidateStruct(s interface{}) (bool, error)
+func WhiteList(str, chars string) string
+type ConditionIterator
+type CustomTypeValidator
+type Error
+func (e Error) Error() string
+type Errors
+func (es Errors) Error() string
+func (es Errors) Errors() []error
+type ISO3166Entry
+type Iterator
+type ParamValidator
+type ResultIterator
+type UnsupportedTypeError
+func (e *UnsupportedTypeError) Error() string
+type Validator
+```
+
+#### Examples
+###### IsURL
+```go
+println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
+```
+###### ToString
+```go
+type User struct {
+	FirstName string
+	LastName  string
+}
+
+str := govalidator.ToString(&User{"John", "Juan"})
+println(str)
+```
+###### Each, Map, Filter, Count for slices
+Each iterates over the slice/array and calls Iterator for every item
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.Iterator = func(value interface{}, index int) {
+	println(value.(int))
+}
+govalidator.Each(data, fn)
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
+	return value.(int) * 3
+}
+_ = govalidator.Map(data, fn) // result = []interface{}{3, 6, 9, 12, 15}
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
+	return value.(int)%2 == 0
+}
+_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
+_ = govalidator.Count(data, fn) // result = 5
+```
+###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
+If you want to validate structs, you can use the `valid` tag on any field in your structure. All validators used with this field in one tag are separated by commas. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
+```go
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+	return str == "duck"
+})
+```
+For completely custom validators (interface-based), see below.
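+
+If validation fails, the aggregate error can be unpacked per field with `ErrorsByField` from the function list above – a short sketch (the struct and values here are illustrative):
+```go
+user := &struct {
+	Email string `valid:"email,required"`
+}{Email: "not-an-email"}
+
+if ok, err := govalidator.ValidateStruct(user); !ok {
+	for field, msg := range govalidator.ErrorsByField(err) {
+		fmt.Printf("%s: %s\n", field, msg)
+	}
+}
+```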
+
+Here is a list of available validators for struct fields (validator - used function):
+```go
+"email":              IsEmail,
+"url":                IsURL,
+"dialstring":         IsDialString,
+"requrl":             IsRequestURL,
+"requri":             IsRequestURI,
+"alpha":              IsAlpha,
+"utfletter":          IsUTFLetter,
+"alphanum":           IsAlphanumeric,
+"utfletternum":       IsUTFLetterNumeric,
+"numeric":            IsNumeric,
+"utfnumeric":         IsUTFNumeric,
+"utfdigit":           IsUTFDigit,
+"hexadecimal":        IsHexadecimal,
+"hexcolor":           IsHexcolor,
+"rgbcolor":           IsRGBcolor,
+"lowercase":          IsLowerCase,
+"uppercase":          IsUpperCase,
+"int":                IsInt,
+"float":              IsFloat,
+"null":               IsNull,
+"uuid":               IsUUID,
+"uuidv3":             IsUUIDv3,
+"uuidv4":             IsUUIDv4,
+"uuidv5":             IsUUIDv5,
+"creditcard":         IsCreditCard,
+"isbn10":             IsISBN10,
+"isbn13":             IsISBN13,
+"json":               IsJSON,
+"multibyte":          IsMultibyte,
+"ascii":              IsASCII,
+"printableascii":     IsPrintableASCII,
+"fullwidth":          IsFullWidth,
+"halfwidth":          IsHalfWidth,
+"variablewidth":      IsVariableWidth,
+"base64":             IsBase64,
+"datauri":            IsDataURI,
+"ip":                 IsIP,
+"port":               IsPort,
+"ipv4":               IsIPv4,
+"ipv6":               IsIPv6,
+"dns":                IsDNSName,
+"host":               IsHost,
+"mac":                IsMAC,
+"latitude":           IsLatitude,
+"longitude":          IsLongitude,
+"ssn":                IsSSN,
+"semver":             IsSemver,
+"rfc3339":            IsRFC3339,
+"rfc3339WithoutZone": IsRFC3339WithoutZone,
+"ISO3166Alpha2":      IsISO3166Alpha2,
+"ISO3166Alpha3":      IsISO3166Alpha3,
+```
+Validators with parameters
+
+```go
+"range(min|max)": Range,
+"length(min|max)": ByteLength,
+"runelength(min|max)": RuneLength,
+"matches(pattern)": StringMatches,
+"in(string1|string2|...|stringN)": IsIn,
+```
+
+And here is a small example of usage:
+```go
+type Post struct {
+	Title    string `valid:"alphanum,required"`
+	Message  string `valid:"duck,ascii"`
+	AuthorIP string `valid:"ipv4"`
+	Date     string `valid:"-"`
+}
+post := &Post{
+	Title:    "My Example Post",
+	Message:  "duck",
+	AuthorIP: "123.234.54.3",
+}
+
+// Add your own struct validation tags
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+	return str == "duck"
+})
+
+result, err := govalidator.ValidateStruct(post)
+if err != nil {
+	println("error: " + err.Error())
+}
+println(result)
+```
+###### WhiteList
+```go
+// Remove all characters from string ignoring characters between "a" and "z"
+println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
+```
+
+###### Custom validation functions
+Custom validation using your own domain specific validators is also available - here's an example of how to use it:
+```go
+import "github.com/asaskevich/govalidator"
+
+type CustomByteArray [6]byte // custom types are supported and can be validated
+
+type StructWithCustomByteArray struct {
+	ID              CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
+	Email           string          `valid:"email"`
+	CustomMinLength int             `valid:"-"`
+}
+
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
+	switch v := context.(type) { // you can type switch on the context interface being validated
+	case StructWithCustomByteArray:
+		// you can check and validate against some other field in the context,
+		// return early or not validate against the context at all – your choice
+	case SomeOtherType:
+		// ...
+	default:
+		// expecting some other type? Throw/panic here or continue
+	}
+
+	switch v := i.(type) { // type switch on the struct field being validated
+	case CustomByteArray:
+		for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
+			if e != 0 {
+				return true
+			}
+		}
+	}
+	return false
+}))
+govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
+	switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
+	case StructWithCustomByteArray:
+		return len(v.ID) >= v.CustomMinLength
+	}
+	return false
+}))
+```
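+
+With the two custom validators above registered, a call might look like this – a short sketch (the field values are made up for illustration):
+```go
+s := StructWithCustomByteArray{
+	ID:              CustomByteArray{1, 2, 3, 4, 5, 6}, // non-zero, so customByteArrayValidator passes
+	Email:           "test@example.com",
+	CustomMinLength: 4, // len(ID) == 6 >= 4, so customMinLengthValidator passes
+}
+ok, err := govalidator.ValidateStruct(s)
+// ok == true and err == nil only when all three tags pass
+```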
+
+###### Custom error messages
+Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
+```go
+type Ticket struct {
+	Id        int64  `json:"id"`
+	FirstName string `json:"firstname" valid:"required~First name is blank"`
+}
+```
+
+#### Notes
+Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
+Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
+
+#### Support
+If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to do, there are some features and functions that need to be done:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Keep the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) up to date
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzz testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement a batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep in mind when you implement new features:
+- Code must be clear and readable, and names of variables/constants must clearly describe what they represent
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Credits
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
+* [Daniel Lohse](https://github.com/annismckenzie)
+* [Attila Oláh](https://github.com/attilaolah)
+* [Daniel Korner](https://github.com/Dadie)
+* [Steven Wilkin](https://github.com/stevenwilkin)
+* [Deiwin Sarjas](https://github.com/deiwin)
+* [Noah Shibley](https://github.com/slugmobile)
+* [Nathan Davies](https://github.com/nathj07)
+* [Matt Sanford](https://github.com/mzsanford)
+* [Simon ccl1115](https://github.com/ccl1115)
+
+
+
+
+### Backers
+
+Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+### Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
+
+
+
+
+
+
+
+
+
+
+
+## License
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go
new file mode 100644
index 000000000..5bace2654
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/arrays.go
@@ -0,0 +1,58 @@
+package govalidator
+
+// Iterator is the function that accepts an element of a slice/array and its index
+type Iterator func(interface{}, int)
+
+// ResultIterator is the function that accepts an element of a slice/array and its index and returns any result
+type ResultIterator func(interface{}, int) interface{}
+
+// ConditionIterator is the function that accepts an element of a slice/array and its index and returns a boolean
+type ConditionIterator func(interface{}, int) bool
+
+// Each iterates over the slice and applies Iterator to every item
+func Each(array []interface{}, iterator Iterator) {
+	for index, data := range array {
+		iterator(data, index)
+	}
+}
+
+// Map iterates over the slice and applies ResultIterator to every item. Returns a new slice as a result.
+func Map(array []interface{}, iterator ResultIterator) []interface{} {
+	var result = make([]interface{}, len(array))
+	for index, data := range array {
+		result[index] = iterator(data, index)
+	}
+	return result
+}
+
+// Find iterates over the slice and applies ConditionIterator to every item. Returns the first item that meets ConditionIterator, or nil otherwise.
+func Find(array []interface{}, iterator ConditionIterator) interface{} {
+	for index, data := range array {
+		if iterator(data, index) {
+			return data
+		}
+	}
+	return nil
+}
+
+// Filter iterates over the slice and applies ConditionIterator to every item. Returns a new slice.
+func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
+	var result = make([]interface{}, 0)
+	for index, data := range array {
+		if iterator(data, index) {
+			result = append(result, data)
+		}
+	}
+	return result
+}
+
+// Count iterates over the slice and applies ConditionIterator to every item. Returns the count of items that meet ConditionIterator.
+func Count(array []interface{}, iterator ConditionIterator) int {
+	count := 0
+	for index, data := range array {
+		if iterator(data, index) {
+			count = count + 1
+		}
+	}
+	return count
+}
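+
+// A short usage sketch for Find, which (unlike Each, Map, Filter and Count)
+// has no runnable example in the test file that follows; the values are
+// illustrative:
+//
+//	first := Find([]interface{}{1, 7, 10}, func(value interface{}, index int) bool {
+//		return value.(int) > 5
+//	})
+//	// first == 7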
to be %v, got %v", 15, acc) + } +} + +func ExampleEach() { + data := []interface{}{1, 2, 3, 4, 5} + var fn Iterator = func(value interface{}, index int) { + println(value.(int)) + } + Each(data, fn) +} + +func TestMap(t *testing.T) { + // TODO Maybe refactor? + t.Parallel() + data := []interface{}{1, 2, 3, 4, 5} + var fn ResultIterator = func(value interface{}, index int) interface{} { + return value.(int) * 3 + } + result := Map(data, fn) + for i, d := range result { + if d != fn(data[i], i) { + t.Errorf("Expected Map(..) to be %v, got %v", fn(data[i], i), d) + } + } +} + +func ExampleMap() { + data := []interface{}{1, 2, 3, 4, 5} + var fn ResultIterator = func(value interface{}, index int) interface{} { + return value.(int) * 3 + } + _ = Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15} +} + +func TestFind(t *testing.T) { + // TODO Maybe refactor? + t.Parallel() + findElement := 96 + data := []interface{}{1, 2, 3, 4, findElement, 5} + var fn1 ConditionIterator = func(value interface{}, index int) bool { + return value.(int) == findElement + } + var fn2 ConditionIterator = func(value interface{}, index int) bool { + value, _ = value.(string) + return value == "govalidator" + } + val1 := Find(data, fn1) + val2 := Find(data, fn2) + if val1 != findElement { + t.Errorf("Expected Find(..) to be %v, got %v", findElement, val1) + } + if val2 != nil { + t.Errorf("Expected Find(..) to be %v, got %v", nil, val2) + } +} + +func TestFilter(t *testing.T) { + // TODO Maybe refactor? + t.Parallel() + data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + answer := []interface{}{2, 4, 6, 8, 10} + var fn ConditionIterator = func(value interface{}, index int) bool { + return value.(int)%2 == 0 + } + result := Filter(data, fn) + for i := range result { + if result[i] != answer[i] { + t.Errorf("Expected Filter(..) to be %v, got %v", answer[i], result[i]) + } + } +} + +func ExampleFilter() { + data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + var fn ConditionIterator = func(value interface{}, index int) bool { + return value.(int)%2 == 0 + } + _ = Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10} +} + +func TestCount(t *testing.T) { + // TODO Maybe refactor? + t.Parallel() + data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + count := 5 + var fn ConditionIterator = func(value interface{}, index int) bool { + return value.(int)%2 == 0 + } + result := Count(data, fn) + if result != count { + t.Errorf("Expected Count(..) to be %v, got %v", count, result) + } +} + +func ExampleCount() { + data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + var fn ConditionIterator = func(value interface{}, index int) bool { + return value.(int)%2 == 0 + } + _ = Count(data, fn) // result = 5 +} diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go new file mode 100644 index 000000000..cf1e5d569 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/converter.go @@ -0,0 +1,64 @@ +package govalidator + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" +) + +// ToString convert the input to a string. +func ToString(obj interface{}) string { + res := fmt.Sprintf("%v", obj) + return string(res) +} + +// ToJSON convert the input to a valid JSON string +func ToJSON(obj interface{}) (string, error) { + res, err := json.Marshal(obj) + if err != nil { + res = []byte("") + } + return string(res), err +} + +// ToFloat convert the input string to a float, or 0.0 if the input is not a float. 
+func ToFloat(str string) (float64, error) { + res, err := strconv.ParseFloat(str, 64) + if err != nil { + res = 0.0 + } + return res, err +} + +// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. +func ToInt(value interface{}) (res int64, err error) { + val := reflect.ValueOf(value) + + switch value.(type) { + case int, int8, int16, int32, int64: + res = val.Int() + case uint, uint8, uint16, uint32, uint64: + res = int64(val.Uint()) + case string: + if IsInt(val.String()) { + res, err = strconv.ParseInt(val.String(), 0, 64) + if err != nil { + res = 0 + } + } else { + err = fmt.Errorf("math: square root of negative number %g", value) + res = 0 + } + default: + err = fmt.Errorf("math: square root of negative number %g", value) + res = 0 + } + + return +} + +// ToBoolean convert the input string to a boolean. +func ToBoolean(str string) (bool, error) { + return strconv.ParseBool(str) +} diff --git a/vendor/github.com/asaskevich/govalidator/converter_test.go b/vendor/github.com/asaskevich/govalidator/converter_test.go new file mode 100644 index 000000000..ecc457be8 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/converter_test.go @@ -0,0 +1,78 @@ +package govalidator + +import ( + "fmt" + "testing" +) + +func TestToInt(t *testing.T) { + tests := []string{"1000", "-123", "abcdef", "100000000000000000000000000000000000000000000"} + expected := []int64{1000, -123, 0, 0} + for i := 0; i < len(tests); i++ { + result, _ := ToInt(tests[i]) + if result != expected[i] { + t.Log("Case ", i, ": expected ", expected[i], " when result is ", result) + t.FailNow() + } + } +} + +func TestToBoolean(t *testing.T) { + tests := []string{"true", "1", "True", "false", "0", "abcdef"} + expected := []bool{true, true, true, false, false, false} + for i := 0; i < len(tests); i++ { + res, _ := ToBoolean(tests[i]) + if res != expected[i] { + t.Log("Case ", i, ": expected ", expected[i], " when result is ", res) + t.FailNow() + } + } +} + +func toString(t *testing.T, test interface{}, expected string) { + res := ToString(test) + if res != expected { + t.Log("Case ToString: expected ", expected, " when result is ", res) + t.FailNow() + } +} + +func TestToString(t *testing.T) { + toString(t, "str123", "str123") + toString(t, 123, "123") + toString(t, 12.3, "12.3") + toString(t, true, "true") + toString(t, 1.5+10i, "(1.5+10i)") + // Sprintf function not guarantee that maps with equal keys always will be equal in string representation + //toString(t, struct{ Keys map[int]int }{Keys: map[int]int{1: 2, 3: 4}}, "{map[1:2 3:4]}") +} + +func TestToFloat(t *testing.T) { + tests := []string{"", "123", "-.01", "10.", "string", "1.23e3", ".23e10"} + expected := []float64{0, 123, -0.01, 10.0, 0, 1230, 0.23e10} + for i := 0; i < len(tests); i++ { + res, _ := ToFloat(tests[i]) + if res != expected[i] { + t.Log("Case ", i, ": expected ", expected[i], " when result is ", res) + t.FailNow() + } + } +} + +func TestToJSON(t *testing.T) { + tests := []interface{}{"test", map[string]string{"a": "b", "b": "c"}, func() error { return fmt.Errorf("Error") }} + expected := [][]string{ + {"\"test\"", ""}, + {"{\"a\":\"b\",\"b\":\"c\"}", ""}, + {"", "json: unsupported type: func() error"}, + } + for i, test := range tests { + actual, err := ToJSON(test) + if actual != expected[i][0] { + t.Errorf("Expected toJSON(%v) to return '%v', got '%v'", test, expected[i][0], actual) + } + if fmt.Sprintf("%v", err) != expected[i][1] { + t.Errorf("Expected error returned from toJSON(%v) to 
return '%v', got '%v'", test, expected[i][1], fmt.Sprintf("%v", err)) + } + } +} diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go new file mode 100644 index 000000000..655b750cb --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/error.go @@ -0,0 +1,43 @@ +package govalidator + +import "strings" + +// Errors is an array of multiple errors and conforms to the error interface. +type Errors []error + +// Errors returns itself. +func (es Errors) Errors() []error { + return es +} + +func (es Errors) Error() string { + var errs []string + for _, e := range es { + errs = append(errs, e.Error()) + } + return strings.Join(errs, ";") +} + +// Error encapsulates a name, an error and whether there's a custom error message or not. +type Error struct { + Name string + Err error + CustomErrorMessageExists bool + + // Validator indicates the name of the validator that failed + Validator string + Path []string +} + +func (e Error) Error() string { + if e.CustomErrorMessageExists { + return e.Err.Error() + } + + errName := e.Name + if len(e.Path) > 0 { + errName = strings.Join(append(e.Path, e.Name), ".") + } + + return errName + ": " + e.Err.Error() +} diff --git a/vendor/github.com/asaskevich/govalidator/error_test.go b/vendor/github.com/asaskevich/govalidator/error_test.go new file mode 100644 index 000000000..e673f2824 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/error_test.go @@ -0,0 +1,29 @@ +package govalidator + +import ( + "fmt" + "testing" +) + +func TestErrorsToString(t *testing.T) { + t.Parallel() + customErr := &Error{Name: "Custom Error Name", Err: fmt.Errorf("stdlib error")} + customErrWithCustomErrorMessage := &Error{Name: "Custom Error Name 2", Err: fmt.Errorf("Bad stuff happened"), CustomErrorMessageExists: true} + + var tests = []struct { + param1 Errors + expected string + }{ + {Errors{}, ""}, + {Errors{fmt.Errorf("Error 1")}, "Error 1"}, + {Errors{fmt.Errorf("Error 1"), fmt.Errorf("Error 2")}, "Error 1;Error 2"}, + {Errors{customErr, fmt.Errorf("Error 2")}, "Custom Error Name: stdlib error;Error 2"}, + {Errors{fmt.Errorf("Error 123"), customErrWithCustomErrorMessage}, "Error 123;Bad stuff happened"}, + } + for _, test := range tests { + actual := test.param1.Error() + if actual != test.expected { + t.Errorf("Expected Error() to return '%v', got '%v'", test.expected, actual) + } + } +} diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go new file mode 100644 index 000000000..7e6c652e1 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/numerics.go @@ -0,0 +1,97 @@ +package govalidator + +import ( + "math" + "reflect" +) + +// Abs returns absolute value of number +func Abs(value float64) float64 { + return math.Abs(value) +} + +// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise +func Sign(value float64) float64 { + if value > 0 { + return 1 + } else if value < 0 { + return -1 + } else { + return 0 + } +} + +// IsNegative returns true if value < 0 +func IsNegative(value float64) bool { + return value < 0 +} + +// IsPositive returns true if value > 0 +func IsPositive(value float64) bool { + return value > 0 +} + +// IsNonNegative returns true if value >= 0 +func IsNonNegative(value float64) bool { + return value >= 0 +} + +// IsNonPositive returns true if value <= 0 +func IsNonPositive(value float64) bool { + return value <= 0 +} + +// InRange returns true if value lies between left 
and right border
+func InRangeInt(value, left, right interface{}) bool {
+	value64, _ := ToInt(value)
+	left64, _ := ToInt(left)
+	right64, _ := ToInt(right)
+	if left64 > right64 {
+		left64, right64 = right64, left64
+	}
+	return value64 >= left64 && value64 <= right64
+}
+
+// InRangeFloat32 returns true if value lies between left and right border
+func InRangeFloat32(value, left, right float32) bool {
+	if left > right {
+		left, right = right, left
+	}
+	return value >= left && value <= right
+}
+
+// InRangeFloat64 returns true if value lies between left and right border
+func InRangeFloat64(value, left, right float64) bool {
+	if left > right {
+		left, right = right, left
+	}
+	return value >= left && value <= right
+}
+
+// InRange returns true if value lies between left and right border; it handles int, float32 and float64, and all three arguments must be of the same type
+func InRange(value interface{}, left interface{}, right interface{}) bool {
+
+	reflectValue := reflect.TypeOf(value).Kind()
+	reflectLeft := reflect.TypeOf(left).Kind()
+	reflectRight := reflect.TypeOf(right).Kind()
+
+	if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int {
+		return InRangeInt(value.(int), left.(int), right.(int))
+	} else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 {
+		return InRangeFloat32(value.(float32), left.(float32), right.(float32))
+	} else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 {
+		return InRangeFloat64(value.(float64), left.(float64), right.(float64))
+	} else {
+		return false
+	}
+}
+
+// IsWhole returns true if value is a whole number
+func IsWhole(value float64) bool {
+	return math.Remainder(value, 1) == 0
+}
+
+// IsNatural returns true if value is a natural number (positive and whole)
+func IsNatural(value float64) bool {
+	return IsWhole(value) && IsPositive(value)
+}
diff --git a/vendor/github.com/asaskevich/govalidator/numerics_test.go b/vendor/github.com/asaskevich/govalidator/numerics_test.go
new file mode 100644
index 000000000..8a28415de
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/numerics_test.go
@@ -0,0 +1,549 @@
+package govalidator
+
+import "testing"
+
+func TestAbs(t *testing.T) {
+	t.Parallel()
+
+	var tests = []struct {
+		param    float64
+		expected float64
+	}{
+		{0, 0},
+		{-1, 1},
+		{10, 10},
+		{3.14, 3.14},
+		{-96, 96},
+		{-10e-12, 10e-12},
+	}
+	for _, test := range tests {
+		actual := Abs(test.param)
+		if actual != test.expected {
+			t.Errorf("Expected Abs(%v) to be %v, got %v", test.param, test.expected, actual)
+		}
+	}
+}
+
+func TestSign(t *testing.T) {
+	t.Parallel()
+
+	var tests = []struct {
+		param    float64
+		expected float64
+	}{
+		{0, 0},
+		{-1, -1},
+		{10, 1},
+		{3.14, 1},
+		{-96, -1},
+		{-10e-12, -1},
+	}
+	for _, test := range tests {
+		actual := Sign(test.param)
+		if actual != test.expected {
+			t.Errorf("Expected Sign(%v) to be %v, got %v", test.param, test.expected, actual)
+		}
+	}
+}
+
+func TestIsNegative(t *testing.T) {
+	t.Parallel()
+
+	var tests = []struct {
+		param    float64
+		expected bool
+	}{
+		{0, false},
+		{-1, true},
+		{10, false},
+		{3.14, false},
+		{-96, true},
+		{-10e-12, true},
+	}
+	for _, test := range tests {
+		actual := IsNegative(test.param)
+		if actual != test.expected {
+			t.Errorf("Expected IsNegative(%v) to be %v, got %v", test.param, test.expected, actual)
+		}
+	}
+}
+
+func TestIsNonNegative(t *testing.T) {
+	t.Parallel()
+
+	var tests = []struct {
+		param    float64
+		expected 
bool + }{ + {0, true}, + {-1, false}, + {10, true}, + {3.14, true}, + {-96, false}, + {-10e-12, false}, + } + for _, test := range tests { + actual := IsNonNegative(test.param) + if actual != test.expected { + t.Errorf("Expected IsNonNegative(%v) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsPositive(t *testing.T) { + t.Parallel() + + var tests = []struct { + param float64 + expected bool + }{ + {0, false}, + {-1, false}, + {10, true}, + {3.14, true}, + {-96, false}, + {-10e-12, false}, + } + for _, test := range tests { + actual := IsPositive(test.param) + if actual != test.expected { + t.Errorf("Expected IsPositive(%v) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsNonPositive(t *testing.T) { + t.Parallel() + + var tests = []struct { + param float64 + expected bool + }{ + {0, true}, + {-1, true}, + {10, false}, + {3.14, false}, + {-96, true}, + {-10e-12, true}, + } + for _, test := range tests { + actual := IsNonPositive(test.param) + if actual != test.expected { + t.Errorf("Expected IsNonPositive(%v) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsWhole(t *testing.T) { + t.Parallel() + + var tests = []struct { + param float64 + expected bool + }{ + {0, true}, + {-1, true}, + {10, true}, + {3.14, false}, + {-96, true}, + {-10e-12, false}, + } + for _, test := range tests { + actual := IsWhole(test.param) + if actual != test.expected { + t.Errorf("Expected IsWhole(%v) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsNatural(t *testing.T) { + t.Parallel() + + var tests = []struct { + param float64 + expected bool + }{ + {0, false}, + {-1, false}, + {10, true}, + {3.14, false}, + {96, true}, + {-10e-12, false}, + } + for _, test := range tests { + actual := IsNatural(test.param) + if actual != test.expected { + t.Errorf("Expected IsNatural(%v) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestInRangeInt(t *testing.T) { + t.Parallel() + + var testAsInts = []struct { + param int + left int + right int + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, true}, + {0, 0, 1, true}, + {0, -1, 0, true}, + {0, 0, -1, true}, + {0, 10, 5, false}, + } + for _, test := range testAsInts { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type int", test.param, test.left, test.right, test.expected, actual) + } + } + + var testAsInt8s = []struct { + param int8 + left int8 + right int8 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, true}, + {0, 0, 1, true}, + {0, -1, 0, true}, + {0, 0, -1, true}, + {0, 10, 5, false}, + } + for _, test := range testAsInt8s { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type int8", test.param, test.left, test.right, test.expected, actual) + } + } + + var testAsInt16s = []struct { + param int16 + left int16 + right int16 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, true}, + {0, 0, 1, true}, + {0, -1, 0, true}, + {0, 0, -1, true}, + {0, 10, 5, false}, + } + for _, test := range testAsInt16s { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type int16", test.param, 
test.left, test.right, test.expected, actual)
+		}
+	}
+
+	var testAsInt32s = []struct {
+		param    int32
+		left     int32
+		right    int32
+		expected bool
+	}{
+		{0, 0, 0, true},
+		{1, 0, 0, false},
+		{-1, 0, 0, false},
+		{0, -1, 1, true},
+		{0, 0, 1, true},
+		{0, -1, 0, true},
+		{0, 0, -1, true},
+		{0, 10, 5, false},
+	}
+	for _, test := range testAsInt32s {
+		actual := InRangeInt(test.param, test.left, test.right)
+		if actual != test.expected {
+			t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type int32", test.param, test.left, test.right, test.expected, actual)
+		}
+	}
+
+	var testAsInt64s = []struct {
+		param    int64
+		left     int64
+		right    int64
+		expected bool
+	}{
+		{0, 0, 0, true},
+		{1, 0, 0, false},
+		{-1, 0, 0, false},
+		{0, -1, 1, true},
+		{0, 0, 1, true},
+		{0, -1, 0, true},
+		{0, 0, -1, true},
+		{0, 10, 5, false},
+	}
+	for _, test := range testAsInt64s {
+		actual := InRangeInt(test.param, test.left, test.right)
+		if actual != test.expected {
+			t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type int64", test.param, test.left, test.right, test.expected, actual)
+		}
+	}
+
+	var testAsUInts = []struct {
+		param    uint
+		left     uint
+		right    uint
+		expected bool
+	}{
+		{0, 0, 0, true},
+		{1, 0, 0, false},
+		{0, 0, 1, true},
+		{0, 10, 5, false},
+	}
+	for _, test := range testAsUInts {
+		actual := InRangeInt(test.param, test.left, test.right)
+		if actual != test.expected {
+			t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type uint", test.param, test.left, test.right, test.expected, actual)
+		}
+	}
+
+	var testAsUInt8s = []struct {
+		param    uint8
+		left     uint8
+		right    uint8
+		expected bool
+	}{
+		{0, 0, 0, true},
+		{1, 0, 0, false},
+		{0, 0, 1, true},
+		{0, 10, 5, false},
+	}
+	for _, test := range testAsUInt8s {
+		actual := InRangeInt(test.param, test.left, test.right)
+		if actual != test.expected {
+			t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type uint8", test.param, test.left, test.right, test.expected, actual)
+		}
+	}
+
+	var testAsUInt16s = []struct {
+		param    uint16
+		left     uint16
+		right    uint16
+		expected bool
+	}{
+		{0, 0, 0, true},
+		{1, 0, 0, false},
+		{0, 0, 1, true},
+		{0, 10, 5, false},
+	}
+	for _, test := range testAsUInt16s {
+		actual := InRangeInt(test.param, test.left, test.right)
+		if actual != test.expected {
+			t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type uint16", test.param, test.left, test.right, test.expected, actual)
+		}
+	}
+
+	var testAsUInt32s = []struct {
+		param    uint32
+		left     uint32
+		right    uint32
+		expected bool
+	}{
+		{0, 0, 0, true},
+		{1, 0, 0, false},
+		{0, 0, 1, true},
+		{0, 10, 5, false},
+	}
+	for _, test := range testAsUInt32s {
+		actual := InRangeInt(test.param, test.left, test.right)
+		if actual != test.expected {
+			t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type uint32", test.param, test.left, test.right, test.expected, actual)
+		}
+	}
+
+	var testAsUInt64s = []struct {
+		param    uint64
+		left     uint64
+		right    uint64
+		expected bool
+	}{
+		{0, 0, 0, true},
+		{1, 0, 0, false},
+		{0, 0, 1, true},
+		{0, 10, 5, false},
+	}
+	for _, test := range testAsUInt64s {
+		actual := InRangeInt(test.param, test.left, test.right)
+		if actual != test.expected {
+			t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type uint64", test.param, test.left, test.right, test.expected, actual)
+		}
+	}
+
+	var testAsStrings = []struct {
+		param    string
+		left     string
+		right    string
+		expected bool
+	}{
+		{"0", "0", "0", true},
+		{"1", "0", "0", false},
+		{"-1", "0", "0", 
false}, + {"0", "-1", "1", true}, + {"0", "0", "1", true}, + {"0", "-1", "0", true}, + {"0", "0", "-1", true}, + {"0", "10", "5", false}, + } + for _, test := range testAsStrings { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type string", test.param, test.left, test.right, test.expected, actual) + } + } +} + +func TestInRangeFloat32(t *testing.T) { + t.Parallel() + + var tests = []struct { + param float32 + left float32 + right float32 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, true}, + {0, 0, 1, true}, + {0, -1, 0, true}, + {0, 0, -1, true}, + {0, 10, 5, false}, + } + for _, test := range tests { + actual := InRangeFloat32(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeFloat32(%v, %v, %v) to be %v, got %v", test.param, test.left, test.right, test.expected, actual) + } + } +} + +func TestInRangeFloat64(t *testing.T) { + t.Parallel() + + var tests = []struct { + param float64 + left float64 + right float64 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, true}, + {0, 0, 1, true}, + {0, -1, 0, true}, + {0, 0, -1, true}, + {0, 10, 5, false}, + } + for _, test := range tests { + actual := InRangeFloat64(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeFloat64(%v, %v, %v) to be %v, got %v", test.param, test.left, test.right, test.expected, actual) + } + } +} + +func TestInRange(t *testing.T) { + t.Parallel() + + var testsInt = []struct { + param int + left int + right int + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, true}, + {0, 0, 1, true}, + {0, -1, 0, true}, + {0, 0, -1, true}, + {0, 10, 5, false}, + } + for _, test := range testsInt { + actual := InRange(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRange(%v, %v, %v) to be %v, got %v", test.param, test.left, test.right, test.expected, actual) + } + } + + var testsFloat32 = []struct { + param float32 + left float32 + right float32 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, true}, + {0, 0, 1, true}, + {0, -1, 0, true}, + {0, 0, -1, true}, + {0, 10, 5, false}, + } + for _, test := range testsFloat32 { + actual := InRange(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRange(%v, %v, %v) to be %v, got %v", test.param, test.left, test.right, test.expected, actual) + } + } + + var testsFloat64 = []struct { + param float64 + left float64 + right float64 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, true}, + {0, 0, 1, true}, + {0, -1, 0, true}, + {0, 0, -1, true}, + {0, 10, 5, false}, + } + for _, test := range testsFloat64 { + actual := InRange(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRange(%v, %v, %v) to be %v, got %v", test.param, test.left, test.right, test.expected, actual) + } + } + + var testsTypeMix = []struct { + param int + left float64 + right float64 + expected bool + }{ + {0, 0, 0, false}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, false}, + {0, 0, 1, false}, + {0, -1, 0, false}, + {0, 0, -1, false}, + {0, 10, 5, false}, + } + for _, test := range testsTypeMix { + actual := InRange(test.param, test.left, test.right) + if actual != test.expected { + 
t.Errorf("Expected InRange(%v, %v, %v) to be %v, got %v", test.param, test.left, test.right, test.expected, actual) + } + } +} diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go new file mode 100644 index 000000000..61a05d438 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/patterns.go @@ -0,0 +1,101 @@ +package govalidator + +import "regexp" + +// Basic regular expressions for validating strings +const ( + Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$" + ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" + ISBN13 string = "^(?:[0-9]{13})$" + UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + Alpha string = "^[a-zA-Z]+$" + Alphanumeric string = "^[a-zA-Z0-9]+$" + Numeric string = "^[0-9]+$" + Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" + Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" + Hexadecimal string = "^[0-9a-fA-F]+$" + Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + ASCII string = "^[\x00-\x7F]+$" + Multibyte string = "[^\x00-\x7F]" + FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + PrintableASCII string = "^[\x20-\x7E]+$" + DataURI string = "^data:.+\\/(.+);base64$" + Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` + IP string = 
`(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` + URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` + URLUsername string = `(\S+(:\S*)?@)` + URLPath string = `((\/|\?|#)[^\s]*)` + URLPort string = `(:(\d{1,5}))` + URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))` + URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` + URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` + SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixPath string = `^(/[^/\x00]*)+/?$` + Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" + tagName string = "valid" + hasLowerCase string = ".*[[:lower:]]" + hasUpperCase string = ".*[[:upper:]]" + hasWhitespace string = ".*[[:space:]]" + hasWhitespaceOnly string = "^[[:space:]]+$" +) + +// Used by IsFilePath func +const ( + // Unknown is unresolved OS type + Unknown = iota + // Win is Windows type + Win + // Unix is *nix OS types + Unix +) + +var ( + userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") + hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") + userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") + rxEmail = regexp.MustCompile(Email) + rxCreditCard = regexp.MustCompile(CreditCard) + rxISBN10 = regexp.MustCompile(ISBN10) + rxISBN13 = regexp.MustCompile(ISBN13) + rxUUID3 = regexp.MustCompile(UUID3) + rxUUID4 = regexp.MustCompile(UUID4) + rxUUID5 = regexp.MustCompile(UUID5) + rxUUID = regexp.MustCompile(UUID) + rxAlpha = regexp.MustCompile(Alpha) + rxAlphanumeric = regexp.MustCompile(Alphanumeric) + rxNumeric = regexp.MustCompile(Numeric) + rxInt = regexp.MustCompile(Int) + rxFloat = regexp.MustCompile(Float) + rxHexadecimal = regexp.MustCompile(Hexadecimal) + rxHexcolor = regexp.MustCompile(Hexcolor) + rxRGBcolor = regexp.MustCompile(RGBcolor) + rxASCII = regexp.MustCompile(ASCII) + rxPrintableASCII = regexp.MustCompile(PrintableASCII) + rxMultibyte = regexp.MustCompile(Multibyte) + rxFullWidth = regexp.MustCompile(FullWidth) + rxHalfWidth = regexp.MustCompile(HalfWidth) + rxBase64 = regexp.MustCompile(Base64) + rxDataURI = regexp.MustCompile(DataURI) + rxLatitude = regexp.MustCompile(Latitude) + rxLongitude = regexp.MustCompile(Longitude) + rxDNSName = regexp.MustCompile(DNSName) + rxURL = regexp.MustCompile(URL) + rxSSN = regexp.MustCompile(SSN) + rxWinPath = regexp.MustCompile(WinPath) + rxUnixPath = 
regexp.MustCompile(UnixPath)
+  rxSemver            = regexp.MustCompile(Semver)
+  rxHasLowerCase      = regexp.MustCompile(hasLowerCase)
+  rxHasUpperCase      = regexp.MustCompile(hasUpperCase)
+  rxHasWhitespace     = regexp.MustCompile(hasWhitespace)
+  rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
+)
diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go
new file mode 100644
index 000000000..4f7e9274a
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/types.go
@@ -0,0 +1,636 @@
+package govalidator
+
+import (
+  "reflect"
+  "regexp"
+  "sort"
+  "sync"
+)
+
+// Validator is a wrapper for a validator function that returns bool and accepts string.
+type Validator func(str string) bool
+
+// CustomTypeValidator is a wrapper for validator functions that return bool and accept any type.
+// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
+type CustomTypeValidator func(i interface{}, o interface{}) bool
+
+// ParamValidator is a wrapper for validator functions that accept additional parameters.
+type ParamValidator func(str string, params ...string) bool
+type tagOptionsMap map[string]tagOption
+
+func (t tagOptionsMap) orderedKeys() []string {
+  var keys []string
+  for k := range t {
+    keys = append(keys, k)
+  }
+
+  sort.Slice(keys, func(a, b int) bool {
+    return t[keys[a]].order < t[keys[b]].order
+  })
+
+  return keys
+}
+
+type tagOption struct {
+  name               string
+  customErrorMessage string
+  order              int
+}
+
+// UnsupportedTypeError is a wrapper for reflect.Type
+type UnsupportedTypeError struct {
+  Type reflect.Type
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+// ParamTagMap is a map of functions that accept variant parameters.
+var ParamTagMap = map[string]ParamValidator{
+  "length":       ByteLength,
+  "range":        Range,
+  "runelength":   RuneLength,
+  "stringlength": StringLength,
+  "matches":      StringMatches,
+  "in":           isInRaw,
+  "rsapub":       IsRsaPub,
+}
+
+// ParamTagRegexMap maps param tags to their respective regexes.
+var ParamTagRegexMap = map[string]*regexp.Regexp{
+  "range":        regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
+  "length":       regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
+  "runelength":   regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
+  "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
+  "in":           regexp.MustCompile(`^in\((.*)\)`),
+  "matches":      regexp.MustCompile(`^matches\((.+)\)$`),
+  "rsapub":       regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
+}
+
+type customTypeTagMap struct {
+  validators map[string]CustomTypeValidator
+
+  sync.RWMutex
+}
+
+func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
+  tm.RLock()
+  defer tm.RUnlock()
+  v, ok := tm.validators[name]
+  return v, ok
+}
+
+func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
+  tm.Lock()
+  defer tm.Unlock()
+  tm.validators[name] = ctv
+}
+
+// CustomTypeTagMap is a map of functions that can be used as tags for the ValidateStruct function.
+// Use this to validate compound or custom types that need to be handled as a whole, e.g.
+// `type UUID [16]byte` (this would be handled as an array of bytes).
+var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
+
+// TagMap is a map of functions that can be used as tags for the ValidateStruct function.
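+// An illustrative sketch (not from the vendored source): with ValidateStruct, a
+// struct field tagged `valid:"email"` is checked by TagMap["email"], i.e. IsEmail:
+//
+//	type Signup struct {                    // hypothetical example type
+//		Email string `valid:"email"`
+//	}
+//	ok, err := govalidator.ValidateStruct(Signup{Email: "bad"}) // ok == false, err != nil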
+var TagMap = map[string]Validator{
+  "email":              IsEmail,
+  "url":                IsURL,
+  "dialstring":         IsDialString,
+  "requrl":             IsRequestURL,
+  "requri":             IsRequestURI,
+  "alpha":              IsAlpha,
+  "utfletter":          IsUTFLetter,
+  "alphanum":           IsAlphanumeric,
+  "utfletternum":       IsUTFLetterNumeric,
+  "numeric":            IsNumeric,
+  "utfnumeric":         IsUTFNumeric,
+  "utfdigit":           IsUTFDigit,
+  "hexadecimal":        IsHexadecimal,
+  "hexcolor":           IsHexcolor,
+  "rgbcolor":           IsRGBcolor,
+  "lowercase":          IsLowerCase,
+  "uppercase":          IsUpperCase,
+  "int":                IsInt,
+  "float":              IsFloat,
+  "null":               IsNull,
+  "uuid":               IsUUID,
+  "uuidv3":             IsUUIDv3,
+  "uuidv4":             IsUUIDv4,
+  "uuidv5":             IsUUIDv5,
+  "creditcard":         IsCreditCard,
+  "isbn10":             IsISBN10,
+  "isbn13":             IsISBN13,
+  "json":               IsJSON,
+  "multibyte":          IsMultibyte,
+  "ascii":              IsASCII,
+  "printableascii":     IsPrintableASCII,
+  "fullwidth":          IsFullWidth,
+  "halfwidth":          IsHalfWidth,
+  "variablewidth":      IsVariableWidth,
+  "base64":             IsBase64,
+  "datauri":            IsDataURI,
+  "ip":                 IsIP,
+  "port":               IsPort,
+  "ipv4":               IsIPv4,
+  "ipv6":               IsIPv6,
+  "dns":                IsDNSName,
+  "host":               IsHost,
+  "mac":                IsMAC,
+  "latitude":           IsLatitude,
+  "longitude":          IsLongitude,
+  "ssn":                IsSSN,
+  "semver":             IsSemver,
+  "rfc3339":            IsRFC3339,
+  "rfc3339WithoutZone": IsRFC3339WithoutZone,
+  "ISO3166Alpha2":      IsISO3166Alpha2,
+  "ISO3166Alpha3":      IsISO3166Alpha3,
+  "ISO4217":            IsISO4217,
+}
+
+// ISO3166Entry stores country codes
+type ISO3166Entry struct {
+  EnglishShortName string
+  FrenchShortName  string
+  Alpha2Code       string
+  Alpha3Code       string
+  Numeric          string
+}
+
+//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
+var ISO3166List = []ISO3166Entry{
+  {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
+  {"Albania", "Albanie (l')", "AL", "ALB", "008"},
+  {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
+  {"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
+  {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
+  {"Andorra", "Andorre (l')", "AD", "AND", "020"},
+  {"Angola", "Angola (l')", "AO", "AGO", "024"},
+  {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
+  {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
+  {"Argentina", "Argentine (l')", "AR", "ARG", "032"},
+  {"Australia", "Australie (l')", "AU", "AUS", "036"},
+  {"Austria", "Autriche (l')", "AT", "AUT", "040"},
+  {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
+  {"Bahrain", "Bahreïn", "BH", "BHR", "048"},
+  {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
+  {"Armenia", "Arménie (l')", "AM", "ARM", "051"},
+  {"Barbados", "Barbade (la)", "BB", "BRB", "052"},
+  {"Belgium", "Belgique (la)", "BE", "BEL", "056"},
+  {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
+  {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
+  {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
+  {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
+  {"Botswana", "Botswana (le)", "BW", "BWA", "072"},
+  {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
+  {"Brazil", "Brésil (le)", "BR", "BRA", "076"},
+  {"Belize", "Belize (le)", "BZ", "BLZ", "084"},
+  {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
+  {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
+  {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
+  {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
+  {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
+  {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
+  {"Burundi", "Burundi (le)", "BI", "BDI", "108"},
+  {"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
+  {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
+  {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
+  {"Canada", "Canada (le)", "CA", "CAN", "124"},
+  {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
+  {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
+  {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
+  {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
+  {"Chad", "Tchad (le)", "TD", "TCD", "148"},
+  {"Chile", "Chili (le)", "CL", "CHL", "152"},
+  {"China", "Chine (la)", "CN", "CHN", "156"},
+  {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
+  {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
+  {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
+  {"Colombia", "Colombie (la)", "CO", "COL", "170"},
+  {"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
+  {"Mayotte", "Mayotte", "YT", "MYT", "175"},
+  {"Congo (the)", "Congo (le)", "CG", "COG", "178"},
+  {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
+  {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
+  {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
+  {"Croatia", "Croatie (la)", "HR", "HRV", "191"},
+  {"Cuba", "Cuba", "CU", "CUB", "192"},
+  {"Cyprus", "Chypre", "CY", "CYP", "196"},
+  {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
+  {"Benin", "Bénin (le)", "BJ", "BEN", "204"},
+  {"Denmark", "Danemark (le)", "DK", "DNK", "208"},
+  {"Dominica", "Dominique (la)", "DM", "DMA", "212"},
+  {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
+  {"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
+  {"El Salvador", "El Salvador", "SV", "SLV", "222"},
+  {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
+  {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
+  {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
+  {"Estonia", "Estonie (l')", "EE", "EST", "233"},
+  {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
+  {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
+  {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
+  {"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
+  {"Finland", "Finlande (la)", "FI", "FIN", "246"},
+  {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
+  {"France", "France (la)", "FR", "FRA", "250"},
+  {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
+  {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
+  {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
+  {"Djibouti", "Djibouti", "DJ", "DJI", "262"},
+  {"Gabon", "Gabon (le)", "GA", "GAB", "266"},
+  {"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
+  {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
+  {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
+  {"Germany", "Allemagne (l')", "DE", "DEU", "276"},
+  {"Ghana", "Ghana (le)", "GH", "GHA", "288"},
+  {"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
+  {"Kiribati", "Kiribati", "KI", "KIR", "296"},
+  {"Greece", "Grèce (la)", "GR", "GRC", "300"},
+  {"Greenland", "Groenland (le)", "GL", "GRL", "304"},
+  {"Grenada", "Grenade (la)", "GD", "GRD", "308"},
+  {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
+  {"Guam", "Guam", "GU", "GUM", "316"},
+  {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
+  {"Guinea", "Guinée (la)", "GN", "GIN", "324"},
+  {"Guyana", "Guyana (le)", "GY", "GUY", "328"},
+  {"Haiti", "Haïti", "HT", "HTI", "332"},
+  {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
+  {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
+  {"Honduras", "Honduras (le)", "HN", "HND", "340"},
+  {"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
+  {"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
+  {"Iceland", "Islande (l')", "IS", "ISL", "352"},
+  {"India", "Inde (l')", "IN", "IND", "356"},
+  {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
+  {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
+  {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
+  {"Ireland", "Irlande (l')", "IE", "IRL", "372"},
+  {"Israel", "Israël", "IL", "ISR", "376"},
+  {"Italy", "Italie (l')", "IT", "ITA", "380"},
+  {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
+  {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
+  {"Japan", "Japon (le)", "JP", "JPN", "392"},
+  {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
+  {"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
+  {"Kenya", "Kenya (le)", "KE", "KEN", "404"},
+  {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
+  {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
+  {"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
+  {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
+  {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
+  {"Lebanon", "Liban (le)", "LB", "LBN", "422"},
+  {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
+  {"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
+  {"Liberia", "Libéria (le)", "LR", "LBR", "430"},
+  {"Libya", "Libye (la)", "LY", "LBY", "434"},
+  {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
+  {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
+  {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
+  {"Macao", "Macao", "MO", "MAC", "446"},
+  {"Madagascar", "Madagascar", "MG", "MDG", "450"},
+  {"Malawi", "Malawi (le)", "MW", "MWI", "454"},
+  {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
+  {"Maldives", "Maldives (les)", "MV", "MDV", "462"},
+  {"Mali", "Mali (le)", "ML", "MLI", "466"},
+  {"Malta", "Malte", "MT", "MLT", "470"},
+  {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
+  {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
+  {"Mauritius", "Maurice", "MU", "MUS", "480"},
+  {"Mexico", "Mexique (le)", "MX", "MEX", "484"},
+  {"Monaco", "Monaco", "MC", "MCO", "492"},
+  {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
+  {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
+  {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
+  {"Montserrat", "Montserrat", "MS", "MSR", "500"},
+  {"Morocco", "Maroc (le)", "MA", "MAR", "504"},
+  {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
+  {"Oman", "Oman", "OM", "OMN", "512"},
+  {"Namibia", "Namibie (la)", "NA", "NAM", "516"},
+  {"Nauru", "Nauru", "NR", "NRU", "520"},
+  {"Nepal", "Népal (le)", "NP", "NPL", "524"},
+  {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
+  {"Curaçao", "Curaçao", "CW", "CUW", "531"},
+  {"Aruba", "Aruba", "AW", "ABW", "533"},
+  {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
+  {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
+  {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
+  {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
+  {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
+  {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
+  {"Niger (the)", "Niger (le)", "NE", "NER", "562"},
+  {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
+  {"Niue", "Niue", "NU", "NIU", "570"},
+  {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
+  {"Norway", "Norvège (la)", "NO", "NOR", "578"},
+  {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
+  {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
+  {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
+  {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
+  {"Palau", "Palaos (les)", "PW", "PLW", "585"},
+  {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
+  {"Panama", "Panama (le)", "PA", "PAN", "591"},
+  {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
+  {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
+  {"Peru", "Pérou (le)", "PE", "PER", "604"},
+  {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
+  {"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
+  {"Poland", "Pologne (la)", "PL", "POL", "616"},
+  {"Portugal", "Portugal (le)", "PT", "PRT", "620"},
+  {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
+  {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
+  {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
+  {"Qatar", "Qatar (le)", "QA", "QAT", "634"},
+  {"Réunion", "Réunion (La)", "RE", "REU", "638"},
+  {"Romania", "Roumanie (la)", "RO", "ROU", "642"},
+  {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
+  {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
+  {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
+  {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
+  {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
+  {"Anguilla", "Anguilla", "AI", "AIA", "660"},
+  {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
+  {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
+  {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
+  {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
+  {"San Marino", "Saint-Marin", "SM", "SMR", "674"},
+  {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
+  {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
+  {"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
+  {"Serbia", "Serbie (la)", "RS", "SRB", "688"},
+  {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
+  {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
+  {"Singapore", "Singapour", "SG", "SGP", "702"},
+  {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
+  {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
+  {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
+  {"Somalia", "Somalie (la)", "SO", "SOM", "706"},
+  {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
+  {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
+  {"Spain", "Espagne (l')", "ES", "ESP", "724"},
+  {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
+  {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
+  {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
+  {"Suriname", "Suriname (le)", "SR", "SUR", "740"},
+  {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
+  {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
+  {"Sweden", "Suède (la)", "SE", "SWE", "752"},
+  {"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
+  {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
+  {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
+  {"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
+  {"Togo", "Togo (le)", "TG", "TGO", "768"},
+  {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
+  {"Tonga", "Tonga (les)", "TO", "TON", "776"},
+  {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
+  {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
+  {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
+  {"Turkey", "Turquie (la)", "TR", "TUR", "792"},
+  {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
+  {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
+  {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
+  {"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
+  {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
+  {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
+  {"Egypt", "Égypte (l')", "EG", "EGY", "818"},
+  {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
+  {"Guernsey", "Guernesey", "GG", "GGY", "831"},
+  {"Jersey", "Jersey", "JE", "JEY", "832"},
+  {"Isle of Man", "Île de Man", "IM", "IMN", "833"},
+  {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
+  {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
+  {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
+  {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
+  {"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
+  {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
+  {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
+  {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
+  {"Samoa", "Samoa (le)", "WS", "WSM", "882"},
+  {"Yemen", "Yémen (le)", "YE", "YEM", "887"},
+  {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
+}
+
+// ISO4217List is the list of ISO currency codes
+var ISO4217List = []string{
+  "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
+  "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD",
+  "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
+  "DJF", "DKK", "DOP", "DZD",
+  "EGP", "ERN", "ETB", "EUR",
+  "FJD", "FKP",
+  "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD",
+  "HKD", "HNL", "HRK", "HTG", "HUF",
+  "IDR", "ILS", "INR", "IQD", "IRR", "ISK",
+  "JMD", "JOD", "JPY",
+  "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT",
+  "LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
+  "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN",
+  "NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
+  "OMR",
+  "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
+  "QAR",
+  "RON", "RSD", "RUB", "RWF",
+  "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL",
+  "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD",
"TZS", + "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS", + "VEF", "VND", "VUV", + "WST", + "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", + "YER", + "ZAR", "ZMW", "ZWL", +} + +// ISO693Entry stores ISO language codes +type ISO693Entry struct { + Alpha3bCode string + Alpha2Code string + English string +} + +//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json +var ISO693List = []ISO693Entry{ + {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, + {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, + {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, + {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, + {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, + {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, + {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, + {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, + {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, + {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, + {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, + {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, + {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, + {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, + {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, + {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, + {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, + {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, + {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, + {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, + {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, + {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, + {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, + {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, + {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, + {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, + {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, + {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, + {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, + {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, + {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, + {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, + {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, + {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, + {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, + {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, + {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, + {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, + {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, + {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, + {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, + {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, + {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, + {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, + {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, + {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, + {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"}, + 
{Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, + {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, + {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, + {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, + {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, + {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, + {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, + {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, + {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, + {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, + {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, + {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, + {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, + {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, + {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, + {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, + {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, + {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, + {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, + {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, + {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, + {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, + {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, + {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, + {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, + {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, + {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, + {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, + {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, + {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, + {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, + {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, + {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, + {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, + {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, + {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, + {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, + {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, + {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, + {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, + {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, + {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, + {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, + {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, + {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, + {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, + {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, + {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, + {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, + {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, + {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, + {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, + {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, + {Alpha3bCode: "lug", Alpha2Code: "lg", English: 
"Ganda"}, + {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, + {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, + {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, + {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, + {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, + {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, + {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, + {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, + {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, + {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, + {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, + {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, + {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, + {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, + {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, + {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, + {Alpha3bCode: "nob", Alpha2Code: "nb", English: "BokmÃ¥l, Norwegian; Norwegian BokmÃ¥l"}, + {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, + {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, + {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, + {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, + {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, + {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, + {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, + {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, + {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, + {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, + {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, + {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, + {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, + {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, + {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, + {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, + {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, + {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, + {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, + {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, + {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, + {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, + {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, + {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, + {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, + {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, + {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, + {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, + {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, + {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, + {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, + {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, + {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, + {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, + {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, + {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"}, + {Alpha3bCode: "tah", 
Alpha2Code: "ty", English: "Tahitian"}, + {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, + {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, + {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, + {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, + {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, + {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, + {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, + {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, + {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, + {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, + {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, + {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, + {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, + {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, + {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, + {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, + {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, + {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, + {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, + {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, + {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, + {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, + {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, + {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, + {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, + {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, + {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, + {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, + {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, +} diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go new file mode 100644 index 000000000..a0b706a74 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/utils.go @@ -0,0 +1,270 @@ +package govalidator + +import ( + "errors" + "fmt" + "html" + "math" + "path" + "regexp" + "strings" + "unicode" + "unicode/utf8" +) + +// Contains check if the string contains the substring. +func Contains(str, substring string) bool { + return strings.Contains(str, substring) +} + +// Matches check if string matches the pattern (pattern is regular expression) +// In case of error return false +func Matches(str, pattern string) bool { + match, _ := regexp.MatchString(pattern, str) + return match +} + +// LeftTrim trim characters from the left-side of the input. +// If second argument is empty, it's will be remove leading spaces. +func LeftTrim(str, chars string) string { + if chars == "" { + return strings.TrimLeftFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("^[" + chars + "]+") + return r.ReplaceAllString(str, "") +} + +// RightTrim trim characters from the right-side of the input. +// If second argument is empty, it's will be remove spaces. +func RightTrim(str, chars string) string { + if chars == "" { + return strings.TrimRightFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("[" + chars + "]+$") + return r.ReplaceAllString(str, "") +} + +// Trim trim characters from both sides of the input. +// If second argument is empty, it's will be remove spaces. +func Trim(str, chars string) string { + return LeftTrim(RightTrim(str, chars), chars) +} + +// WhiteList remove characters that do not appear in the whitelist. 
+func WhiteList(str, chars string) string {
+  pattern := "[^" + chars + "]+"
+  r, _ := regexp.Compile(pattern)
+  return r.ReplaceAllString(str, "")
+}
+
+// BlackList removes characters that appear in the blacklist.
+func BlackList(str, chars string) string {
+  pattern := "[" + chars + "]+"
+  r, _ := regexp.Compile(pattern)
+  return r.ReplaceAllString(str, "")
+}
+
+// StripLow removes characters with a numerical value < 32, as well as 127, mostly control characters.
+// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
+func StripLow(str string, keepNewLines bool) string {
+  chars := ""
+  if keepNewLines {
+    chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
+  } else {
+    chars = "\x00-\x1F\x7F"
+  }
+  return BlackList(str, chars)
+}
+
+// ReplacePattern replaces all matches of the regular expression pattern in the string.
+func ReplacePattern(str, pattern, replace string) string {
+  r, _ := regexp.Compile(pattern)
+  return r.ReplaceAllString(str, replace)
+}
+
+// Escape replaces <, >, & and " with HTML entities.
+var Escape = html.EscapeString
+
+func addSegment(inrune, segment []rune) []rune {
+  if len(segment) == 0 {
+    return inrune
+  }
+  if len(inrune) != 0 {
+    inrune = append(inrune, '_')
+  }
+  inrune = append(inrune, segment...)
+  return inrune
+}
+
+// UnderscoreToCamelCase converts from underscore separated form to camel case form.
+// Ex.: my_func => MyFunc
+func UnderscoreToCamelCase(s string) string {
+  return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
+}
+
+// CamelCaseToUnderscore converts from camel case form to underscore separated form.
+// Ex.: MyFunc => my_func
+func CamelCaseToUnderscore(str string) string {
+  var output []rune
+  var segment []rune
+  for _, r := range str {
+
+    // do not treat numbers as separate segments
+    if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
+      output = addSegment(output, segment)
+      segment = nil
+    }
+    segment = append(segment, unicode.ToLower(r))
+  }
+  output = addSegment(output, segment)
+  return string(output)
+}
+
+// Reverse returns the reversed string.
+func Reverse(s string) string {
+  r := []rune(s)
+  for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
+    r[i], r[j] = r[j], r[i]
+  }
+  return string(r)
+}
+
+// GetLines splits the string by "\n" and returns an array of lines.
+func GetLines(s string) []string {
+  return strings.Split(s, "\n")
+}
+
+// GetLine returns the specified line of a multiline string.
+func GetLine(s string, index int) (string, error) {
+  lines := GetLines(s)
+  if index < 0 || index >= len(lines) {
+    return "", errors.New("line index out of bounds")
+  }
+  return lines[index], nil
+}
+
+// RemoveTags removes all tags from an HTML string.
+func RemoveTags(s string) string {
+  return ReplacePattern(s, "<[^>]*>", "")
+}
+
+// SafeFileName returns a safe string that can be used in file names.
+func SafeFileName(str string) string {
+  name := strings.ToLower(str)
+  name = path.Clean(path.Base(name))
+  name = strings.Trim(name, " ")
+  separators, err := regexp.Compile(`[ &_=+:]`)
+  if err == nil {
+    name = separators.ReplaceAllString(name, "-")
+  }
+  legal, err := regexp.Compile(`[^[:alnum:]-.]`)
+  if err == nil {
+    name = legal.ReplaceAllString(name, "")
+  }
+  for strings.Contains(name, "--") {
+    name = strings.Replace(name, "--", "-", -1)
+  }
+  return name
+}
+
+// NormalizeEmail canonicalizes an email address.
+// Both the local part and the hostname are lowercased. Normalization follows special rules
+// for known providers: currently, GMail addresses have dots removed in the local part and
+// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com), and all
+// @googlemail.com addresses are normalized to @gmail.com.
+func NormalizeEmail(str string) (string, error) {
+  if !IsEmail(str) {
+    return "", fmt.Errorf("%s is not an email", str)
+  }
+  parts := strings.Split(str, "@")
+  parts[0] = strings.ToLower(parts[0])
+  parts[1] = strings.ToLower(parts[1])
+  if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
+    parts[1] = "gmail.com"
+    parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
+  }
+  return strings.Join(parts, "@"), nil
+}
+
+// Truncate truncates a string to the closest length without breaking words.
+func Truncate(str string, length int, ending string) string {
+  var aftstr, befstr string
+  if len(str) > length {
+    words := strings.Fields(str)
+    before, present := 0, 0
+    for i := range words {
+      befstr = aftstr
+      before = present
+      aftstr = aftstr + words[i] + " "
+      present = len(aftstr)
+      if present > length && i != 0 {
+        if (length - before) < (present - length) {
+          return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
+        }
+        return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
+      }
+    }
+  }
+
+  return str
+}
+
+// PadLeft pads the left side of the string if its rune count is less than the indicated pad length.
+func PadLeft(str string, padStr string, padLen int) string {
+  return buildPadStr(str, padStr, padLen, true, false)
+}
+
+// PadRight pads the right side of the string if its rune count is less than the indicated pad length.
+func PadRight(str string, padStr string, padLen int) string {
+  return buildPadStr(str, padStr, padLen, false, true)
+}
+
+// PadBoth pads both sides of the string if its rune count is less than the indicated pad length.
+func PadBoth(str string, padStr string, padLen int) string {
+  return buildPadStr(str, padStr, padLen, true, true)
+}
+
+// buildPadStr pads the string on the left, right, or both sides; note that the padding
+// string can be unicode and more than one character.
+func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
+
+  // When the padded length is less than the current string size
+  if padLen < utf8.RuneCountInString(str) {
+    return str
+  }
+
+  padLen -= utf8.RuneCountInString(str)
+
+  targetLen := padLen
+
+  targetLenLeft := targetLen
+  targetLenRight := targetLen
+  if padLeft && padRight {
+    targetLenLeft = padLen / 2
+    targetLenRight = padLen - targetLenLeft
+  }
+
+  strToRepeatLen := utf8.RuneCountInString(padStr)
+
+  repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
+  repeatedString := strings.Repeat(padStr, repeatTimes)
+
+  leftSide := ""
+  if padLeft {
+    leftSide = repeatedString[0:targetLenLeft]
+  }
+
+  rightSide := ""
+  if padRight {
+    rightSide = repeatedString[0:targetLenRight]
+  }
+
+  return leftSide + str + rightSide
+}
+
+// TruncatingErrorf removes extra args passed to fmt.Errorf that are not referenced in the format string.
+func TruncatingErrorf(str string, args ...interface{}) error {
+  n := strings.Count(str, "%s")
+  return fmt.Errorf(str, args[:n]...)
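+  // Editorial note: only "%s" verbs are counted above, so a format string using
+  // other verbs (e.g. "%d") would pass too few arguments through to fmt.Errorf.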
+}
diff --git a/vendor/github.com/asaskevich/govalidator/utils_benchmark_test.go b/vendor/github.com/asaskevich/govalidator/utils_benchmark_test.go
new file mode 100644
index 000000000..2ac4b6102
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/utils_benchmark_test.go
@@ -0,0 +1,17 @@
+package govalidator
+
+import "testing"
+
+func BenchmarkContains(b *testing.B) {
+  b.ResetTimer()
+  for n := 0; n < b.N; n++ {
+    Contains("a0b01c012deffghijklmnopqrstu0123456vwxyz", "0123456789")
+  }
+}
+
+func BenchmarkMatches(b *testing.B) {
+  b.ResetTimer()
+  for n := 0; n < b.N; n++ {
+    Matches("alfkjl12309fdjldfsa209jlksdfjLAKJjs9uJH234", "[\\w\\d]+")
+  }
+}
diff --git a/vendor/github.com/asaskevich/govalidator/utils_test.go b/vendor/github.com/asaskevich/govalidator/utils_test.go
new file mode 100644
index 000000000..97c97a534
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/utils_test.go
@@ -0,0 +1,502 @@
+package govalidator
+
+import (
+  "reflect"
+  "testing"
+)
+
+func TestContains(t *testing.T) {
+  t.Parallel()
+
+  var tests = []struct {
+    param1   string
+    param2   string
+    expected bool
+  }{
+    {"abacada", "", true},
+    {"abacada", "ritir", false},
+    {"abacada", "a", true},
+    {"abacada", "aca", true},
+  }
+  for _, test := range tests {
+    actual := Contains(test.param1, test.param2)
+    if actual != test.expected {
+      t.Errorf("Expected Contains(%q,%q) to be %v, got %v", test.param1, test.param2, test.expected, actual)
+    }
+  }
+}
+
+func TestMatches(t *testing.T) {
+  t.Parallel()
+
+  var tests = []struct {
+    param1   string
+    param2   string
+    expected bool
+  }{
+    {"123456789", "[0-9]+", true},
+    {"abacada", "cab$", false},
+    {"111222333", "((111|222|333)+)+", true},
+    {"abacaba", "((123+]", false},
+  }
+  for _, test := range tests {
+    actual := Matches(test.param1, test.param2)
+    if actual != test.expected {
+      t.Errorf("Expected Matches(%q,%q) to be %v, got %v", test.param1, test.param2, test.expected, actual)
+    }
+  }
+}
+
+func TestLeftTrim(t *testing.T) {
+  t.Parallel()
+
+  var tests = []struct {
+    param1   string
+    param2   string
+    expected string
+  }{
+    {"  \r\n\tfoo  \r\n\t   ", "", "foo  \r\n\t   "},
+    {"010100201000", "01", "201000"},
+  }
+  for _, test := range tests {
+    actual := LeftTrim(test.param1, test.param2)
+    if actual != test.expected {
+      t.Errorf("Expected LeftTrim(%q,%q) to be %v, got %v", test.param1, test.param2, test.expected, actual)
+    }
+  }
+}
+
+func TestRightTrim(t *testing.T) {
+  t.Parallel()
+
+  var tests = []struct {
+    param1   string
+    param2   string
+    expected string
+  }{
+    {"  \r\n\tfoo  \r\n\t   ", "", "  \r\n\tfoo"},
+    {"010100201000", "01", "0101002"},
+  }
+  for _, test := range tests {
+    actual := RightTrim(test.param1, test.param2)
+    if actual != test.expected {
+      t.Errorf("Expected RightTrim(%q,%q) to be %v, got %v", test.param1, test.param2, test.expected, actual)
+    }
+  }
+}
+
+func TestTrim(t *testing.T) {
+  t.Parallel()
+
+  var tests = []struct {
+    param1   string
+    param2   string
+    expected string
+  }{
+    {"  \r\n\tfoo  \r\n\t   ", "", "foo"},
+    {"010100201000", "01", "2"},
+    {"1234567890987654321", "1-8", "909"},
+  }
+  for _, test := range tests {
+    actual := Trim(test.param1, test.param2)
+    if actual != test.expected {
+      t.Errorf("Expected Trim(%q,%q) to be %v, got %v", test.param1, test.param2, test.expected, actual)
+    }
+  }
+}
+
+// This small example illustrates how to work with the Trim function.
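+// Note that Trim(s, "") is equivalent to strings.TrimSpace(s): an empty chars
+// argument makes LeftTrim and RightTrim fall back to trimming unicode whitespace.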
+func ExampleTrim() {
+  // Remove spaces and "\r", "\n", "\t" characters from the left and right.
+  println(Trim(" \r\r\ntext\r \t\n", "") == "text")
+  // Remove characters between "1" and "8" from the left and right.
+  // "1-8" is shorthand for the full list "12345678".
+  println(Trim("1234567890987654321", "1-8") == "909")
+}
+
+func TestWhiteList(t *testing.T) {
+  t.Parallel()
+
+  var tests = []struct {
+    param1   string
+    param2   string
+    expected string
+  }{
+    {"abcdef", "abc", "abc"},
+    {"aaaaaaaaaabbbbbbbbbb", "abc", "aaaaaaaaaabbbbbbbbbb"},
+    {"a1b2c3", "abc", "abc"},
+    {"   ", "abc", ""},
+    {"a3a43a5a4a3a2a23a4a5a4a3a4", "a-z", "aaaaaaaaaaaa"},
+  }
+  for _, test := range tests {
+    actual := WhiteList(test.param1, test.param2)
+    if actual != test.expected {
+      t.Errorf("Expected WhiteList(%q,%q) to be %v, got %v", test.param1, test.param2, test.expected, actual)
+    }
+  }
+}
+
+// This small example illustrates how to work with the WhiteList function.
+func ExampleWhiteList() {
+  // Remove all characters from the string except those between "a" and "z".
+  println(WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
+}
+
+func TestBlackList(t *testing.T) {
+  t.Parallel()
+
+  var tests = []struct {
+    param1   string
+    param2   string
+    expected string
+  }{
+    {"abcdef", "abc", "def"},
+    {"aaaaaaaaaabbbbbbbbbb", "abc", ""},
+    {"a1b2c3", "abc", "123"},
+    {"   ", "abc", "   "},
+    {"a3a43a5a4a3a2a23a4a5a4a3a4", "a-z", "34354322345434"},
+  }
+  for _, test := range tests {
+    actual := BlackList(test.param1, test.param2)
+    if actual != test.expected {
+      t.Errorf("Expected BlackList(%q,%q) to be %v, got %v", test.param1, test.param2, test.expected, actual)
+    }
+  }
+}
+
+func TestStripLow(t *testing.T) {
+  t.Parallel()
+
+  var tests = []struct {
+    param1   string
+    param2   bool
+    expected string
+  }{
+    {"foo\x00", false, "foo"},
+    {"\x7Ffoo\x02", false, "foo"},
+    {"\x01\x09", false, ""},
+    {"foo\x0A\x0D", false, "foo"},
+    {"perch\u00e9", false, "perch\u00e9"},
+    {"\u20ac", false, "\u20ac"},
+    {"\u2206\x0A", false, "\u2206"},
+    {"foo\x0A\x0D", true, "foo\x0A\x0D"},
+    {"\x03foo\x0A\x0D", true, "foo\x0A\x0D"},
+  }
+  for _, test := range tests {
+    actual := StripLow(test.param1, test.param2)
+    if actual != test.expected {
+      t.Errorf("Expected StripLow(%q,%t) to be %v, got %v", test.param1, test.param2, test.expected, actual)
+    }
+  }
+}
+
+func TestReplacePattern(t *testing.T) {
+  t.Parallel()
+
+  var tests = []struct {
+    param1   string
+    param2   string
+    param3   string
+    expected string
+  }{
+    {"ab123ba", "[0-9]+", "aca", "abacaba"},
+    {"abacaba", "[0-9]+", "aca", "abacaba"},
+    {"httpftp://github.comio", "(ftp|io)", "", "http://github.com"},
+    {"aaaaaaaaaa", "a", "", ""},
+    {"http123123ftp://git534543hub.comio", "(ftp|io|[0-9]+)", "", "http://github.com"},
+  }
+  for _, test := range tests {
+    actual := ReplacePattern(test.param1, test.param2, test.param3)
+    if actual != test.expected {
+      t.Errorf("Expected ReplacePattern(%q,%q,%q) to be %v, got %v", test.param1, test.param2, test.param3, test.expected, actual)
+    }
+  }
+}
+
+// This small example illustrates how to work with the ReplacePattern function.
+func ExampleReplacePattern() {
+  // Replace in "http123123ftp://git534543hub.comio" the following (pattern "(ftp|io|[0-9]+)"):
+  // - Sequence "ftp".
+  // - Sequence "io".
+  // - Sequence of digits.
+  // with empty string.
+ println(ReplacePattern("http123123ftp://git534543hub.comio", "(ftp|io|[0-9]+)", "") == "http://github.com") +} + +func TestEscape(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected string + }{ + {`foo&bar`, "<img alt="foo&bar">"}, + } + for _, test := range tests { + actual := Escape(test.param) + if actual != test.expected { + t.Errorf("Expected Escape(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestUnderscoreToCamelCase(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected string + }{ + {"a_b_c", "ABC"}, + {"my_func", "MyFunc"}, + {"1ab_cd", "1abCd"}, + } + for _, test := range tests { + actual := UnderscoreToCamelCase(test.param) + if actual != test.expected { + t.Errorf("Expected UnderscoreToCamelCase(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestCamelCaseToUnderscore(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected string + }{ + {"MyFunc", "my_func"}, + {"ABC", "a_b_c"}, + {"1B", "1_b"}, + {"foo_bar", "foo_bar"}, + {"FooV2Bar", "foo_v2_bar"}, + } + for _, test := range tests { + actual := CamelCaseToUnderscore(test.param) + if actual != test.expected { + t.Errorf("Expected CamelCaseToUnderscore(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestReverse(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected string + }{ + {"abc", "cba"}, + {"カタカナ", "ナカタカ"}, + } + for _, test := range tests { + actual := Reverse(test.param) + if actual != test.expected { + t.Errorf("Expected Reverse(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestGetLines(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected []string + }{ + {"abc", []string{"abc"}}, + {"a\nb\nc", []string{"a", "b", "c"}}, + } + for _, test := range tests { + actual := GetLines(test.param) + if !reflect.DeepEqual(actual, test.expected) { + t.Errorf("Expected GetLines(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestGetLine(t *testing.T) { + t.Parallel() + + var tests = []struct { + param1 string + param2 int + expected string + }{ + {"abc", 0, "abc"}, + {"a\nb\nc", 0, "a"}, + {"abc", -1, ""}, + {"abacaba\n", 1, ""}, + {"abc", 3, ""}, + } + for _, test := range tests { + actual, _ := GetLine(test.param1, test.param2) + if actual != test.expected { + t.Errorf("Expected GetLine(%q, %d) to be %v, got %v", test.param1, test.param2, test.expected, actual) + } + } +} + +func TestRemoveTags(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected string + }{ + {"abc", "abc"}, + {"", ""}, + {"", "Text"}, + {`Link`, "Link"}, + } + for _, test := range tests { + actual := RemoveTags(test.param) + if actual != test.expected { + t.Errorf("Expected RemoveTags(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestSafeFileName(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected string + }{ + {"abc", "abc"}, + {"123456789 '_-?ASDF@£$%£%^é.html", "123456789-asdf.html"}, + {"ReadMe.md", "readme.md"}, + {"file:///c:/test.go", "test.go"}, + {"../../../Hello World!.txt", "hello-world.txt"}, + } + for _, test := range tests { + actual := SafeFileName(test.param) + if actual != test.expected { + t.Errorf("Expected SafeFileName(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestNormalizeEmail(t *testing.T) { + t.Parallel() + + var 
+		param    string
+		expected string
+	}{
+		{`test@me.com`, `test@me.com`},
+		{`some.name@gmail.com`, `somename@gmail.com`},
+		{`some.name@googlemail.com`, `somename@gmail.com`},
+		{`some.name+extension@gmail.com`, `somename@gmail.com`},
+		{`some.name+extension@googlemail.com`, `somename@gmail.com`},
+		{`some.name.middlename+extension@gmail.com`, `somenamemiddlename@gmail.com`},
+		{`some.name.middlename+extension@googlemail.com`, `somenamemiddlename@gmail.com`},
+		{`some.name.midd.lena.me.+extension@gmail.com`, `somenamemiddlename@gmail.com`},
+		{`some.name.midd.lena.me.+extension@googlemail.com`, `somenamemiddlename@gmail.com`},
+		{`some.name+extension@unknown.com`, `some.name+extension@unknown.com`},
+		// TODO: {`hans@m端ller.com`, `hans@m端ller.com`},
+		{`hans`, ``},
+	}
+	for _, test := range tests {
+		actual, err := NormalizeEmail(test.param)
+		if actual != test.expected {
+			t.Errorf("Expected NormalizeEmail(%q) to be %v, got %v, err %v", test.param, test.expected, actual, err)
+		}
+	}
+}
+
+func TestTruncate(t *testing.T) {
+	t.Parallel()
+
+	var tests = []struct {
+		param1   string
+		param2   int
+		param3   string
+		expected string
+	}{
+		{`Lorem ipsum dolor sit amet, consectetur adipiscing elit.`, 25, `...`, `Lorem ipsum dolor sit amet...`},
+		{`Measuring programming progress by lines of code is like measuring aircraft building progress by weight.`, 35, ` new born babies!`, `Measuring programming progress by new born babies!`},
+		{`Testestestestestestestestestest testestestestestestestestest`, 7, `...`, `Testestestestestestestestestest...`},
+		{`Testing`, 7, `...`, `Testing`},
+	}
+	for _, test := range tests {
+		actual := Truncate(test.param1, test.param2, test.param3)
+		if actual != test.expected {
+			t.Errorf("Expected Truncate(%q, %d, %q) to be %v, got %v", test.param1, test.param2, test.param3, test.expected, actual)
+		}
+	}
+}
+
+func TestPadLeft(t *testing.T) {
+	t.Parallel()
+
+	var tests = []struct {
+		param1   string
+		param2   string
+		param3   int
+		expected string
+	}{
+		{"こんにちは", "xyz", 12, "xyzxyzxこんにちは"},
+		{"こんにちは", "xyz", 11, "xyzxyzこんにちは"},
+		{"abc", "x", 5, "xxabc"},
+		{"abc", "xyz", 5, "xyabc"},
+		{"abcde", "xyz", 5, "abcde"},
+		{"abcde", "xyz", 4, "abcde"},
+	}
+	for _, test := range tests {
+		actual := PadLeft(test.param1, test.param2, test.param3)
+		if actual != test.expected {
+			t.Errorf("Expected PadLeft(%q,%q,%d) to be %v, got %v", test.param1, test.param2, test.param3, test.expected, actual)
+		}
+	}
+}
+
+func TestPadRight(t *testing.T) {
+	t.Parallel()
+
+	var tests = []struct {
+		param1   string
+		param2   string
+		param3   int
+		expected string
+	}{
+		{"こんにちは", "xyz", 12, "こんにちはxyzxyzx"},
+		{"こんにちは", "xyz", 11, "こんにちはxyzxyz"},
+		{"abc", "x", 5, "abcxx"},
+		{"abc", "xyz", 5, "abcxy"},
+		{"abcde", "xyz", 5, "abcde"},
+		{"abcde", "xyz", 4, "abcde"},
+	}
+	for _, test := range tests {
+		actual := PadRight(test.param1, test.param2, test.param3)
+		if actual != test.expected {
+			t.Errorf("Expected PadRight(%q,%q,%d) to be %v, got %v", test.param1, test.param2, test.param3, test.expected, actual)
+		}
+	}
+}
+
+func TestPadBoth(t *testing.T) {
+	t.Parallel()
+
+	var tests = []struct {
+		param1   string
+		param2   string
+		param3   int
+		expected string
+	}{
+		{"こんにちは", "xyz", 12, "xyzこんにちはxyzx"},
+		{"こんにちは", "xyz", 11, "xyzこんにちはxyz"},
+		{"abc", "x", 5, "xabcx"},
+		{"abc", "xyz", 5, "xabcx"},
+		{"abcde", "xyz", 5, "abcde"},
+		{"abcde", "xyz", 4, "abcde"},
+	}
+	for _, test := range tests {
+		actual :=
PadBoth(test.param1, test.param2, test.param3) + if actual != test.expected { + t.Errorf("Expected PadBoth(%q,%q,%q) to be %v, got %v", test.param1, test.param2, test.param3, test.expected, actual) + } + } +} diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go new file mode 100644 index 000000000..b18bbcb4c --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/validator.go @@ -0,0 +1,1278 @@ +// Package govalidator is package of validators and sanitizers for strings, structs and collections. +package govalidator + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +var ( + fieldsRequiredByDefault bool + nilPtrAllowedByRequired = false + notNumberRegexp = regexp.MustCompile("[^0-9]+") + whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) + paramsRegexp = regexp.MustCompile(`\(.*\)$`) +) + +const maxURLRuneCount = 2083 +const minURLRuneCount = 3 +const RF3339WithoutZone = "2006-01-02T15:04:05" + +// SetFieldsRequiredByDefault causes validation to fail when struct fields +// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). +// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): +// type exampleStruct struct { +// Name string `` +// Email string `valid:"email"` +// This, however, will only fail when Email is empty or an invalid email address: +// type exampleStruct2 struct { +// Name string `valid:"-"` +// Email string `valid:"email"` +// Lastly, this will only fail when Email is an invalid email address but not when it's empty: +// type exampleStruct2 struct { +// Name string `valid:"-"` +// Email string `valid:"email,optional"` +func SetFieldsRequiredByDefault(value bool) { + fieldsRequiredByDefault = value +} + +// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required. +// The validation will still reject ptr fields in their zero value state. Example with this enabled: +// type exampleStruct struct { +// Name *string `valid:"required"` +// With `Name` set to "", this will be considered invalid input and will cause a validation error. +// With `Name` set to nil, this will be considered valid by validation. +// By default this is disabled. +func SetNilPtrAllowedByRequired(value bool) { + nilPtrAllowedByRequired = value +} + +// IsEmail check if the string is an email. +func IsEmail(str string) bool { + // TODO uppercase letters are not supported + return rxEmail.MatchString(str) +} + +// IsExistingEmail check if the string is an email of existing domain +func IsExistingEmail(email string) bool { + + if len(email) < 6 || len(email) > 254 { + return false + } + at := strings.LastIndex(email, "@") + if at <= 0 || at > len(email)-3 { + return false + } + user := email[:at] + host := email[at+1:] + if len(user) > 64 { + return false + } + if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { + return false + } + switch host { + case "localhost", "example.com": + return true + } + if _, err := net.LookupMX(host); err != nil { + if _, err := net.LookupIP(host); err != nil { + return false + } + } + + return true +} + +// IsURL check if the string is an URL. 
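+//
+// A minimal usage sketch (illustrative; the expected results mirror the
+// TestIsURL table later in this diff):
+//
+//	IsURL("http://foobar.com") // true
+//	IsURL("foobar.com")        // true, a scheme is not required
+//	IsURL(".com")              // false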
+func IsURL(str string) bool { + if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { + return false + } + strTemp := str + if strings.Contains(str, ":") && !strings.Contains(str, "://") { + // support no indicated urlscheme but with colon for port number + // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString + strTemp = "http://" + str + } + u, err := url.Parse(strTemp) + if err != nil { + return false + } + if strings.HasPrefix(u.Host, ".") { + return false + } + if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) { + return false + } + return rxURL.MatchString(str) +} + +// IsRequestURL check if the string rawurl, assuming +// it was received in an HTTP request, is a valid +// URL confirm to RFC 3986 +func IsRequestURL(rawurl string) bool { + url, err := url.ParseRequestURI(rawurl) + if err != nil { + return false //Couldn't even parse the rawurl + } + if len(url.Scheme) == 0 { + return false //No Scheme found + } + return true +} + +// IsRequestURI check if the string rawurl, assuming +// it was received in an HTTP request, is an +// absolute URI or an absolute path. +func IsRequestURI(rawurl string) bool { + _, err := url.ParseRequestURI(rawurl) + return err == nil +} + +// IsAlpha check if the string contains only letters (a-zA-Z). Empty string is valid. +func IsAlpha(str string) bool { + if IsNull(str) { + return true + } + return rxAlpha.MatchString(str) +} + +//IsUTFLetter check if the string contains only unicode letter characters. +//Similar to IsAlpha but for all languages. Empty string is valid. +func IsUTFLetter(str string) bool { + if IsNull(str) { + return true + } + + for _, c := range str { + if !unicode.IsLetter(c) { + return false + } + } + return true + +} + +// IsAlphanumeric check if the string contains only letters and numbers. Empty string is valid. +func IsAlphanumeric(str string) bool { + if IsNull(str) { + return true + } + return rxAlphanumeric.MatchString(str) +} + +// IsUTFLetterNumeric check if the string contains only unicode letters and numbers. Empty string is valid. +func IsUTFLetterNumeric(str string) bool { + if IsNull(str) { + return true + } + for _, c := range str { + if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok + return false + } + } + return true + +} + +// IsNumeric check if the string contains only numbers. Empty string is valid. +func IsNumeric(str string) bool { + if IsNull(str) { + return true + } + return rxNumeric.MatchString(str) +} + +// IsUTFNumeric check if the string contains only unicode numbers of any kind. +// Numbers can be 0-9 but also Fractions ¾,Roman â…¨ and Hangzhou 〩. Empty string is valid. +func IsUTFNumeric(str string) bool { + if IsNull(str) { + return true + } + if strings.IndexAny(str, "+-") > 0 { + return false + } + if len(str) > 1 { + str = strings.TrimPrefix(str, "-") + str = strings.TrimPrefix(str, "+") + } + for _, c := range str { + if !unicode.IsNumber(c) { //numbers && minus sign are ok + return false + } + } + return true + +} + +// IsUTFDigit check if the string contains only unicode radix-10 decimal digits. Empty string is valid. 
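+//
+// Unlike IsUTFNumeric above, IsUTFDigit accepts only plain decimal digits,
+// so unicode numbers such as the fraction "¾" are rejected (illustrative,
+// mirroring the test tables later in this diff):
+//
+//	IsUTFNumeric("1¾") // true
+//	IsUTFDigit("1¾")   // false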
+func IsUTFDigit(str string) bool { + if IsNull(str) { + return true + } + if strings.IndexAny(str, "+-") > 0 { + return false + } + if len(str) > 1 { + str = strings.TrimPrefix(str, "-") + str = strings.TrimPrefix(str, "+") + } + for _, c := range str { + if !unicode.IsDigit(c) { //digits && minus sign are ok + return false + } + } + return true + +} + +// IsHexadecimal check if the string is a hexadecimal number. +func IsHexadecimal(str string) bool { + return rxHexadecimal.MatchString(str) +} + +// IsHexcolor check if the string is a hexadecimal color. +func IsHexcolor(str string) bool { + return rxHexcolor.MatchString(str) +} + +// IsRGBcolor check if the string is a valid RGB color in form rgb(RRR, GGG, BBB). +func IsRGBcolor(str string) bool { + return rxRGBcolor.MatchString(str) +} + +// IsLowerCase check if the string is lowercase. Empty string is valid. +func IsLowerCase(str string) bool { + if IsNull(str) { + return true + } + return str == strings.ToLower(str) +} + +// IsUpperCase check if the string is uppercase. Empty string is valid. +func IsUpperCase(str string) bool { + if IsNull(str) { + return true + } + return str == strings.ToUpper(str) +} + +// HasLowerCase check if the string contains at least 1 lowercase. Empty string is valid. +func HasLowerCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasLowerCase.MatchString(str) +} + +// HasUpperCase check if the string contians as least 1 uppercase. Empty string is valid. +func HasUpperCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasUpperCase.MatchString(str) +} + +// IsInt check if the string is an integer. Empty string is valid. +func IsInt(str string) bool { + if IsNull(str) { + return true + } + return rxInt.MatchString(str) +} + +// IsFloat check if the string is a float. +func IsFloat(str string) bool { + return str != "" && rxFloat.MatchString(str) +} + +// IsDivisibleBy check if the string is a number that's divisible by another. +// If second argument is not valid integer or zero, it's return false. +// Otherwise, if first argument is not valid integer or zero, it's return true (Invalid string converts to zero). +func IsDivisibleBy(str, num string) bool { + f, _ := ToFloat(str) + p := int64(f) + q, _ := ToInt(num) + if q == 0 { + return false + } + return (p == 0) || (p%q == 0) +} + +// IsNull check if the string is null. +func IsNull(str string) bool { + return len(str) == 0 +} + +// HasWhitespaceOnly checks the string only contains whitespace +func HasWhitespaceOnly(str string) bool { + return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str) +} + +// HasWhitespace checks if the string contains any whitespace +func HasWhitespace(str string) bool { + return len(str) > 0 && rxHasWhitespace.MatchString(str) +} + +// IsByteLength check if the string's length (in bytes) falls in a range. +func IsByteLength(str string, min, max int) bool { + return len(str) >= min && len(str) <= max +} + +// IsUUIDv3 check if the string is a UUID version 3. +func IsUUIDv3(str string) bool { + return rxUUID3.MatchString(str) +} + +// IsUUIDv4 check if the string is a UUID version 4. +func IsUUIDv4(str string) bool { + return rxUUID4.MatchString(str) +} + +// IsUUIDv5 check if the string is a UUID version 5. +func IsUUIDv5(str string) bool { + return rxUUID5.MatchString(str) +} + +// IsUUID check if the string is a UUID (version 3, 4 or 5). +func IsUUID(str string) bool { + return rxUUID.MatchString(str) +} + +// IsCreditCard check if the string is a credit card. 
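+//
+// The implementation strips non-digits and applies the Luhn checksum:
+// starting from the rightmost digit, every second digit is doubled (and
+// reduced by 9 when the double exceeds 9), and the weighted digits must
+// sum to a multiple of 10. For example, the digits of "4242424242424242"
+// sum to 80 under this weighting, so it passes.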
+func IsCreditCard(str string) bool { + sanitized := notNumberRegexp.ReplaceAllString(str, "") + if !rxCreditCard.MatchString(sanitized) { + return false + } + var sum int64 + var digit string + var tmpNum int64 + var shouldDouble bool + for i := len(sanitized) - 1; i >= 0; i-- { + digit = sanitized[i:(i + 1)] + tmpNum, _ = ToInt(digit) + if shouldDouble { + tmpNum *= 2 + if tmpNum >= 10 { + sum += ((tmpNum % 10) + 1) + } else { + sum += tmpNum + } + } else { + sum += tmpNum + } + shouldDouble = !shouldDouble + } + + return sum%10 == 0 +} + +// IsISBN10 check if the string is an ISBN version 10. +func IsISBN10(str string) bool { + return IsISBN(str, 10) +} + +// IsISBN13 check if the string is an ISBN version 13. +func IsISBN13(str string) bool { + return IsISBN(str, 13) +} + +// IsISBN check if the string is an ISBN (version 10 or 13). +// If version value is not equal to 10 or 13, it will be check both variants. +func IsISBN(str string, version int) bool { + sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") + var checksum int32 + var i int32 + if version == 10 { + if !rxISBN10.MatchString(sanitized) { + return false + } + for i = 0; i < 9; i++ { + checksum += (i + 1) * int32(sanitized[i]-'0') + } + if sanitized[9] == 'X' { + checksum += 10 * 10 + } else { + checksum += 10 * int32(sanitized[9]-'0') + } + if checksum%11 == 0 { + return true + } + return false + } else if version == 13 { + if !rxISBN13.MatchString(sanitized) { + return false + } + factor := []int32{1, 3} + for i = 0; i < 12; i++ { + checksum += factor[i%2] * int32(sanitized[i]-'0') + } + return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 + } + return IsISBN(str, 10) || IsISBN(str, 13) +} + +// IsJSON check if the string is valid JSON (note: uses json.Unmarshal). +func IsJSON(str string) bool { + var js json.RawMessage + return json.Unmarshal([]byte(str), &js) == nil +} + +// IsMultibyte check if the string contains one or more multibyte chars. Empty string is valid. +func IsMultibyte(str string) bool { + if IsNull(str) { + return true + } + return rxMultibyte.MatchString(str) +} + +// IsASCII check if the string contains ASCII chars only. Empty string is valid. +func IsASCII(str string) bool { + if IsNull(str) { + return true + } + return rxASCII.MatchString(str) +} + +// IsPrintableASCII check if the string contains printable ASCII chars only. Empty string is valid. +func IsPrintableASCII(str string) bool { + if IsNull(str) { + return true + } + return rxPrintableASCII.MatchString(str) +} + +// IsFullWidth check if the string contains any full-width chars. Empty string is valid. +func IsFullWidth(str string) bool { + if IsNull(str) { + return true + } + return rxFullWidth.MatchString(str) +} + +// IsHalfWidth check if the string contains any half-width chars. Empty string is valid. +func IsHalfWidth(str string) bool { + if IsNull(str) { + return true + } + return rxHalfWidth.MatchString(str) +} + +// IsVariableWidth check if the string contains a mixture of full and half-width chars. Empty string is valid. +func IsVariableWidth(str string) bool { + if IsNull(str) { + return true + } + return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str) +} + +// IsBase64 check if a string is base64 encoded. +func IsBase64(str string) bool { + return rxBase64.MatchString(str) +} + +// IsFilePath check is a string is Win or Unix file path and returns it's type. 
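+//
+// The int result is one of the path-type constants (Win, Unix, Unknown)
+// referenced below, so a caller can branch on the detected flavor
+// (illustrative sketch; the Windows path is an assumed example):
+//
+//	if ok, pathType := IsFilePath(`C:\Users\test.txt`); ok && pathType == Win {
+//		// treat as a Windows-style path
+//	}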
+func IsFilePath(str string) (bool, int) { + if rxWinPath.MatchString(str) { + //check windows path limit see: + // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath + if len(str[3:]) > 32767 { + return false, Win + } + return true, Win + } else if rxUnixPath.MatchString(str) { + return true, Unix + } + return false, Unknown +} + +// IsDataURI checks if a string is base64 encoded data URI such as an image +func IsDataURI(str string) bool { + dataURI := strings.Split(str, ",") + if !rxDataURI.MatchString(dataURI[0]) { + return false + } + return IsBase64(dataURI[1]) +} + +// IsISO3166Alpha2 checks if a string is valid two-letter country code +func IsISO3166Alpha2(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO3166Alpha3 checks if a string is valid three-letter country code +func IsISO3166Alpha3(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha3Code { + return true + } + } + return false +} + +// IsISO693Alpha2 checks if a string is valid two-letter language code +func IsISO693Alpha2(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO693Alpha3b checks if a string is valid three-letter language code +func IsISO693Alpha3b(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha3bCode { + return true + } + } + return false +} + +// IsDNSName will validate the given string as a DNS name +func IsDNSName(str string) bool { + if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 { + // constraints already violated + return false + } + return !IsIP(str) && rxDNSName.MatchString(str) +} + +// IsHash checks if a string is a hash of type algorithm. +// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] +func IsHash(str string, algorithm string) bool { + len := "0" + algo := strings.ToLower(algorithm) + + if algo == "crc32" || algo == "crc32b" { + len = "8" + } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" { + len = "32" + } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" { + len = "40" + } else if algo == "tiger192" { + len = "48" + } else if algo == "sha256" { + len = "64" + } else if algo == "sha384" { + len = "96" + } else if algo == "sha512" { + len = "128" + } else { + return false + } + + return Matches(str, "^[a-f0-9]{"+len+"}$") +} + +// IsDialString validates the given string for usage with the various Dial() functions +func IsDialString(str string) bool { + + if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) { + return true + } + + return false +} + +// IsIP checks if a string is either IP version 4 or 6. +func IsIP(str string) bool { + return net.ParseIP(str) != nil +} + +// IsPort checks if a string represents a valid port +func IsPort(str string) bool { + if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 { + return true + } + return false +} + +// IsIPv4 check if the string is an IP version 4. +func IsIPv4(str string) bool { + ip := net.ParseIP(str) + return ip != nil && strings.Contains(str, ".") +} + +// IsIPv6 check if the string is an IP version 6. 
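+//
+// Like IsIPv4 above, this combines net.ParseIP with a cheap separator
+// check, e.g.:
+//
+//	IsIPv6("::1")       // true
+//	IsIPv6("127.0.0.1") // false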
+func IsIPv6(str string) bool { + ip := net.ParseIP(str) + return ip != nil && strings.Contains(str, ":") +} + +// IsCIDR check if the string is an valid CIDR notiation (IPV4 & IPV6) +func IsCIDR(str string) bool { + _, _, err := net.ParseCIDR(str) + return err == nil +} + +// IsMAC check if a string is valid MAC address. +// Possible MAC formats: +// 01:23:45:67:89:ab +// 01:23:45:67:89:ab:cd:ef +// 01-23-45-67-89-ab +// 01-23-45-67-89-ab-cd-ef +// 0123.4567.89ab +// 0123.4567.89ab.cdef +func IsMAC(str string) bool { + _, err := net.ParseMAC(str) + return err == nil +} + +// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name +func IsHost(str string) bool { + return IsIP(str) || IsDNSName(str) +} + +// IsMongoID check if the string is a valid hex-encoded representation of a MongoDB ObjectId. +func IsMongoID(str string) bool { + return rxHexadecimal.MatchString(str) && (len(str) == 24) +} + +// IsLatitude check if a string is valid latitude. +func IsLatitude(str string) bool { + return rxLatitude.MatchString(str) +} + +// IsLongitude check if a string is valid longitude. +func IsLongitude(str string) bool { + return rxLongitude.MatchString(str) +} + +// IsRsaPublicKey check if a string is valid public key with provided length +func IsRsaPublicKey(str string, keylen int) bool { + bb := bytes.NewBufferString(str) + pemBytes, err := ioutil.ReadAll(bb) + if err != nil { + return false + } + block, _ := pem.Decode(pemBytes) + if block != nil && block.Type != "PUBLIC KEY" { + return false + } + var der []byte + + if block != nil { + der = block.Bytes + } else { + der, err = base64.StdEncoding.DecodeString(str) + if err != nil { + return false + } + } + + key, err := x509.ParsePKIXPublicKey(der) + if err != nil { + return false + } + pubkey, ok := key.(*rsa.PublicKey) + if !ok { + return false + } + bitlen := len(pubkey.N.Bytes()) * 8 + return bitlen == int(keylen) +} + +func toJSONName(tag string) string { + if tag == "" { + return "" + } + + // JSON name always comes first. If there's no options then split[0] is + // JSON name, if JSON name is not set, then split[0] is an empty string. + split := strings.SplitN(tag, ",", 2) + + name := split[0] + + // However it is possible that the field is skipped when + // (de-)serializing from/to JSON, in which case assume that there is no + // tag name to use + if name == "-" { + return "" + } + return name +} + +func PrependPathToErrors(err error, path string) error { + switch err2 := err.(type) { + case Error: + err2.Path = append([]string{path}, err2.Path...) + return err2 + case Errors: + errors := err2.Errors() + for i, err3 := range errors { + errors[i] = PrependPathToErrors(err3, path) + } + return err2 + } + fmt.Println(err) + return err +} + +// ValidateStruct use tags for fields. +// result will be equal to `false` if there are any errors. 
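+//
+// A minimal usage sketch, using only tag forms documented in this file
+// (`required`, the `~` custom-message separator, and `optional`); the
+// struct and values are assumed examples:
+//
+//	type user struct {
+//		Name  string `valid:"required~name is blank"`
+//		Email string `valid:"email,optional"`
+//	}
+//	ok, err := ValidateStruct(user{Name: "x"}) // ok == true, err == nil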
+func ValidateStruct(s interface{}) (bool, error) { + if s == nil { + return true, nil + } + result := true + var err error + val := reflect.ValueOf(s) + if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + val = val.Elem() + } + // we only accept structs + if val.Kind() != reflect.Struct { + return false, fmt.Errorf("function only accepts structs; got %s", val.Kind()) + } + var errs Errors + for i := 0; i < val.NumField(); i++ { + valueField := val.Field(i) + typeField := val.Type().Field(i) + if typeField.PkgPath != "" { + continue // Private field + } + structResult := true + if valueField.Kind() == reflect.Interface { + valueField = valueField.Elem() + } + if (valueField.Kind() == reflect.Struct || + (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && + typeField.Tag.Get(tagName) != "-" { + var err error + structResult, err = ValidateStruct(valueField.Interface()) + if err != nil { + err = PrependPathToErrors(err, typeField.Name) + errs = append(errs, err) + } + } + resultField, err2 := typeCheck(valueField, typeField, val, nil) + if err2 != nil { + + // Replace structure name with JSON name if there is a tag on the variable + jsonTag := toJSONName(typeField.Tag.Get("json")) + if jsonTag != "" { + switch jsonError := err2.(type) { + case Error: + jsonError.Name = jsonTag + err2 = jsonError + case Errors: + for i2, err3 := range jsonError { + switch customErr := err3.(type) { + case Error: + customErr.Name = jsonTag + jsonError[i2] = customErr + } + } + + err2 = jsonError + } + } + + errs = append(errs, err2) + } + result = result && resultField && structResult + } + if len(errs) > 0 { + err = errs + } + return result, err +} + +// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} +func parseTagIntoMap(tag string) tagOptionsMap { + optionsMap := make(tagOptionsMap) + options := strings.Split(tag, ",") + + for i, option := range options { + option = strings.TrimSpace(option) + + validationOptions := strings.Split(option, "~") + if !isValidTag(validationOptions[0]) { + continue + } + if len(validationOptions) == 2 { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i} + } else { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i} + } + } + return optionsMap +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +// IsSSN will validate the given string as a U.S. 
Social Security Number +func IsSSN(str string) bool { + if str == "" || len(str) != 11 { + return false + } + return rxSSN.MatchString(str) +} + +// IsSemver check if string is valid semantic version +func IsSemver(str string) bool { + return rxSemver.MatchString(str) +} + +// IsTime check if string is valid according to given format +func IsTime(str string, format string) bool { + _, err := time.Parse(format, str) + return err == nil +} + +// IsRFC3339 check if string is valid timestamp value according to RFC3339 +func IsRFC3339(str string) bool { + return IsTime(str, time.RFC3339) +} + +// IsRFC3339WithoutZone check if string is valid timestamp value according to RFC3339 which excludes the timezone. +func IsRFC3339WithoutZone(str string) bool { + return IsTime(str, RF3339WithoutZone) +} + +// IsISO4217 check if string is valid ISO currency code +func IsISO4217(str string) bool { + for _, currency := range ISO4217List { + if str == currency { + return true + } + } + + return false +} + +// ByteLength check string's length +func ByteLength(str string, params ...string) bool { + if len(params) == 2 { + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return len(str) >= int(min) && len(str) <= int(max) + } + + return false +} + +// RuneLength check string's length +// Alias for StringLength +func RuneLength(str string, params ...string) bool { + return StringLength(str, params...) +} + +// IsRsaPub check whether string is valid RSA key +// Alias for IsRsaPublicKey +func IsRsaPub(str string, params ...string) bool { + if len(params) == 1 { + len, _ := ToInt(params[0]) + return IsRsaPublicKey(str, int(len)) + } + + return false +} + +// StringMatches checks if a string matches a given pattern. +func StringMatches(s string, params ...string) bool { + if len(params) == 1 { + pattern := params[0] + return Matches(s, pattern) + } + return false +} + +// StringLength check string's length (including multi byte strings) +func StringLength(str string, params ...string) bool { + + if len(params) == 2 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return strLength >= int(min) && strLength <= int(max) + } + + return false +} + +// Range check string's length +func Range(str string, params ...string) bool { + if len(params) == 2 { + value, _ := ToFloat(str) + min, _ := ToFloat(params[0]) + max, _ := ToFloat(params[1]) + return InRange(value, min, max) + } + + return false +} + +func isInRaw(str string, params ...string) bool { + if len(params) == 1 { + rawParams := params[0] + + parsedParams := strings.Split(rawParams, "|") + + return IsIn(str, parsedParams...) 
+ } + + return false +} + +// IsIn check if string str is a member of the set of strings params +func IsIn(str string, params ...string) bool { + for _, param := range params { + if str == param { + return true + } + } + + return false +} + +func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { + if nilPtrAllowedByRequired { + k := v.Kind() + if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { + return true, nil + } + } + + if requiredOption, isRequired := options["required"]; isRequired { + if len(requiredOption.customErrorMessage) > 0 { + return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} + } + return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} + } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { + return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} + } + // not required and empty is valid + return true, nil +} + +func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { + if !v.IsValid() { + return false, nil + } + + tag := t.Tag.Get(tagName) + + // Check if the field should be ignored + switch tag { + case "": + if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { + if !fieldsRequiredByDefault { + return true, nil + } + return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} + } + case "-": + return true, nil + } + + isRootType := false + if options == nil { + isRootType = true + options = parseTagIntoMap(tag) + } + + if isEmptyValue(v) { + // an empty value is not validated, check only required + isValid, resultErr = checkRequired(v, t, options) + for key := range options { + delete(options, key) + } + return isValid, resultErr + } + + var customTypeErrors Errors + optionsOrder := options.orderedKeys() + for _, validatorName := range optionsOrder { + validatorStruct := options[validatorName] + if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { + delete(options, validatorName) + + if result := validatefunc(v.Interface(), o.Interface()); !result { + if len(validatorStruct.customErrorMessage) > 0 { + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) + continue + } + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) + } + } + } + + if len(customTypeErrors.Errors()) > 0 { + return false, customTypeErrors + } + + if isRootType { + // Ensure that we've checked the value by all specified validators before report that the value is valid + defer func() { + delete(options, "optional") + delete(options, "required") + + if isValid && resultErr == nil && len(options) != 0 { + optionsOrder := options.orderedKeys() + for _, validator := range optionsOrder { + isValid = false + resultErr = Error{t.Name, fmt.Errorf( + "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} + return + } + } + }() + } + + switch v.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, 
reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, + reflect.String: + // for each tag option check the map of validator functions + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // Check whether the tag looks like '!something' or 'something' + if validator[0] == '!' { + validator = validator[1:] + negate = true + } + + // Check for param validators + for key, value := range ParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := ParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + // type not yet supported, fail + return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} + } + } + + if validatefunc, ok := TagMap[validator]; ok { + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field); !result && !negate || result && negate { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + //Not Yet Supported Types (Fail here!) 
+ err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) + return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}} + } + } + } + return true, nil + case reflect.Map: + if v.Type().Key().Kind() != reflect.String { + return false, &UnsupportedTypeError{v.Type()} + } + var sv stringValues + sv = v.MapKeys() + sort.Sort(sv) + result := true + for i, k := range sv { + var resultItem bool + var err error + if v.MapIndex(k).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.MapIndex(k), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) + if err != nil { + err = PrependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) + return false, err + } + } + result = result && resultItem + } + return result, nil + case reflect.Slice, reflect.Array: + result := true + for i := 0; i < v.Len(); i++ { + var resultItem bool + var err error + if v.Index(i).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.Index(i), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.Index(i).Interface()) + if err != nil { + err = PrependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) + return false, err + } + } + result = result && resultItem + } + return result, nil + case reflect.Interface: + // If the value is an interface then encode its element + if v.IsNil() { + return true, nil + } + return ValidateStruct(v.Interface()) + case reflect.Ptr: + // If the value is a pointer then check its element + if v.IsNil() { + return true, nil + } + return typeCheck(v.Elem(), t, o, options) + case reflect.Struct: + return ValidateStruct(v.Interface()) + default: + return false, &UnsupportedTypeError{v.Type()} + } +} + +func stripParams(validatorString string) string { + return paramsRegexp.ReplaceAllString(validatorString, "") +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.String, reflect.Array: + return v.Len() == 0 + case reflect.Map, reflect.Slice: + return v.Len() == 0 || v.IsNil() + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) +} + +// ErrorByField returns error for specified field of the struct +// validated by ValidateStruct or empty string if there are no errors +// or this field doesn't exists or doesn't have any errors. +func ErrorByField(e error, field string) string { + if e == nil { + return "" + } + return ErrorsByField(e)[field] +} + +// ErrorsByField returns map of errors of the struct validated +// by ValidateStruct or empty map if there are no errors. 
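+//
+// Illustrative lookup (sketch):
+//
+//	if ok, err := ValidateStruct(s); !ok {
+//		for field, msg := range ErrorsByField(err) {
+//			fmt.Printf("%s: %s\n", field, msg)
+//		}
+//	}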
+func ErrorsByField(e error) map[string]string { + m := make(map[string]string) + if e == nil { + return m + } + // prototype for ValidateStruct + + switch e.(type) { + case Error: + m[e.(Error).Name] = e.(Error).Err.Error() + case Errors: + for _, item := range e.(Errors).Errors() { + n := ErrorsByField(item) + for k, v := range n { + m[k] = v + } + } + } + + return m +} + +// Error returns string equivalent for reflect.Type +func (e *UnsupportedTypeError) Error() string { + return "validator: unsupported type: " + e.Type.String() +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } +func (sv stringValues) get(i int) string { return sv[i].String() } diff --git a/vendor/github.com/asaskevich/govalidator/validator_test.go b/vendor/github.com/asaskevich/govalidator/validator_test.go new file mode 100644 index 000000000..e2400d9c3 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/validator_test.go @@ -0,0 +1,3490 @@ +package govalidator + +import ( + "fmt" + "strings" + "testing" + "time" +) + +func init() { + CustomTypeTagMap.Set("customFalseValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool { + return false + })) + CustomTypeTagMap.Set("customTrueValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool { + return true + })) +} + +func TestIsAlpha(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"\n", false}, + {"\r", false}, + {"â…¨", false}, + {"", true}, + {" fooo ", false}, + {"abc!!!", false}, + {"abc1", false}, + {"abc〩", false}, + {"abc", true}, + {"소주", false}, + {"ABC", true}, + {"FoObAr", true}, + {"소aBC", false}, + {"소", false}, + {"달기&Co.", false}, + {"〩Hours", false}, + {"\ufff0", false}, + {"\u0070", true}, //UTF-8(ASCII): p + {"\u0026", false}, //UTF-8(ASCII): & + {"\u0030", false}, //UTF-8(ASCII): 0 + {"123", false}, + {"0123", false}, + {"-00123", false}, + {"0", false}, + {"-0", false}, + {"123.123", false}, + {" ", false}, + {".", false}, + {"-1¾", false}, + {"1¾", false}, + {"〥〩", false}, + {"모ìž", false}, + {"ix", true}, + {"Û³ÛµÛ¶Û°", false}, + {"1--", false}, + {"1-1", false}, + {"-", false}, + {"--", false}, + {"1++", false}, + {"1+1", false}, + {"+", false}, + {"++", false}, + {"+1", false}, + } + for _, test := range tests { + actual := IsAlpha(test.param) + if actual != test.expected { + t.Errorf("Expected IsAlpha(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsUTFLetter(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"\n", false}, + {"\r", false}, + {"â…¨", false}, + {"", true}, + {" fooo ", false}, + {"abc!!!", false}, + {"abc1", false}, + {"abc〩", false}, + {"", true}, + {"abc", true}, + {"소주", true}, + {"ABC", true}, + {"FoObAr", true}, + {"소aBC", true}, + {"소", true}, + {"달기&Co.", false}, + {"〩Hours", false}, + {"\ufff0", false}, + {"\u0070", true}, //UTF-8(ASCII): p + {"\u0026", false}, //UTF-8(ASCII): & + {"\u0030", false}, //UTF-8(ASCII): 0 + {"123", false}, + {"0123", false}, + {"-00123", false}, + {"0", false}, + {"-0", false}, + {"123.123", false}, + {" ", false}, + {".", false}, + {"-1¾", false}, + {"1¾", false}, + {"〥〩", false}, + {"모ìž", true}, + {"ix", true}, + {"Û³ÛµÛ¶Û°", false}, + {"1--", false}, + {"1-1", false}, + {"-", false}, + {"--", false}, + {"1++", false}, + {"1+1", false}, + {"+", false}, + {"++", false}, + 
{"+1", false}, + } + for _, test := range tests { + actual := IsUTFLetter(test.param) + if actual != test.expected { + t.Errorf("Expected IsUTFLetter(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsAlphanumeric(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"\n", false}, + {"\r", false}, + {"â…¨", false}, + {"", true}, + {" fooo ", false}, + {"abc!!!", false}, + {"abc123", true}, + {"ABC111", true}, + {"abc1", true}, + {"abc〩", false}, + {"abc", true}, + {"소주", false}, + {"ABC", true}, + {"FoObAr", true}, + {"소aBC", false}, + {"소", false}, + {"달기&Co.", false}, + {"〩Hours", false}, + {"\ufff0", false}, + {"\u0070", true}, //UTF-8(ASCII): p + {"\u0026", false}, //UTF-8(ASCII): & + {"\u0030", true}, //UTF-8(ASCII): 0 + {"123", true}, + {"0123", true}, + {"-00123", false}, + {"0", true}, + {"-0", false}, + {"123.123", false}, + {" ", false}, + {".", false}, + {"-1¾", false}, + {"1¾", false}, + {"〥〩", false}, + {"모ìž", false}, + {"ix", true}, + {"Û³ÛµÛ¶Û°", false}, + {"1--", false}, + {"1-1", false}, + {"-", false}, + {"--", false}, + {"1++", false}, + {"1+1", false}, + {"+", false}, + {"++", false}, + {"+1", false}, + } + for _, test := range tests { + actual := IsAlphanumeric(test.param) + if actual != test.expected { + t.Errorf("Expected IsAlphanumeric(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsUTFLetterNumeric(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"\n", false}, + {"\r", false}, + {"â…¨", true}, + {"", true}, + {" fooo ", false}, + {"abc!!!", false}, + {"abc1", true}, + {"abc〩", true}, + {"abc", true}, + {"소주", true}, + {"ABC", true}, + {"FoObAr", true}, + {"소aBC", true}, + {"소", true}, + {"달기&Co.", false}, + {"〩Hours", true}, + {"\ufff0", false}, + {"\u0070", true}, //UTF-8(ASCII): p + {"\u0026", false}, //UTF-8(ASCII): & + {"\u0030", true}, //UTF-8(ASCII): 0 + {"123", true}, + {"0123", true}, + {"-00123", false}, + {"0", true}, + {"-0", false}, + {"123.123", false}, + {" ", false}, + {".", false}, + {"-1¾", false}, + {"1¾", true}, + {"〥〩", true}, + {"모ìž", true}, + {"ix", true}, + {"Û³ÛµÛ¶Û°", true}, + {"1--", false}, + {"1-1", false}, + {"-", false}, + {"--", false}, + {"1++", false}, + {"1+1", false}, + {"+", false}, + {"++", false}, + {"+1", false}, + } + for _, test := range tests { + actual := IsUTFLetterNumeric(test.param) + if actual != test.expected { + t.Errorf("Expected IsUTFLetterNumeric(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsNumeric(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"\n", false}, + {"\r", false}, + {"â…¨", false}, + {"", true}, + {" fooo ", false}, + {"abc!!!", false}, + {"abc1", false}, + {"abc〩", false}, + {"abc", false}, + {"소주", false}, + {"ABC", false}, + {"FoObAr", false}, + {"소aBC", false}, + {"소", false}, + {"달기&Co.", false}, + {"〩Hours", false}, + {"\ufff0", false}, + {"\u0070", false}, //UTF-8(ASCII): p + {"\u0026", false}, //UTF-8(ASCII): & + {"\u0030", true}, //UTF-8(ASCII): 0 + {"123", true}, + {"0123", true}, + {"-00123", false}, + {"+00123", false}, + {"0", true}, + {"-0", false}, + {"123.123", false}, + {" ", false}, + {".", false}, + {"12ð…ª3", false}, + {"-1¾", false}, + {"1¾", false}, + {"〥〩", false}, + {"모ìž", false}, + {"ix", false}, + {"Û³ÛµÛ¶Û°", false}, + {"1--", false}, + {"1-1", false}, + {"-", false}, + {"--", false}, + {"1++", false}, + {"1+1", false}, + {"+", 
false}, + {"++", false}, + {"+1", false}, + } + for _, test := range tests { + actual := IsNumeric(test.param) + if actual != test.expected { + t.Errorf("Expected IsNumeric(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsUTFNumeric(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"\n", false}, + {"\r", false}, + {"â…¨", true}, + {"", true}, + {" fooo ", false}, + {"abc!!!", false}, + {"abc1", false}, + {"abc〩", false}, + {"abc", false}, + {"소주", false}, + {"ABC", false}, + {"FoObAr", false}, + {"소aBC", false}, + {"소", false}, + {"달기&Co.", false}, + {"〩Hours", false}, + {"\ufff0", false}, + {"\u0070", false}, //UTF-8(ASCII): p + {"\u0026", false}, //UTF-8(ASCII): & + {"\u0030", true}, //UTF-8(ASCII): 0 + {"123", true}, + {"0123", true}, + {"-00123", true}, + {"0", true}, + {"-0", true}, + {"--0", false}, + {"-0-", false}, + {"123.123", false}, + {" ", false}, + {".", false}, + {"12ð…ª3", true}, + {"-1¾", true}, + {"1¾", true}, + {"〥〩", true}, + {"모ìž", false}, + {"ix", false}, + {"Û³ÛµÛ¶Û°", true}, + {"1++", false}, + {"1+1", false}, + {"+", false}, + {"++", false}, + {"+1", true}, + } + for _, test := range tests { + actual := IsUTFNumeric(test.param) + if actual != test.expected { + t.Errorf("Expected IsUTFNumeric(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsUTFDigit(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + + {"\n", false}, + {"\r", false}, + {"â…¨", false}, + {"", true}, + {" fooo ", false}, + {"abc!!!", false}, + {"abc1", false}, + {"abc〩", false}, + {"abc", false}, + {"소주", false}, + {"ABC", false}, + {"FoObAr", false}, + {"소aBC", false}, + {"소", false}, + {"달기&Co.", false}, + {"〩Hours", false}, + {"\ufff0", false}, + {"\u0070", false}, //UTF-8(ASCII): p + {"\u0026", false}, //UTF-8(ASCII): & + {"\u0030", true}, //UTF-8(ASCII): 0 + {"123", true}, + {"0123", true}, + {"-00123", true}, + {"0", true}, + {"-0", true}, + {"--0", false}, + {"-0-", false}, + {"123.123", false}, + {" ", false}, + {".", false}, + {"12ð…ª3", false}, + {"1483920", true}, + {"", true}, + {"Û³ÛµÛ¶Û°", true}, + {"-29", true}, + {"-1¾", false}, + {"1¾", false}, + {"〥〩", false}, + {"모ìž", false}, + {"ix", false}, + {"Û³ÛµÛ¶Û°", true}, + {"1++", false}, + {"1+1", false}, + {"+", false}, + {"++", false}, + {"+1", true}, + } + for _, test := range tests { + actual := IsUTFDigit(test.param) + if actual != test.expected { + t.Errorf("Expected IsUTFDigit(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsLowerCase(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", true}, + {"abc123", true}, + {"abc", true}, + {"a b c", true}, + {"abcß", true}, + {"abcẞ", false}, + {"ABCẞ", false}, + {"tr竪s 端ber", true}, + {"fooBar", false}, + {"123ABC", false}, + {"ABC123", false}, + {"ABC", false}, + {"S T R", false}, + {"fooBar", false}, + {"abacaba123", true}, + } + for _, test := range tests { + actual := IsLowerCase(test.param) + if actual != test.expected { + t.Errorf("Expected IsLowerCase(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsUpperCase(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", true}, + {"abc123", false}, + {"abc", false}, + {"a b c", false}, + {"abcß", false}, + {"abcẞ", false}, + {"ABCẞ", true}, + {"tr竪s 端ber", false}, + {"fooBar", false}, + {"123ABC", true}, + {"ABC123", 
true}, + {"ABC", true}, + {"S T R", true}, + {"fooBar", false}, + {"abacaba123", false}, + } + for _, test := range tests { + actual := IsUpperCase(test.param) + if actual != test.expected { + t.Errorf("Expected IsUpperCase(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestHasLowerCase(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", true}, + {"abc123", true}, + {"abc", true}, + {"a b c", true}, + {"abcß", true}, + {"abcẞ", true}, + {"ABCẞ", false}, + {"tr竪s 端ber", true}, + {"fooBar", true}, + {"123ABC", false}, + {"ABC123", false}, + {"ABC", false}, + {"S T R", false}, + {"fooBar", true}, + {"abacaba123", true}, + {"FÒÔBÀŘ", false}, + {"fòôbàř", true}, + {"fÒÔBÀŘ", true}, + } + for _, test := range tests { + actual := HasLowerCase(test.param) + if actual != test.expected { + t.Errorf("Expected HasLowerCase(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestHasUpperCase(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", true}, + {"abc123", false}, + {"abc", false}, + {"a b c", false}, + {"abcß", false}, + {"abcẞ", false}, + {"ABCẞ", true}, + {"tr竪s 端ber", false}, + {"fooBar", true}, + {"123ABC", true}, + {"ABC123", true}, + {"ABC", true}, + {"S T R", true}, + {"fooBar", true}, + {"abacaba123", false}, + {"FÒÔBÀŘ", true}, + {"fòôbàř", false}, + {"Fòôbàř", true}, + } + for _, test := range tests { + actual := HasUpperCase(test.param) + if actual != test.expected { + t.Errorf("Expected HasUpperCase(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsInt(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"-2147483648", true}, //Signed 32 Bit Min Int + {"2147483647", true}, //Signed 32 Bit Max Int + {"-2147483649", true}, //Signed 32 Bit Min Int - 1 + {"2147483648", true}, //Signed 32 Bit Max Int + 1 + {"4294967295", true}, //Unsigned 32 Bit Max Int + {"4294967296", true}, //Unsigned 32 Bit Max Int + 1 + {"-9223372036854775808", true}, //Signed 64 Bit Min Int + {"9223372036854775807", true}, //Signed 64 Bit Max Int + {"-9223372036854775809", true}, //Signed 64 Bit Min Int - 1 + {"9223372036854775808", true}, //Signed 64 Bit Max Int + 1 + {"18446744073709551615", true}, //Unsigned 64 Bit Max Int + {"18446744073709551616", true}, //Unsigned 64 Bit Max Int + 1 + {"", true}, + {"123", true}, + {"0", true}, + {"-0", true}, + {"+0", true}, + {"01", false}, + {"123.123", false}, + {" ", false}, + {"000", false}, + } + for _, test := range tests { + actual := IsInt(test.param) + if actual != test.expected { + t.Errorf("Expected IsInt(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsHash(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + algo string + expected bool + }{ + {"3ca25ae354e192b26879f651a51d92aa8a34d8d3", "sha1", true}, + {"3ca25ae354e192b26879f651a51d34d8d3", "sha1", false}, + {"3ca25ae354e192b26879f651a51d92aa8a34d8d3", "Tiger160", true}, + {"3ca25ae354e192b26879f651a51d34d8d3", "ripemd160", false}, + {"579282cfb65ca1f109b78536effaf621b853c9f7079664a3fbe2b519f435898c", "sha256", true}, + {"579282cfb65ca1f109b78536effaf621b853c9f7079664a3fbe2b519f435898casfdsafsadfsdf", "sha256", false}, + {"bf547c3fc5841a377eb1519c2890344dbab15c40ae4150b4b34443d2212e5b04aa9d58865bf03d8ae27840fef430b891", "sha384", true}, + {"579282cfb65ca1f109b78536effaf621b853c9f7079664a3fbe2b519f435898casfdsafsadfsdf", "sha384", 
false}, + {"45bc5fa8cb45ee408c04b6269e9f1e1c17090c5ce26ffeeda2af097735b29953ce547e40ff3ad0d120e5361cc5f9cee35ea91ecd4077f3f589b4d439168f91b9", "sha512", true}, + {"579282cfb65ca1f109b78536effaf621b853c9f7079664a3fbe2b519f435898casfdsafsadfsdf", "sha512", false}, + {"46fc0125a148788a3ac1d649566fc04eb84a746f1a6e4fa7", "tiger192", true}, + {"46fc0125a148788a3ac1d649566fc04eb84a746f1a6$$%@^", "TIGER192", false}, + {"46fc0125a148788a3ac1d649566fc04eb84a746f1a6$$%@^", "SOMEHASH", false}, + } + for _, test := range tests { + actual := IsHash(test.param, test.algo) + if actual != test.expected { + t.Errorf("Expected IsHash(%q, %q) to be %v, got %v", test.param, test.algo, test.expected, actual) + } + } +} + +func TestIsExistingEmail(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"foo@bar.com", true}, + {"foo@bar.com.au", true}, + {"foo+bar@bar.com", true}, + {"foo@bar.coffee", true}, + {"foo@bar.coffee..coffee", false}, + {"invalidemail@", false}, + {"invalid.com", false}, + {"@invalid.com", false}, + {"NathAn.daVIeS@DomaIn.cOM", true}, + {"NATHAN.DAVIES@DOMAIN.CO.UK", true}, + } + for _, test := range tests { + actual := IsExistingEmail(test.param) + if actual != test.expected { + t.Errorf("Expected IsExistingEmail(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsEmail(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"foo@bar.com", true}, + {"x@x.x", true}, + {"foo@bar.com.au", true}, + {"foo+bar@bar.com", true}, + {"foo@bar.coffee", true}, + {"foo@bar.coffee..coffee", false}, + {"foo@bar.bar.coffee", true}, + {"foo@bar.中文网", true}, + {"invalidemail@", false}, + {"invalid.com", false}, + {"@invalid.com", false}, + {"test|123@m端ller.com", true}, + {"hans@m端ller.com", true}, + {"hans.m端ller@test.com", true}, + {"NathAn.daVIeS@DomaIn.cOM", true}, + {"NATHAN.DAVIES@DOMAIN.CO.UK", true}, + } + for _, test := range tests { + actual := IsEmail(test.param) + if actual != test.expected { + t.Errorf("Expected IsEmail(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsURL(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"http://foo.bar#com", true}, + {"http://foobar.com", true}, + {"https://foobar.com", true}, + {"foobar.com", true}, + {"http://foobar.coffee/", true}, + {"http://foobar.中文网/", true}, + {"http://foobar.org/", true}, + {"http://foobar.ORG", true}, + {"http://foobar.org:8080/", true}, + {"ftp://foobar.ru/", true}, + {"ftp.foo.bar", true}, + {"http://user:pass@www.foobar.com/", true}, + {"http://user:pass@www.foobar.com/path/file", true}, + {"http://127.0.0.1/", true}, + {"http://duckduckgo.com/?q=%2F", true}, + {"http://localhost:3000/", true}, + {"http://foobar.com/?foo=bar#baz=qux", true}, + {"http://foobar.com?foo=bar", true}, + {"http://www.xn--froschgrn-x9a.net/", true}, + {"http://foobar.com/a-", true}, + {"http://foobar.پاکستان/", true}, + {"http://foobar.c_o_m", false}, + {"http://_foobar.com", false}, + {"http://foo_bar.com", true}, + {"http://user:pass@foo_bar_bar.bar_foo.com", true}, + {"", false}, + {"xyz://foobar.com", false}, + // {"invalid.", false}, is it false like "localhost."? 
+ {".com", false}, + {"rtmp://foobar.com", false}, + {"http://localhost:3000/", true}, + {"http://foobar.com#baz=qux", true}, + {"http://foobar.com/t$-_.+!*\\'(),", true}, + {"http://www.foobar.com/~foobar", true}, + {"http://www.-foobar.com/", false}, + {"http://www.foo---bar.com/", false}, + {"http://r6---snnvoxuioq6.googlevideo.com", true}, + {"mailto:someone@example.com", true}, + {"irc://irc.server.org/channel", false}, + {"irc://#channel@network", true}, + {"/abs/test/dir", false}, + {"./rel/test/dir", false}, + {"http://foo^bar.org", false}, + {"http://foo&*bar.org", false}, + {"http://foo&bar.org", false}, + {"http://foo bar.org", false}, + {"http://foo.bar.org", true}, + {"http://www.foo.bar.org", true}, + {"http://www.foo.co.uk", true}, + {"foo", false}, + {"http://.foo.com", false}, + {"http://,foo.com", false}, + {",foo.com", false}, + {"http://myservice.:9093/", true}, + // according to issues #62 #66 + {"https://pbs.twimg.com/profile_images/560826135676588032/j8fWrmYY_normal.jpeg", true}, + // according to #125 + {"http://prometheus-alertmanager.service.q:9093", true}, + {"aio1_alertmanager_container-63376c45:9093", true}, + {"https://www.logn-123-123.url.with.sigle.letter.d:12345/url/path/foo?bar=zzz#user", true}, + {"http://me.example.com", true}, + {"http://www.me.example.com", true}, + {"https://farm6.static.flickr.com", true}, + {"https://zh.wikipedia.org/wiki/Wikipedia:%E9%A6%96%E9%A1%B5", true}, + {"google", false}, + // According to #87 + {"http://hyphenated-host-name.example.co.in", true}, + {"http://cant-end-with-hyphen-.example.com", false}, + {"http://-cant-start-with-hyphen.example.com", false}, + {"http://www.domain-can-have-dashes.com", true}, + {"http://m.abcd.com/test.html", true}, + {"http://m.abcd.com/a/b/c/d/test.html?args=a&b=c", true}, + {"http://[::1]:9093", true}, + {"http://[::1]:909388", false}, + {"1200::AB00:1234::2552:7777:1313", false}, + {"http://[2001:db8:a0b:12f0::1]/index.html", true}, + {"http://[1200:0000:AB00:1234:0000:2552:7777:1313]", true}, + {"http://user:pass@[::1]:9093/a/b/c/?a=v#abc", true}, + {"https://127.0.0.1/a/b/c?a=v&c=11d", true}, + {"https://foo_bar.example.com", true}, + {"http://foo_bar.example.com", true}, + {"http://foo_bar_fizz_buzz.example.com", true}, + {"http://_cant_start_with_underescore", false}, + {"http://cant_end_with_underescore_", false}, + {"foo_bar.example.com", true}, + {"foo_bar_fizz_buzz.example.com", true}, + {"http://hello_world.example.com", true}, + // According to #212 + {"foo_bar-fizz-buzz:1313", true}, + {"foo_bar-fizz-buzz:13:13", false}, + {"foo_bar-fizz-buzz://1313", false}, + } + for _, test := range tests { + actual := IsURL(test.param) + if actual != test.expected { + t.Errorf("Expected IsURL(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsRequestURL(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"http://foo.bar/#com", true}, + {"http://foobar.com", true}, + {"https://foobar.com", true}, + {"foobar.com", false}, + {"http://foobar.coffee/", true}, + {"http://foobar.中文网/", true}, + {"http://foobar.org/", true}, + {"http://foobar.org:8080/", true}, + {"ftp://foobar.ru/", true}, + {"http://user:pass@www.foobar.com/", true}, + {"http://127.0.0.1/", true}, + {"http://duckduckgo.com/?q=%2F", true}, + {"http://localhost:3000/", true}, + {"http://foobar.com/?foo=bar#baz=qux", true}, + {"http://foobar.com?foo=bar", true}, + {"http://www.xn--froschgrn-x9a.net/", true}, + {"", false}, + 
{"xyz://foobar.com", true}, + {"invalid.", false}, + {".com", false}, + {"rtmp://foobar.com", true}, + {"http://www.foo_bar.com/", true}, + {"http://localhost:3000/", true}, + {"http://foobar.com/#baz=qux", true}, + {"http://foobar.com/t$-_.+!*\\'(),", true}, + {"http://www.foobar.com/~foobar", true}, + {"http://www.-foobar.com/", true}, + {"http://www.foo---bar.com/", true}, + {"mailto:someone@example.com", true}, + {"irc://irc.server.org/channel", true}, + {"/abs/test/dir", false}, + {"./rel/test/dir", false}, + } + for _, test := range tests { + actual := IsRequestURL(test.param) + if actual != test.expected { + t.Errorf("Expected IsRequestURL(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsRequestURI(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"http://foo.bar/#com", true}, + {"http://foobar.com", true}, + {"https://foobar.com", true}, + {"foobar.com", false}, + {"http://foobar.coffee/", true}, + {"http://foobar.中文网/", true}, + {"http://foobar.org/", true}, + {"http://foobar.org:8080/", true}, + {"ftp://foobar.ru/", true}, + {"http://user:pass@www.foobar.com/", true}, + {"http://127.0.0.1/", true}, + {"http://duckduckgo.com/?q=%2F", true}, + {"http://localhost:3000/", true}, + {"http://foobar.com/?foo=bar#baz=qux", true}, + {"http://foobar.com?foo=bar", true}, + {"http://www.xn--froschgrn-x9a.net/", true}, + {"xyz://foobar.com", true}, + {"invalid.", false}, + {".com", false}, + {"rtmp://foobar.com", true}, + {"http://www.foo_bar.com/", true}, + {"http://localhost:3000/", true}, + {"http://foobar.com/#baz=qux", true}, + {"http://foobar.com/t$-_.+!*\\'(),", true}, + {"http://www.foobar.com/~foobar", true}, + {"http://www.-foobar.com/", true}, + {"http://www.foo---bar.com/", true}, + {"mailto:someone@example.com", true}, + {"irc://irc.server.org/channel", true}, + {"/abs/test/dir", true}, + {"./rel/test/dir", false}, + } + for _, test := range tests { + actual := IsRequestURI(test.param) + if actual != test.expected { + t.Errorf("Expected IsRequestURI(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsFloat(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {" ", false}, + {"-.123", false}, + {"abacaba", false}, + {"1f", false}, + {"-1f", false}, + {"+1f", false}, + {"123", true}, + {"123.", true}, + {"123.123", true}, + {"-123.123", true}, + {"+123.123", true}, + {"0.123", true}, + {"-0.123", true}, + {"+0.123", true}, + {".0", true}, + {"01.123", true}, + {"-0.22250738585072011e-307", true}, + {"+0.22250738585072011e-307", true}, + } + for _, test := range tests { + actual := IsFloat(test.param) + if actual != test.expected { + t.Errorf("Expected IsFloat(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsHexadecimal(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"abcdefg", false}, + {"", false}, + {"..", false}, + {"deadBEEF", true}, + {"ff0044", true}, + } + for _, test := range tests { + actual := IsHexadecimal(test.param) + if actual != test.expected { + t.Errorf("Expected IsHexadecimal(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsHexcolor(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"#ff", false}, + {"fff0", false}, + {"#ff12FG", false}, + {"CCccCC", true}, + {"fff", true}, + {"#f00", true}, + } + for _, 
test := range tests { + actual := IsHexcolor(test.param) + if actual != test.expected { + t.Errorf("Expected IsHexcolor(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsRGBcolor(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"rgb(0,31,255)", true}, + {"rgb(1,349,275)", false}, + {"rgb(01,31,255)", false}, + {"rgb(0.6,31,255)", false}, + {"rgba(0,31,255)", false}, + {"rgb(0, 31, 255)", true}, + } + for _, test := range tests { + actual := IsRGBcolor(test.param) + if actual != test.expected { + t.Errorf("Expected IsRGBcolor(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsNull(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"abacaba", false}, + {"", true}, + } + for _, test := range tests { + actual := IsNull(test.param) + if actual != test.expected { + t.Errorf("Expected IsNull(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestHasWhitespaceOnly(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"abacaba", false}, + {"", false}, + {" ", true}, + {" \r\n ", true}, + {"\014\012\011\013\015", true}, + {"\014\012\011\013 abc \015", false}, + {"\f\n\t\v\r\f", true}, + {"x\n\t\t\t\t", false}, + {"\f\n\t \n\n\n \v\r\f", true}, + } + for _, test := range tests { + actual := HasWhitespaceOnly(test.param) + if actual != test.expected { + t.Errorf("Expected HasWhitespaceOnly(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestHasWhitespace(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"abacaba", false}, + {"", false}, + {" ", true}, + {" \r\n ", true}, + {"\014\012\011\013\015", true}, + {"\014\012\011\013 abc \015", true}, + {"\f\n\t\v\r\f", true}, + {"x\n\t\t\t\t", true}, + {"\f\n\t \n\n\n \v\r\f", true}, + } + for _, test := range tests { + actual := HasWhitespace(test.param) + if actual != test.expected { + t.Errorf("Expected HasWhitespace(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsDivisibleBy(t *testing.T) { + t.Parallel() + + var tests = []struct { + param1 string + param2 string + expected bool + }{ + {"4", "2", true}, + {"100", "10", true}, + {"", "1", true}, + {"123", "foo", false}, + {"123", "0", false}, + } + for _, test := range tests { + actual := IsDivisibleBy(test.param1, test.param2) + if actual != test.expected { + t.Errorf("Expected IsDivisibleBy(%q, %q) to be %v, got %v", test.param1, test.param2, test.expected, actual) + } + } +} + +// This small example illustrates how to work with the IsDivisibleBy function. 
+func ExampleIsDivisibleBy() { + println("1024 is divisible by 64: ", IsDivisibleBy("1024", "64")) +} + +func TestIsByteLength(t *testing.T) { + t.Parallel() + + var tests = []struct { + param1 string + param2 int + param3 int + expected bool + }{ + {"abacaba", 100, -1, false}, + {"abacaba", 1, 3, false}, + {"abacaba", 1, 7, true}, + {"abacaba", 0, 8, true}, + {"\ufff0", 1, 1, false}, + } + for _, test := range tests { + actual := IsByteLength(test.param1, test.param2, test.param3) + if actual != test.expected { + t.Errorf("Expected IsByteLength(%q, %q, %q) to be %v, got %v", test.param1, test.param2, test.param3, test.expected, actual) + } + } +} + +func TestIsJSON(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"145", true}, + {"asdf", false}, + {"123:f00", false}, + {"{\"Name\":\"Alice\",\"Body\":\"Hello\",\"Time\":1294706395881547000}", true}, + {"{}", true}, + {"{\"Key\":{\"Key\":{\"Key\":123}}}", true}, + {"[]", true}, + {"null", true}, + } + for _, test := range tests { + actual := IsJSON(test.param) + if actual != test.expected { + t.Errorf("Expected IsJSON(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsMultibyte(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"abc", false}, + {"123", false}, + {"<>@;.-=", false}, + {"ひらがな・カタカナ、．漢字", true}, + {"あいうえお foobar", true}, + {"test＠example.com", true}, + {"test＠example.com", true}, + {"１２３４abcDEｘｙｚ", true}, + {"カタカナ", true}, + {"", true}, + } + for _, test := range tests { + actual := IsMultibyte(test.param) + if actual != test.expected { + t.Errorf("Expected IsMultibyte(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsASCII(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", true}, + {"fｏｏbar", false}, + {"xyz０９８", false}, + {"１２３456", false}, + {"カタカナ", false}, + {"foobar", true}, + {"0987654321", true}, + {"test@example.com", true}, + {"1234abcDEF", true}, + {"", true}, + } + for _, test := range tests { + actual := IsASCII(test.param) + if actual != test.expected { + t.Errorf("Expected IsASCII(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsPrintableASCII(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", true}, + {"fｏｏbar", false}, + {"xyz０９８", false}, + {"１２３456", false}, + {"カタカナ", false}, + {"foobar", true}, + {"0987654321", true}, + {"test@example.com", true}, + {"1234abcDEF", true}, + {"newline\n", false}, + {"\x19test\x7F", false}, + } + for _, test := range tests { + actual := IsPrintableASCII(test.param) + if actual != test.expected { + t.Errorf("Expected IsPrintableASCII(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsFullWidth(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", true}, + {"abc", false}, + {"abc123", false}, + {"!\"#$%&()<>/+=-_? ~^|.,@`{}[]", false}, + {"ひらがな・カタカナ、．漢字", true}, + {"３ー０　ａ＠ｃｏｍ", true}, + {"Ｆｶﾀｶﾅﾞﾬ", true}, + {"Good＝Parts", true}, + {"", true}, + } + for _, test := range tests { + actual := IsFullWidth(test.param) + if actual != test.expected { + t.Errorf("Expected IsFullWidth(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsHalfWidth(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", true}, + {"あいうえお", false}, + {"００１１", false}, + {"!\"#$%&()<>/+=-_? ~^|.,@`{}[]", true}, + {"l-btn_02--active", true}, + {"abc123い", true}, + {"ｶﾀｶﾅﾞﾬ￩", true}, + {"", true}, + } + for _, test := range tests { + actual := IsHalfWidth(test.param) + if actual != test.expected { + t.Errorf("Expected IsHalfWidth(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsVariableWidth(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", true}, + {"ひらがなカタカナ漢字ABCDE", true}, + {"３ー０123", true}, + {"Ｆｶﾀｶﾅﾞﾬ", true}, + {"", true}, + {"Good＝Parts", true}, + {"abc", false}, + {"abc123", false}, + {"!\"#$%&()<>/+=-_? ~^|.,@`{}[]", false}, + {"ひらがな・カタカナ、．漢字", false}, + {"１２３４５６", false}, + {"ｶﾀｶﾅﾞﾬ", false}, + } + for _, test := range tests { + actual := IsVariableWidth(test.param) + if actual != test.expected { + t.Errorf("Expected IsVariableWidth(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsUUID(t *testing.T) { + t.Parallel() + + // Tests without version + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"xxxa987fbc9-4bed-3078-cf07-9141ba07c9f3", false}, + {"a987fbc9-4bed-3078-cf07-9141ba07c9f3xxx", false}, + {"a987fbc94bed3078cf079141ba07c9f3", false}, + {"934859", false}, + {"987fbc9-4bed-3078-cf07a-9141ba07c9f3", false}, + {"aaaaaaaa-1111-1111-aaag-111111111111", false}, + {"a987fbc9-4bed-3078-cf07-9141ba07c9f3", true}, + } + for _, test := range tests { + actual := IsUUID(test.param) + if actual != test.expected { + t.Errorf("Expected IsUUID(%q) to be %v, got %v", test.param, test.expected, actual) + } + } + + // UUID ver. 3 + tests = []struct { + param string + expected bool + }{ + {"", false}, + {"412452646", false}, + {"xxxa987fbc9-4bed-3078-cf07-9141ba07c9f3", false}, + {"a987fbc9-4bed-4078-8f07-9141ba07c9f3", false}, + {"a987fbc9-4bed-3078-cf07-9141ba07c9f3", true}, + } + for _, test := range tests { + actual := IsUUIDv3(test.param) + if actual != test.expected { + t.Errorf("Expected IsUUIDv3(%q) to be %v, got %v", test.param, test.expected, actual) + } + } + + // UUID ver. 4 + tests = []struct { + param string + expected bool + }{ + {"", false}, + {"xxxa987fbc9-4bed-3078-cf07-9141ba07c9f3", false}, + {"a987fbc9-4bed-5078-af07-9141ba07c9f3", false}, + {"934859", false}, + {"57b73598-8764-4ad0-a76a-679bb6640eb1", true}, + {"625e63f3-58f5-40b7-83a1-a72ad31acffb", true}, + } + for _, test := range tests { + actual := IsUUIDv4(test.param) + if actual != test.expected { + t.Errorf("Expected IsUUIDv4(%q) to be %v, got %v", test.param, test.expected, actual) + } + } + + // UUID ver. 
5 + tests = []struct { + param string + expected bool + }{ + + {"", false}, + {"xxxa987fbc9-4bed-3078-cf07-9141ba07c9f3", false}, + {"9c858901-8a57-4791-81fe-4c455b099bc9", false}, + {"a987fbc9-4bed-3078-cf07-9141ba07c9f3", false}, + {"987fbc97-4bed-5078-af07-9141ba07c9f3", true}, + {"987fbc97-4bed-5078-9f07-9141ba07c9f3", true}, + } + for _, test := range tests { + actual := IsUUIDv5(test.param) + if actual != test.expected { + t.Errorf("Expected IsUUIDv5(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsCreditCard(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"foo", false}, + {"5398228707871528", false}, + {"375556917985515", true}, + {"36050234196908", true}, + {"4716461583322103", true}, + {"4716-2210-5188-5662", true}, + {"4929 7226 5379 7141", true}, + {"5398228707871527", true}, + } + for _, test := range tests { + actual := IsCreditCard(test.param) + if actual != test.expected { + t.Errorf("Expected IsCreditCard(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsISBN(t *testing.T) { + t.Parallel() + + // Without version + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"foo", false}, + {"3836221195", true}, + {"1-61729-085-8", true}, + {"3 423 21412 0", true}, + {"3 401 01319 X", true}, + {"9784873113685", true}, + {"978-4-87311-368-5", true}, + {"978 3401013190", true}, + {"978-3-8362-2119-1", true}, + } + for _, test := range tests { + actual := IsISBN(test.param, -1) + if actual != test.expected { + t.Errorf("Expected IsISBN(%q, -1) to be %v, got %v", test.param, test.expected, actual) + } + } + + // ISBN 10 + tests = []struct { + param string + expected bool + }{ + {"", false}, + {"foo", false}, + {"3423214121", false}, + {"978-3836221191", false}, + {"3-423-21412-1", false}, + {"3 423 21412 1", false}, + {"3836221195", true}, + {"1-61729-085-8", true}, + {"3 423 21412 0", true}, + {"3 401 01319 X", true}, + } + for _, test := range tests { + actual := IsISBN10(test.param) + if actual != test.expected { + t.Errorf("Expected IsISBN10(%q) to be %v, got %v", test.param, test.expected, actual) + } + } + + // ISBN 13 + tests = []struct { + param string + expected bool + }{ + {"", false}, + {"foo", false}, + {"3-8362-2119-5", false}, + {"01234567890ab", false}, + {"978 3 8362 2119 0", false}, + {"9784873113685", true}, + {"978-4-87311-368-5", true}, + {"978 3401013190", true}, + {"978-3-8362-2119-1", true}, + } + for _, test := range tests { + actual := IsISBN13(test.param) + if actual != test.expected { + t.Errorf("Expected IsISBN13(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsDataURI(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"data:image/png;base64,TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4=", true}, + {"data:text/plain;base64,Vml2YW11cyBmZXJtZW50dW0gc2VtcGVyIHBvcnRhLg==", true}, + {"image/gif;base64,U3VzcGVuZGlzc2UgbGVjdHVzIGxlbw==", false}, + {"data:image/gif;base64,MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuMPNS1Ufof9EW/M98FNw" + + "UAKrwflsqVxaxQjBQnHQmiI7Vac40t8x7pIb8gLGV6wL7sBTJiPovJ0V7y7oc0Ye" + + "rhKh0Rm4skP2z/jHwwZICgGzBvA0rH8xlhUiTvcwDCJ0kc+fh35hNt8srZQM4619" + + "FTgB66Xmp4EtVyhpQV+t02g6NzK72oZI0vnAvqhpkxLeLiMCyrI416wHm5Tkukhx" + + "QmcL2a6hNOyu0ixX/x2kSFXApEnVrJ+/IxGyfyw8kf4N2IZpW5nEP847lpfj0SZZ" + + "Fwrd1mnfnDbYohX2zRptLy2ZUn06Qo9pkG5ntvFEPo9bfZeULtjYzIl6K8gJ2uGZ" + "HQIDAQAB", true}, 
+ {"data:image/png;base64,12345", false}, + {"", false}, + {"data:text,:;base85,U3VzcGVuZGlzc2UgbGVjdHVzIGxlbw==", false}, + } + for _, test := range tests { + actual := IsDataURI(test.param) + if actual != test.expected { + t.Errorf("Expected IsDataURI(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsBase64(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4=", true}, + {"Vml2YW11cyBmZXJtZW50dW0gc2VtcGVyIHBvcnRhLg==", true}, + {"U3VzcGVuZGlzc2UgbGVjdHVzIGxlbw==", true}, + {"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuMPNS1Ufof9EW/M98FNw" + + "UAKrwflsqVxaxQjBQnHQmiI7Vac40t8x7pIb8gLGV6wL7sBTJiPovJ0V7y7oc0Ye" + + "rhKh0Rm4skP2z/jHwwZICgGzBvA0rH8xlhUiTvcwDCJ0kc+fh35hNt8srZQM4619" + + "FTgB66Xmp4EtVyhpQV+t02g6NzK72oZI0vnAvqhpkxLeLiMCyrI416wHm5Tkukhx" + + "QmcL2a6hNOyu0ixX/x2kSFXApEnVrJ+/IxGyfyw8kf4N2IZpW5nEP847lpfj0SZZ" + + "Fwrd1mnfnDbYohX2zRptLy2ZUn06Qo9pkG5ntvFEPo9bfZeULtjYzIl6K8gJ2uGZ" + "HQIDAQAB", true}, + {"12345", false}, + {"", false}, + {"Vml2YW11cyBmZXJtZtesting123", false}, + } + for _, test := range tests { + actual := IsBase64(test.param) + if actual != test.expected { + t.Errorf("Expected IsBase64(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsISO3166Alpha2(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"ABCD", false}, + {"A", false}, + {"AC", false}, + {"AP", false}, + {"GER", false}, + {"NU", true}, + {"DE", true}, + {"JP", true}, + {"JPN", false}, + {"ZWE", false}, + {"GER", false}, + {"DEU", false}, + } + for _, test := range tests { + actual := IsISO3166Alpha2(test.param) + if actual != test.expected { + t.Errorf("Expected IsISO3166Alpha2(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsISO3166Alpha3(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"ABCD", false}, + {"A", false}, + {"AC", false}, + {"AP", false}, + {"NU", false}, + {"DE", false}, + {"JP", false}, + {"ZWE", true}, + {"JPN", true}, + {"GER", false}, + {"DEU", true}, + } + for _, test := range tests { + actual := IsISO3166Alpha3(test.param) + if actual != test.expected { + t.Errorf("Expected IsISO3166Alpha3(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsISO693Alpha2(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"abcd", false}, + {"a", false}, + {"ac", false}, + {"ap", false}, + {"de", true}, + {"DE", false}, + {"mk", true}, + {"mac", false}, + {"sw", true}, + {"SW", false}, + {"ger", false}, + {"deu", false}, + } + for _, test := range tests { + actual := IsISO693Alpha2(test.param) + if actual != test.expected { + t.Errorf("Expected IsISO693Alpha2(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsISO693Alpha3b(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"abcd", false}, + {"a", false}, + {"ac", false}, + {"ap", false}, + {"de", false}, + {"DE", false}, + {"mkd", false}, + {"mac", true}, + {"sw", false}, + {"SW", false}, + {"ger", true}, + {"deu", false}, + } + for _, test := range tests { + actual := IsISO693Alpha3b(test.param) + if actual != test.expected { + t.Errorf("Expected IsISO693Alpha3b(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} 
+ +func TestIsIP(t *testing.T) { + t.Parallel() + + // Without version + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"127.0.0.1", true}, + {"0.0.0.0", true}, + {"255.255.255.255", true}, + {"1.2.3.4", true}, + {"::1", true}, + {"2001:db8:0000:1:1:1:1:1", true}, + {"300.0.0.0", false}, + } + for _, test := range tests { + actual := IsIP(test.param) + if actual != test.expected { + t.Errorf("Expected IsIP(%q) to be %v, got %v", test.param, test.expected, actual) + } + } + + // IPv4 + tests = []struct { + param string + expected bool + }{ + {"", false}, + {"127.0.0.1", true}, + {"0.0.0.0", true}, + {"255.255.255.255", true}, + {"1.2.3.4", true}, + {"::1", false}, + {"2001:db8:0000:1:1:1:1:1", false}, + {"300.0.0.0", false}, + } + for _, test := range tests { + actual := IsIPv4(test.param) + if actual != test.expected { + t.Errorf("Expected IsIPv4(%q) to be %v, got %v", test.param, test.expected, actual) + } + } + + // IPv6 + tests = []struct { + param string + expected bool + }{ + {"", false}, + {"127.0.0.1", false}, + {"0.0.0.0", false}, + {"255.255.255.255", false}, + {"1.2.3.4", false}, + {"::1", true}, + {"2001:db8:0000:1:1:1:1:1", true}, + {"300.0.0.0", false}, + } + for _, test := range tests { + actual := IsIPv6(test.param) + if actual != test.expected { + t.Errorf("Expected IsIPv6(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsPort(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"1", true}, + {"65535", true}, + {"0", false}, + {"65536", false}, + {"65538", false}, + } + + for _, test := range tests { + actual := IsPort(test.param) + if actual != test.expected { + t.Errorf("Expected IsPort(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsDNSName(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"localhost", true}, + {"a.bc", true}, + {"a.b.", true}, + {"a.b..", false}, + {"localhost.local", true}, + {"localhost.localdomain.intern", true}, + {"l.local.intern", true}, + {"ru.link.n.svpncloud.com", true}, + {"-localhost", false}, + {"localhost.-localdomain", false}, + {"localhost.localdomain.-int", false}, + {"_localhost", true}, + {"localhost._localdomain", true}, + {"localhost.localdomain._int", true}, + {"lÖcalhost", false}, + {"localhost.lÖcaldomain", false}, + {"localhost.localdomain.üntern", false}, + {"__", true}, + {"localhost/", false}, + {"127.0.0.1", false}, + {"[::1]", false}, + {"50.50.50.50", false}, + {"localhost.localdomain.intern:65535", false}, + {"漢字汉字", false}, + {"www.jubfvq1v3p38i51622y0dvmdk1mymowjyeu26gbtw9andgynj1gg8z3msb1kl5z6906k846pj3sulm4kiyk82ln5teqj9nsht59opr0cs5ssltx78lfyvml19lfq1wp4usbl0o36cmiykch1vywbttcus1p9yu0669h8fj4ll7a6bmop505908s1m83q2ec2qr9nbvql2589adma3xsq2o38os2z3dmfh2tth4is4ixyfasasasefqwe4t2ub2fz1rme.de", false}, + } + + for _, test := range tests { + actual := IsDNSName(test.param) + if actual != test.expected { + t.Errorf("Expected IsDNS(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsHost(t *testing.T) { + t.Parallel() + var tests = []struct { + param string + expected bool + }{ + {"localhost", true}, + {"localhost.localdomain", true}, + {"2001:db8:0000:1:1:1:1:1", true}, + {"::1", true}, + {"play.golang.org", true}, + {"localhost.localdomain.intern:65535", false}, + {"-[::1]", false}, + {"-localhost", false}, + {".localhost", false}, + } + for _, test := range tests { + actual := IsHost(test.param) + if actual 
!= test.expected { + t.Errorf("Expected IsHost(%q) to be %v, got %v", test.param, test.expected, actual) + } + } + +} + +func TestIsDialString(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"localhost.local:1", true}, + {"localhost.localdomain:9090", true}, + {"localhost.localdomain.intern:65535", true}, + {"127.0.0.1:30000", true}, + {"[::1]:80", true}, + {"[1200::AB00:1234::2552:7777:1313]:22", false}, + {"-localhost:1", false}, + {"localhost.-localdomain:9090", false}, + {"localhost.localdomain.-int:65535", false}, + {"localhost.loc:100000", false}, + {"漢字汉字:2", false}, + {"www.jubfvq1v3p38i51622y0dvmdk1mymowjyeu26gbtw9andgynj1gg8z3msb1kl5z6906k846pj3sulm4kiyk82ln5teqj9nsht59opr0cs5ssltx78lfyvml19lfq1wp4usbl0o36cmiykch1vywbttcus1p9yu0669h8fj4ll7a6bmop505908s1m83q2ec2qr9nbvql2589adma3xsq2o38os2z3dmfh2tth4is4ixyfasasasefqwe4t2ub2fz1rme.de:20000", false}, + } + + for _, test := range tests { + actual := IsDialString(test.param) + if actual != test.expected { + t.Errorf("Expected IsDialString(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsMAC(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"3D:F2:C9:A6:B3:4F", true}, + {"3D-F2-C9-A6-B3:4F", false}, + {"123", false}, + {"", false}, + {"abacaba", false}, + } + for _, test := range tests { + actual := IsMAC(test.param) + if actual != test.expected { + t.Errorf("Expected IsMAC(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestFilePath(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + osType int + }{ + {"c:\\" + strings.Repeat("a", 32767), true, Win}, //See http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath + {"c:\\" + strings.Repeat("a", 32768), false, Win}, + {"c:\\path\\file (x86)\bar", true, Win}, + {"c:\\path\\file", true, Win}, + {"c:\\path\\file:exe", false, Unknown}, + {"C:\\", true, Win}, + {"c:\\path\\file\\", true, Win}, + {"c:/path/file/", false, Unknown}, + {"/path/file/", true, Unix}, + {"/path/file:SAMPLE/", true, Unix}, + {"/path/file:/.txt", true, Unix}, + {"/path", true, Unix}, + {"/path/__bc/file.txt", true, Unix}, + {"/path/a--ac/file.txt", true, Unix}, + {"/_path/file.txt", true, Unix}, + {"/path/__bc/file.txt", true, Unix}, + {"/path/a--ac/file.txt", true, Unix}, + {"/__path/--file.txt", true, Unix}, + {"/path/a bc", true, Unix}, + } + for _, test := range tests { + actual, osType := IsFilePath(test.param) + if actual != test.expected || osType != test.osType { + t.Errorf("Expected IsFilePath(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsLatitude(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"-90.000", true}, + {"+90", true}, + {"47.1231231", true}, + {"+99.9", false}, + {"108", false}, + } + for _, test := range tests { + actual := IsLatitude(test.param) + if actual != test.expected { + t.Errorf("Expected IsLatitude(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsLongitude(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"-180.000", true}, + {"180.1", false}, + {"+73.234", true}, + {"+382.3811", false}, + {"23.11111111", true}, + } + for _, test := range tests { + actual := IsLongitude(test.param) + if actual != test.expected { + t.Errorf("Expected IsLongitude(%q) to be %v, got %v", test.param, 
test.expected, actual) + } + } +} + +func TestIsSSN(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"00-90-8787", false}, + {"66690-76", false}, + {"191 60 2869", true}, + {"191-60-2869", true}, + } + for _, test := range tests { + actual := IsSSN(test.param) + if actual != test.expected { + t.Errorf("Expected IsSSN(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsMongoID(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"507f1f77bcf86cd799439011", true}, + {"507f1f77bcf86cd7994390", false}, + {"507f1f77bcf86cd79943901z", false}, + {"507f1f77bcf86cd799439011 ", false}, + {"", false}, + } + for _, test := range tests { + actual := IsMongoID(test.param) + if actual != test.expected { + t.Errorf("Expected IsMongoID(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsSemver(t *testing.T) { + t.Parallel() + var tests = []struct { + param string + expected bool + }{ + {"v1.0.0", true}, + {"1.0.0", true}, + {"1.1.01", false}, + {"1.01.0", false}, + {"01.1.0", false}, + {"v1.1.01", false}, + {"v1.01.0", false}, + {"v01.1.0", false}, + {"1.0.0-alpha", true}, + {"1.0.0-alpha.1", true}, + {"1.0.0-0.3.7", true}, + {"1.0.0-0.03.7", false}, + {"1.0.0-00.3.7", false}, + {"1.0.0-x.7.z.92", true}, + {"1.0.0-alpha+001", true}, + {"1.0.0+20130313144700", true}, + {"1.0.0-beta+exp.sha.5114f85", true}, + {"1.0.0-beta+exp.sha.05114f85", true}, + {"1.0.0-+beta", false}, + {"1.0.0-b+-9+eta", false}, + {"v+1.8.0-b+-9+eta", false}, + } + for _, test := range tests { + actual := IsSemver(test.param) + if actual != test.expected { + t.Errorf("Expected IsSemver(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsTime(t *testing.T) { + t.Parallel() + var tests = []struct { + param string + format string + expected bool + }{ + {"2016-12-31 11:00", time.RFC3339, false}, + {"2016-12-31 11:00:00", time.RFC3339, false}, + {"2016-12-31T11:00", time.RFC3339, false}, + {"2016-12-31T11:00:00", time.RFC3339, false}, + {"2016-12-31T11:00:00Z", time.RFC3339, true}, + {"2016-12-31T11:00:00+01:00", time.RFC3339, true}, + {"2016-12-31T11:00:00-01:00", time.RFC3339, true}, + {"2016-12-31T11:00:00.05Z", time.RFC3339, true}, + {"2016-12-31T11:00:00.05-01:00", time.RFC3339, true}, + {"2016-12-31T11:00:00.05+01:00", time.RFC3339, true}, + {"2016-12-31T11:00:00", RF3339WithoutZone, true}, + {"2016-12-31T11:00:00Z", RF3339WithoutZone, false}, + {"2016-12-31T11:00:00+01:00", RF3339WithoutZone, false}, + {"2016-12-31T11:00:00-01:00", RF3339WithoutZone, false}, + {"2016-12-31T11:00:00.05Z", RF3339WithoutZone, false}, + {"2016-12-31T11:00:00.05-01:00", RF3339WithoutZone, false}, + {"2016-12-31T11:00:00.05+01:00", RF3339WithoutZone, false}, + } + for _, test := range tests { + actual := IsTime(test.param, test.format) + if actual != test.expected { + t.Errorf("Expected IsTime(%q, time.RFC3339) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsRFC3339(t *testing.T) { + t.Parallel() + var tests = []struct { + param string + expected bool + }{ + {"2016-12-31 11:00", false}, + {"2016-12-31 11:00:00", false}, + {"2016-12-31T11:00", false}, + {"2016-12-31T11:00:00", false}, + {"2016-12-31T11:00:00Z", true}, + {"2016-12-31T11:00:00+01:00", true}, + {"2016-12-31T11:00:00-01:00", true}, + {"2016-12-31T11:00:00.05Z", true}, + {"2016-12-31T11:00:00.05-01:00", true}, + {"2016-12-31T11:00:00.05+01:00", true}, + } + for _, test 
:= range tests { + actual := IsRFC3339(test.param) + if actual != test.expected { + t.Errorf("Expected IsRFC3339(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestIsISO4217(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", false}, + {"ABCD", false}, + {"A", false}, + {"ZZZ", false}, + {"usd", false}, + {"USD", true}, + } + for _, test := range tests { + actual := IsISO4217(test.param) + if actual != test.expected { + t.Errorf("Expected IsISO4217(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestByteLength(t *testing.T) { + t.Parallel() + + var tests = []struct { + value string + min string + max string + expected bool + }{ + {"123456", "0", "100", true}, + {"1239999", "0", "0", false}, + {"1239asdfasf99", "100", "200", false}, + {"1239999asdff29", "10", "30", true}, + {"你", "0", "1", false}, + } + for _, test := range tests { + actual := ByteLength(test.value, test.min, test.max) + if actual != test.expected { + t.Errorf("Expected ByteLength(%s, %s, %s) to be %v, got %v", test.value, test.min, test.max, test.expected, actual) + } + } +} + +func TestRuneLength(t *testing.T) { + t.Parallel() + + var tests = []struct { + value string + min string + max string + expected bool + }{ + {"123456", "0", "100", true}, + {"1239999", "0", "0", false}, + {"1239asdfasf99", "100", "200", false}, + {"1239999asdff29", "10", "30", true}, + {"你", "0", "1", true}, + } + for _, test := range tests { + actual := RuneLength(test.value, test.min, test.max) + if actual != test.expected { + t.Errorf("Expected RuneLength(%s, %s, %s) to be %v, got %v", test.value, test.min, test.max, test.expected, actual) + } + } +} + +func TestStringLength(t *testing.T) { + t.Parallel() + + var tests = []struct { + value string + min string + max string + expected bool + }{ + {"123456", "0", "100", true}, + {"1239999", "0", "0", false}, + {"1239asdfasf99", "100", "200", false}, + {"1239999asdff29", "10", "30", true}, + {"あいうえお", "0", "5", true}, + {"あいうえおか", "0", "5", false}, + {"あいうえお", "0", "0", false}, + {"あいうえ", "5", "10", false}, + } + for _, test := range tests { + actual := StringLength(test.value, test.min, test.max) + if actual != test.expected { + t.Errorf("Expected StringLength(%s, %s, %s) to be %v, got %v", test.value, test.min, test.max, test.expected, actual) + } + } +} + +func TestIsIn(t *testing.T) { + t.Parallel() + + var tests = []struct { + value string + params []string + expected bool + }{ + {"PRESENT", []string{"PRESENT"}, true}, + {"PRESENT", []string{"PRESENT", "PRÉSENTE", "NOTABSENT"}, true}, + {"PRÉSENTE", []string{"PRESENT", "PRÉSENTE", "NOTABSENT"}, true}, + {"PRESENT", []string{}, false}, + {"PRESENT", nil, false}, + {"ABSENT", []string{"PRESENT", "PRÉSENTE", "NOTABSENT"}, false}, + {"", []string{"PRESENT", "PRÉSENTE", "NOTABSENT"}, false}, + } + for _, test := range tests { + actual := IsIn(test.value, test.params...) 
+ if actual != test.expected { + t.Errorf("Expected IsIn(%s, %v) to be %v, got %v", test.value, test.params, test.expected, actual) + } + } +} + +type Address struct { + Street string `valid:"-"` + Zip string `json:"zip" valid:"numeric,required"` +} + +type User struct { + Name string `valid:"required"` + Email string `valid:"required,email"` + Password string `valid:"required"` + Age int `valid:"required,numeric,@#\u0000"` + Home *Address + Work []Address +} + +type UserValid struct { + Name string `valid:"required"` + Email string `valid:"required,email"` + Password string `valid:"required"` + Age int `valid:"required"` + Home *Address + Work []Address `valid:"required"` +} + +type PrivateStruct struct { + privateField string `valid:"required,alpha,d_k"` + NonZero int + ListInt []int + ListString []string `valid:"alpha"` + Work [2]Address + Home Address + Map map[string]Address +} + +type NegationStruct struct { + NotInt string `valid:"!int"` + Int string `valid:"int"` +} + +type LengthStruct struct { + Length string `valid:"length(10|20)"` +} + +type StringLengthStruct struct { + Length string `valid:"stringlength(10|20)"` +} + +type StringMatchesStruct struct { + StringMatches string `valid:"matches(^[0-9]{3}$)"` +} + +// TODO: this testcase should be fixed +// type StringMatchesComplexStruct struct { +// StringMatches string `valid:"matches(^\\$\\([\"']\\w+[\"']\\)$)"` +// } + +type IsInStruct struct { + IsIn string `valid:"in(PRESENT|PRÉSENTE|NOTABSENT)"` +} + +type Post struct { + Title string `valid:"alpha,required"` + Message string `valid:"ascii"` + AuthorIP string `valid:"ipv4"` +} + +type MissingValidationDeclarationStruct struct { + Name string `` + Email string `valid:"required,email"` +} + +type FieldRequiredByDefault struct { + Email string `valid:"email"` +} + +type MultipleFieldsRequiredByDefault struct { + Url string `valid:"url"` + Email string `valid:"email"` +} + +type FieldsRequiredByDefaultButExemptStruct struct { + Name string `valid:"-"` + Email string `valid:"email"` +} + +type FieldsRequiredByDefaultButExemptOrOptionalStruct struct { + Name string `valid:"-"` + Email string `valid:"optional,email"` +} + +type MessageWithSeveralFieldsStruct struct { + Title string `valid:"length(1|10)"` + Body string `valid:"length(1|10)"` +} + +func TestValidateMissingValidationDeclarationStruct(t *testing.T) { + var tests = []struct { + param MissingValidationDeclarationStruct + expected bool + }{ + {MissingValidationDeclarationStruct{}, false}, + {MissingValidationDeclarationStruct{Name: "TEST", Email: "test@example.com"}, false}, + } + SetFieldsRequiredByDefault(true) + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } + SetFieldsRequiredByDefault(false) +} + +func TestFieldRequiredByDefault(t *testing.T) { + var tests = []struct { + param FieldRequiredByDefault + expected bool + }{ + {FieldRequiredByDefault{}, false}, + } + SetFieldsRequiredByDefault(true) + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } + SetFieldsRequiredByDefault(false) +} + +func TestMultipleFieldsRequiredByDefault(t 
*testing.T) { + var tests = []struct { + param MultipleFieldsRequiredByDefault + expected bool + }{ + {MultipleFieldsRequiredByDefault{}, false}, + } + SetFieldsRequiredByDefault(true) + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } + SetFieldsRequiredByDefault(false) +} + +func TestFieldsRequiredByDefaultButExemptStruct(t *testing.T) { + var tests = []struct { + param FieldsRequiredByDefaultButExemptStruct + expected bool + }{ + {FieldsRequiredByDefaultButExemptStruct{}, false}, + {FieldsRequiredByDefaultButExemptStruct{Name: "TEST"}, false}, + {FieldsRequiredByDefaultButExemptStruct{Email: ""}, false}, + {FieldsRequiredByDefaultButExemptStruct{Email: "test@example.com"}, true}, + } + SetFieldsRequiredByDefault(true) + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } + SetFieldsRequiredByDefault(false) +} + +func TestFieldsRequiredByDefaultButExemptOrOptionalStruct(t *testing.T) { + var tests = []struct { + param FieldsRequiredByDefaultButExemptOrOptionalStruct + expected bool + }{ + {FieldsRequiredByDefaultButExemptOrOptionalStruct{}, true}, + {FieldsRequiredByDefaultButExemptOrOptionalStruct{Name: "TEST"}, true}, + {FieldsRequiredByDefaultButExemptOrOptionalStruct{Email: ""}, true}, + {FieldsRequiredByDefaultButExemptOrOptionalStruct{Email: "test@example.com"}, true}, + {FieldsRequiredByDefaultButExemptOrOptionalStruct{Email: "test@example"}, false}, + } + SetFieldsRequiredByDefault(true) + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } + SetFieldsRequiredByDefault(false) +} + +func TestInvalidValidator(t *testing.T) { + type InvalidStruct struct { + Field int `valid:"someInvalidValidator"` + } + + invalidStruct := InvalidStruct{1} + if valid, err := ValidateStruct(&invalidStruct); valid || err == nil || + err.Error() != `Field: The following validator is invalid or can't be applied to the field: "someInvalidValidator"` { + t.Errorf("Got an unexpected result for struct with invalid validator: %t %s", valid, err) + } +} + +func TestCustomValidator(t *testing.T) { + type ValidStruct struct { + Field int `valid:"customTrueValidator"` + } + + type InvalidStruct struct { + Field int `valid:"customFalseValidator~Value: %s Custom validator error: %s"` + } + + type StructWithCustomAndBuiltinValidator struct { + Field int `valid:"customTrueValidator,required"` + } + + if valid, err := ValidateStruct(&ValidStruct{Field: 1}); !valid || err != nil { + t.Errorf("Got an unexpected result for struct with custom always true validator: %t %s", valid, err) + } + + if valid, err := ValidateStruct(&InvalidStruct{Field: 1}); valid || err == nil || err.Error() != "Value: 1 Custom validator error: customFalseValidator" { + fmt.Println(err) + t.Errorf("Got an unexpected result for struct with custom always false validator: %t %s", valid, err) + } + + mixedStruct := 
StructWithCustomAndBuiltinValidator{} + if valid, err := ValidateStruct(&mixedStruct); valid || err == nil || err.Error() != "Field: non zero value required" { + t.Errorf("Got an unexpected result for invalid struct with custom and built-in validators: %t %s", valid, err) + } + + mixedStruct.Field = 1 + if valid, err := ValidateStruct(&mixedStruct); !valid || err != nil { + t.Errorf("Got an unexpected result for valid struct with custom and built-in validators: %t %s", valid, err) + } +} + +type CustomByteArray [6]byte + +type StructWithCustomByteArray struct { + ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` + Email string `valid:"email"` + CustomMinLength int `valid:"-"` +} + +func TestStructWithCustomByteArray(t *testing.T) { + t.Parallel() + + // add our custom byte array validator that fails when the byte array is pristine (all zeroes) + CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool { + switch v := o.(type) { + case StructWithCustomByteArray: + if len(v.Email) > 0 { + if v.Email != "test@example.com" { + t.Errorf("v.Email should have been 'test@example.com' but was '%s'", v.Email) + } + } + default: + t.Errorf("Context object passed to custom validator should have been a StructWithCustomByteArray but was %T (%+v)", o, o) + } + + switch v := i.(type) { + case CustomByteArray: + for _, e := range v { // check if v is empty, i.e. all zeroes + if e != 0 { + return true + } + } + } + return false + })) + CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool { + switch v := o.(type) { + case StructWithCustomByteArray: + return len(v.ID) >= v.CustomMinLength + } + return false + })) + testCustomByteArray := CustomByteArray{'1', '2', '3', '4', '5', '6'} + var tests = []struct { + param StructWithCustomByteArray + expected bool + }{ + {StructWithCustomByteArray{}, false}, + {StructWithCustomByteArray{Email: "test@example.com"}, false}, + {StructWithCustomByteArray{ID: testCustomByteArray, Email: "test@example.com"}, true}, + {StructWithCustomByteArray{ID: testCustomByteArray, Email: "test@example.com", CustomMinLength: 7}, false}, + } + SetFieldsRequiredByDefault(true) + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } + SetFieldsRequiredByDefault(false) +} + +func TestValidateNegationStruct(t *testing.T) { + var tests = []struct { + param NegationStruct + expected bool + }{ + {NegationStruct{"a1", "11"}, true}, + {NegationStruct{"email@email.email", "11"}, true}, + {NegationStruct{"123456----", "11"}, true}, + {NegationStruct{"::1", "11"}, true}, + {NegationStruct{"123.123", "11"}, true}, + {NegationStruct{"a1", "a1"}, false}, + {NegationStruct{"11", "a1"}, false}, + {NegationStruct{"11", "11"}, false}, + } + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } +} + +func TestLengthStruct(t *testing.T) { + var tests = []struct { + param interface{} + expected bool + }{ + {LengthStruct{"11111"}, false}, + {LengthStruct{"11111111111111111110000000000000000"}, false}, + 
{LengthStruct{"11dfffdf0099"}, true}, + } + + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } +} + +func TestStringLengthStruct(t *testing.T) { + var tests = []struct { + param interface{} + expected bool + }{ + {StringLengthStruct{"11111"}, false}, + {StringLengthStruct{"11111111111111111110000000000000000"}, false}, + {StringLengthStruct{"11dfffdf0099"}, true}, + {StringLengthStruct{"あいうえお"}, false}, + {StringLengthStruct{"あいうえおかきくけこ"}, true}, + {StringLengthStruct{"あいうえおかきくけこさしすせそたちつてと"}, true}, + {StringLengthStruct{"あいうえおかきくけこさしすせそたちつてとな"}, false}, + } + + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } +} + +func TestStringMatchesStruct(t *testing.T) { + var tests = []struct { + param interface{} + expected bool + }{ + {StringMatchesStruct{"123"}, true}, + {StringMatchesStruct{"123456"}, false}, + {StringMatchesStruct{"123abcd"}, false}, + } + + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } +} + +func TestIsInStruct(t *testing.T) { + var tests = []struct { + param interface{} + expected bool + }{ + {IsInStruct{"PRESENT"}, true}, + {IsInStruct{""}, true}, + {IsInStruct{" "}, false}, + {IsInStruct{"ABSENT"}, false}, + } + + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } +} + +func TestRequiredIsInStruct(t *testing.T) { + type RequiredIsInStruct struct { + IsIn string `valid:"in(PRESENT|PRÉSENTE|NOTABSENT),required"` + } + + var tests = []struct { + param interface{} + expected bool + }{ + {RequiredIsInStruct{"PRESENT"}, true}, + {RequiredIsInStruct{""}, false}, + {RequiredIsInStruct{" "}, false}, + {RequiredIsInStruct{"ABSENT"}, false}, + } + + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } +} + +func TestEmptyRequiredIsInStruct(t *testing.T) { + type EmptyRequiredIsInStruct struct { + IsIn string `valid:"in(),required"` + } + + var tests = []struct { + param interface{} + expected bool + }{ + {EmptyRequiredIsInStruct{"PRESENT"}, false}, + {EmptyRequiredIsInStruct{""}, false}, + {EmptyRequiredIsInStruct{" "}, false}, + {EmptyRequiredIsInStruct{"ABSENT"}, false}, + } + + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on 
ValidateStruct(%q): %s", test.param, err) + } + } + } +} + +func TestEmptyStringPtr(t *testing.T) { + type EmptyIsInStruct struct { + IsIn *string `valid:"length(3|5),required"` + } + + var empty = "" + var valid = "123" + var invalid = "123456" + + var tests = []struct { + param interface{} + expected bool + expectedErr string + }{ + {EmptyIsInStruct{&empty}, false, "IsIn: non zero value required"}, + {EmptyIsInStruct{nil}, true, ""}, + {EmptyIsInStruct{&valid}, true, ""}, + {EmptyIsInStruct{&invalid}, false, "IsIn: 123456 does not validate as length(3|5)"}, + } + + SetNilPtrAllowedByRequired(true) + for _, test := range tests { + actual, err := ValidateStruct(test.param) + + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + } + if err != nil { + if err.Error() != test.expectedErr { + t.Errorf("Got Error on ValidateStruct(%q). Expected: %s Actual: %s", test.param, test.expectedErr, err) + } + } else if test.expectedErr != "" { + t.Errorf("Expected error on ValidateStruct(%q).", test.param) + } + } + SetNilPtrAllowedByRequired(false) +} + +func TestNestedStruct(t *testing.T) { + type EvenMoreNestedStruct struct { + Bar string `valid:"length(3|5)"` + } + type NestedStruct struct { + Foo string `valid:"length(3|5),required"` + EvenMoreNested EvenMoreNestedStruct + SliceEvenMoreNested []EvenMoreNestedStruct + MapEvenMoreNested map[string]EvenMoreNestedStruct + } + type OuterStruct struct { + Nested NestedStruct + } + + var tests = []struct { + param interface{} + expected bool + expectedErr string + }{ + {OuterStruct{ + Nested: NestedStruct{ + Foo: "", + }, + }, false, "Nested.Foo: non zero value required"}, + {OuterStruct{ + Nested: NestedStruct{ + Foo: "123", + }, + }, true, ""}, + {OuterStruct{ + Nested: NestedStruct{ + Foo: "123456", + }, + }, false, "Nested.Foo: 123456 does not validate as length(3|5)"}, + {OuterStruct{ + Nested: NestedStruct{ + Foo: "123", + EvenMoreNested: EvenMoreNestedStruct{ + Bar: "123456", + }, + }, + }, false, "Nested.EvenMoreNested.Bar: 123456 does not validate as length(3|5)"}, + {OuterStruct{ + Nested: NestedStruct{ + Foo: "123", + SliceEvenMoreNested: []EvenMoreNestedStruct{ + EvenMoreNestedStruct{ + Bar: "123456", + }, + }, + }, + }, false, "Nested.SliceEvenMoreNested.0.Bar: 123456 does not validate as length(3|5)"}, + {OuterStruct{ + Nested: NestedStruct{ + Foo: "123", + MapEvenMoreNested: map[string]EvenMoreNestedStruct{ + "Foo": EvenMoreNestedStruct{ + Bar: "123456", + }, + }, + }, + }, false, "Nested.MapEvenMoreNested.Foo.Bar: 123456 does not validate as length(3|5)"}, + } + + for _, test := range tests { + actual, err := ValidateStruct(test.param) + + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + } + if err != nil { + if err.Error() != test.expectedErr { + t.Errorf("Got Error on ValidateStruct(%q). 
Expected: %s Actual: %s", test.param, test.expectedErr, err) + } + } else if test.expectedErr != "" { + t.Errorf("Expected error on ValidateStruct(%q).", test.param) + } + } +} + +func TestFunkyIsInStruct(t *testing.T) { + type FunkyIsInStruct struct { + IsIn string `valid:"in(PRESENT|| |PRÉSENTE|NOTABSENT)"` + } + + var tests = []struct { + param interface{} + expected bool + }{ + {FunkyIsInStruct{"PRESENT"}, true}, + {FunkyIsInStruct{""}, true}, + {FunkyIsInStruct{" "}, true}, + {FunkyIsInStruct{"ABSENT"}, false}, + } + + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } +} + +// TODO: test case broken +// func TestStringMatchesComplexStruct(t *testing.T) { +// var tests = []struct { +// param interface{} +// expected bool +// }{ +// {StringMatchesComplexStruct{"$()"}, false}, +// {StringMatchesComplexStruct{"$('AZERTY')"}, true}, +// {StringMatchesComplexStruct{`$("AZERTY")`}, true}, +// {StringMatchesComplexStruct{`$("")`}, false}, +// {StringMatchesComplexStruct{"AZERTY"}, false}, +// {StringMatchesComplexStruct{"$AZERTY"}, false}, +// } + +// for _, test := range tests { +// actual, err := ValidateStruct(test.param) +// if actual != test.expected { +// t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) +// if err != nil { +// t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) +// } +// } +// } +// } + +func TestValidateStruct(t *testing.T) { + + var tests = []struct { + param interface{} + expected bool + }{ + {User{"John", "john@yahoo.com", "123G#678", 20, &Address{"Street", "ABC456D89"}, []Address{{"Street", "123456"}, {"Street", "123456"}}}, false}, + {User{"John", "john!yahoo.com", "12345678", 20, &Address{"Street", "ABC456D89"}, []Address{{"Street", "ABC456D89"}, {"Street", "123456"}}}, false}, + {User{"John", "", "12345", 0, &Address{"Street", "123456789"}, []Address{{"Street", "ABC456D89"}, {"Street", "123456"}}}, false}, + {UserValid{"John", "john@yahoo.com", "123G#678", 20, &Address{"Street", "123456"}, []Address{{"Street", "123456"}, {"Street", "123456"}}}, true}, + {UserValid{"John", "john!yahoo.com", "12345678", 20, &Address{"Street", "ABC456D89"}, []Address{}}, false}, + {UserValid{"John", "john@yahoo.com", "12345678", 20, &Address{"Street", "123456xxx"}, []Address{{"Street", "123456"}, {"Street", "123456"}}}, false}, + {UserValid{"John", "john!yahoo.com", "12345678", 20, &Address{"Street", "ABC456D89"}, []Address{{"Street", "ABC456D89"}, {"Street", "123456"}}}, false}, + {UserValid{"John", "", "12345", 0, &Address{"Street", "123456789"}, []Address{{"Street", "ABC456D89"}, {"Street", "123456"}}}, false}, + {nil, true}, + {User{"John", "john@yahoo.com", "123G#678", 0, &Address{"Street", "123456"}, []Address{}}, false}, + {"im not a struct", false}, + } + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } + + TagMap["d_k"] = Validator(func(str string) bool { + return str == "d_k" + }) + result, err := ValidateStruct(PrivateStruct{"d_k", 0, []int{1, 2}, []string{"hi", "super"}, [2]Address{{"Street", "123456"}, + {"Street", "123456"}}, 
Address{"Street", "123456"}, map[string]Address{"address": {"Street", "123456"}}}) + if !result { + t.Log("Case ", 6, ": expected ", true, " when result is ", result) + t.Error(err) + t.FailNow() + } +} + +type testByteArray [8]byte +type testByteMap map[byte]byte +type testByteSlice []byte +type testStringStringMap map[string]string +type testStringIntMap map[string]int + +func TestRequired(t *testing.T) { + + testString := "foobar" + testEmptyString := "" + var tests = []struct { + param interface{} + expected bool + }{ + { + struct { + Pointer *string `valid:"required"` + }{}, + false, + }, + { + struct { + Pointer *string `valid:"required"` + }{ + Pointer: &testEmptyString, + }, + false, + }, + { + struct { + Pointer *string `valid:"required"` + }{ + Pointer: &testString, + }, + true, + }, + { + struct { + Addr Address `valid:"required"` + }{}, + false, + }, + { + struct { + Addr Address `valid:"required"` + }{ + Addr: Address{"", "123"}, + }, + true, + }, + { + struct { + Pointer *Address `valid:"required"` + }{}, + false, + }, + { + struct { + Pointer *Address `valid:"required"` + }{ + Pointer: &Address{"", "123"}, + }, + true, + }, + { + struct { + TestByteArray testByteArray `valid:"required"` + }{}, + false, + }, + { + struct { + TestByteArray testByteArray `valid:"required"` + }{ + testByteArray{}, + }, + false, + }, + { + struct { + TestByteArray testByteArray `valid:"required"` + }{ + testByteArray{'1', '2', '3', '4', '5', '6', '7', 'A'}, + }, + true, + }, + { + struct { + TestByteMap testByteMap `valid:"required"` + }{}, + false, + }, + { + struct { + TestByteSlice testByteSlice `valid:"required"` + }{}, + false, + }, + { + struct { + TestStringStringMap testStringStringMap `valid:"required"` + }{ + testStringStringMap{"test": "test"}, + }, + true, + }, + { + struct { + TestIntMap testStringIntMap `valid:"required"` + }{ + testStringIntMap{"test": 42}, + }, + true, + }, + } + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } +} + +func TestErrorByField(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected string + }{ + {"message", ""}, + {"Message", ""}, + {"title", ""}, + {"Title", "My123 does not validate as alpha"}, + {"AuthorIP", "123 does not validate as ipv4"}, + } + post := &Post{"My123", "duck13126", "123"} + _, err := ValidateStruct(post) + + for _, test := range tests { + actual := ErrorByField(err, test.param) + if actual != test.expected { + t.Errorf("Expected ErrorByField(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestErrorsByField(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected string + }{ + {"Title", "My123 does not validate as alpha"}, + {"AuthorIP", "123 does not validate as ipv4"}, + } + post := &Post{Title: "My123", Message: "duck13126", AuthorIP: "123"} + _, err := ValidateStruct(post) + errs := ErrorsByField(err) + if len(errs) != 2 { + t.Errorf("There should only be 2 errors but got %v", len(errs)) + } + + for _, test := range tests { + if actual, ok := errs[test.param]; !ok || actual != test.expected { + t.Errorf("Expected ErrorsByField(%q) to be %v, got %v", test.param, test.expected, actual) + } + } + + tests = []struct { + param string + expected string + }{ + {"Title", ";:;message;:; does not validate as 
length(1|10)"}, + {"Body", ";:;message;:; does not validate as length(1|10)"}, + } + + message := &MessageWithSeveralFieldsStruct{Title: ";:;message;:;", Body: ";:;message;:;"} + _, err = ValidateStruct(message) + errs = ErrorsByField(err) + if len(errs) != 2 { + t.Errorf("There should only be 2 errors but got %v", len(errs)) + } + + for _, test := range tests { + if actual, ok := errs[test.param]; !ok || actual != test.expected { + t.Errorf("Expected ErrorsByField(%q) to be %v, got %v", test.param, test.expected, actual) + } + } + + tests = []struct { + param string + expected string + }{ + {"CustomField", "An error occurred"}, + } + + err = Error{"CustomField", fmt.Errorf("An error occurred"), false, "hello", []string{}} + errs = ErrorsByField(err) + + if len(errs) != 1 { + t.Errorf("There should only be 1 errors but got %v", len(errs)) + } + + for _, test := range tests { + if actual, ok := errs[test.param]; !ok || actual != test.expected { + t.Errorf("Expected ErrorsByField(%q) to be %v, got %v", test.param, test.expected, actual) + } + } + + type StructWithCustomValidation struct { + Email string `valid:"email"` + ID string `valid:"falseValidation"` + } + + CustomTypeTagMap.Set("falseValidation", CustomTypeValidator(func(i interface{}, o interface{}) bool { + return false + })) + + tests = []struct { + param string + expected string + }{ + {"Email", "My123 does not validate as email"}, + {"ID", "duck13126 does not validate as falseValidation"}, + } + s := &StructWithCustomValidation{Email: "My123", ID: "duck13126"} + _, err = ValidateStruct(s) + errs = ErrorsByField(err) + if len(errs) != 2 { + t.Errorf("There should only be 2 errors but got %v", len(errs)) + } + + for _, test := range tests { + if actual, ok := errs[test.param]; !ok || actual != test.expected { + t.Errorf("Expected ErrorsByField(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestValidateStructPointers(t *testing.T) { + // Struct which uses pointers for values + type UserWithPointers struct { + Name *string `valid:"-"` + Email *string `valid:"email"` + FavoriteFood *string `valid:"length(0|32)"` + Nerd *bool `valid:"-"` + } + + var tests = []struct { + param string + expected string + }{ + {"Name", ""}, + {"Email", "invalid does not validate as email"}, + {"FavoriteFood", ""}, + {"Nerd", ""}, + } + + name := "Herman" + email := "invalid" + food := "Pizza" + nerd := true + user := &UserWithPointers{&name, &email, &food, &nerd} + _, err := ValidateStruct(user) + + for _, test := range tests { + actual := ErrorByField(err, test.param) + if actual != test.expected { + t.Errorf("Expected ErrorByField(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func ExampleValidateStruct() { + type Post struct { + Title string `valid:"alphanum,required"` + Message string `valid:"duck,ascii"` + AuthorIP string `valid:"ipv4"` + } + post := &Post{"My Example Post", "duck", "123.234.54.3"} + + //Add your own struct validation tags + TagMap["duck"] = Validator(func(str string) bool { + return str == "duck" + }) + + result, err := ValidateStruct(post) + if err != nil { + println("error: " + err.Error()) + } + println(result) +} + +func TestValidateStructParamValidatorInt(t *testing.T) { + type Test1 struct { + Int int `valid:"range(1|10)"` + Int8 int8 `valid:"range(1|10)"` + Int16 int16 `valid:"range(1|10)"` + Int32 int32 `valid:"range(1|10)"` + Int64 int64 `valid:"range(1|10)"` + + Uint uint `valid:"range(1|10)"` + Uint8 uint8 `valid:"range(1|10)"` + Uint16 uint16 `valid:"range(1|10)"` + 
Uint32 uint32 `valid:"range(1|10)"`
+ Uint64 uint64 `valid:"range(1|10)"`
+
+ Float32 float32 `valid:"range(1|10)"`
+ Float64 float64 `valid:"range(1|10)"`
+ }
+ test1Ok := &Test1{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}
+ test1NotOk := &Test1{11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}
+
+ _, err := ValidateStruct(test1Ok)
+ if err != nil {
+ t.Errorf("Test failed: %s", err)
+ }
+
+ _, err = ValidateStruct(test1NotOk)
+ if err == nil {
+ t.Errorf("Test failed: nil")
+ }
+
+ type Test2 struct {
+ Int int `valid:"in(1|10)"`
+ Int8 int8 `valid:"in(1|10)"`
+ Int16 int16 `valid:"in(1|10)"`
+ Int32 int32 `valid:"in(1|10)"`
+ Int64 int64 `valid:"in(1|10)"`
+
+ Uint uint `valid:"in(1|10)"`
+ Uint8 uint8 `valid:"in(1|10)"`
+ Uint16 uint16 `valid:"in(1|10)"`
+ Uint32 uint32 `valid:"in(1|10)"`
+ Uint64 uint64 `valid:"in(1|10)"`
+
+ Float32 float32 `valid:"in(1|10)"`
+ Float64 float64 `valid:"in(1|10)"`
+ }
+
+ test2Ok1 := &Test2{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
+ test2Ok2 := &Test2{10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}
+ test2NotOk := &Test2{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}
+
+ _, err = ValidateStruct(test2Ok1)
+ if err != nil {
+ t.Errorf("Test failed: %s", err)
+ }
+
+ _, err = ValidateStruct(test2Ok2)
+ if err != nil {
+ t.Errorf("Test failed: %s", err)
+ }
+
+ _, err = ValidateStruct(test2NotOk)
+ if err == nil {
+ t.Errorf("Test failed: nil")
+ }
+
+ type Test3 struct {
+ Int int `valid:"in(1|10),int"`
+ Int8 int8 `valid:"in(1|10),int8"`
+ Int16 int16 `valid:"in(1|10),int16"`
+ Int32 int32 `valid:"in(1|10),int32"`
+ Int64 int64 `valid:"in(1|10),int64"`
+
+ Uint uint `valid:"in(1|10),uint"`
+ Uint8 uint8 `valid:"in(1|10),uint8"`
+ Uint16 uint16 `valid:"in(1|10),uint16"`
+ Uint32 uint32 `valid:"in(1|10),uint32"`
+ Uint64 uint64 `valid:"in(1|10),uint64"`
+
+ Float32 float32 `valid:"in(1|10),float32"`
+ Float64 float64 `valid:"in(1|10),float64"`
+ }
+
+ test3Ok1 := &Test3{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
+ test3Ok2 := &Test3{10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}
+ test3NotOk := &Test3{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}
+
+ _, err = ValidateStruct(test3Ok1)
+ if err != nil {
+ t.Errorf("Test failed: %s", err)
+ }
+
+ _, err = ValidateStruct(test3Ok2)
+ if err != nil {
+ t.Errorf("Test failed: %s", err)
+ }
+
+ _, err = ValidateStruct(test3NotOk)
+ if err == nil {
+ t.Errorf("Test failed: nil")
+ }
+}
+
+func TestValidateStructUpperAndLowerCaseWithNumTypeCheck(t *testing.T) {
+
+ type StructCapital struct {
+ Total float32 `valid:"float,required"`
+ }
+
+ structCapital := &StructCapital{53.3535}
+ _, err := ValidateStruct(structCapital)
+ if err != nil {
+ t.Errorf("Test failed: nil")
+ fmt.Println(err)
+ }
+
+ type StructLower struct {
+ total float32 `valid:"float,required"`
+ }
+
+ structLower := &StructLower{53.3535}
+ _, err = ValidateStruct(structLower)
+ if err != nil {
+ t.Errorf("Test failed: nil")
+ fmt.Println(err)
+ }
+}
+
+func TestIsCIDR(t *testing.T) {
+ t.Parallel()
+
+ var tests = []struct {
+ param string
+ expected bool
+ }{
+ {"193.168.3.20/7", true},
+ {"2001:db8::/32", true},
+ {"2001:0db8:85a3:0000:0000:8a2e:0370:7334/64", true},
+ {"193.138.3.20/60", false},
+ {"500.323.2.23/43", false},
+ {"", false},
+ }
+ for _, test := range tests {
+ actual := IsCIDR(test.param)
+ if actual != test.expected {
+ t.Errorf("Expected IsCIDR(%q) to be %v, got %v", test.param, test.expected, actual)
+ }
+ }
+}
+
+func TestOptionalCustomValidators(t *testing.T) {
+
+ CustomTypeTagMap.Set("f2", CustomTypeValidator(func(i interface{}, o interface{}) bool {
+ return
false
+ }))
+
+ var val struct {
+ WithCustomError string `valid:"f2~boom,optional"`
+ WithoutCustomError string `valid:"f2,optional"`
+ OptionalFirst string `valid:"optional,f2"`
+ }
+
+ ok, err := ValidateStruct(val)
+
+ if err != nil {
+ t.Errorf("Expected nil err with optional validation, got %v", err)
+ }
+
+ if !ok {
+ t.Error("Expected validation to return true, got false")
+ }
+}
+
+func TestJSONValidator(t *testing.T) {
+
+ var val struct {
+ WithJSONName string `json:"with_json_name" valid:"-,required"`
+ WithoutJSONName string `valid:"-,required"`
+ WithJSONOmit string `json:"with_other_json_name,omitempty" valid:"-,required"`
+ WithJSONOption string `json:",omitempty" valid:"-,required"`
+ WithEmptyJSONName string `json:"-" valid:"-,required"`
+ }
+
+ _, err := ValidateStruct(val)
+
+ if err == nil {
+ t.Error("Expected error but got no error")
+ }
+
+ if Contains(err.Error(), "WithJSONName") {
+ t.Errorf("Expected error message to contain with_json_name but actual error is: %s", err.Error())
+ }
+
+ if !Contains(err.Error(), "WithoutJSONName") {
+ t.Errorf("Expected error message to contain WithoutJSONName but actual error is: %s", err.Error())
+ }
+
+ if Contains(err.Error(), "omitempty") {
+ t.Errorf("Expected error message to not contain ',omitempty' but actual error is: %s", err.Error())
+ }
+
+ if !Contains(err.Error(), "WithEmptyJSONName") {
+ t.Errorf("Expected error message to contain WithEmptyJSONName but actual error is: %s", err.Error())
+ }
+}
+
+func TestValidatorIncludedInError(t *testing.T) {
+ post := Post{
+ Title: "",
+ Message: "👍",
+ AuthorIP: "xyz",
+ }
+
+ validatorMap := map[string]string{
+ "Title": "required",
+ "Message": "ascii",
+ "AuthorIP": "ipv4",
+ }
+
+ ok, errors := ValidateStruct(post)
+ if ok {
+ t.Errorf("expected validation to fail %v", ok)
+ }
+
+ for _, e := range errors.(Errors) {
+ casted := e.(Error)
+ if validatorMap[casted.Name] != casted.Validator {
+ t.Errorf("expected validator for %s to be %s, but was %s", casted.Name, validatorMap[casted.Name], casted.Validator)
+ }
+ }
+
+ // check to make sure that validators with arguments (like length(1|10)) don't include the arguments
+ // in the validator name
+ message := MessageWithSeveralFieldsStruct{
+ Title: "",
+ Body: "asdfasdfasdfasdfasdf",
+ }
+
+ validatorMap = map[string]string{
+ "Title": "length",
+ "Body": "length",
+ }
+
+ ok, errors = ValidateStruct(message)
+ if ok {
+ t.Errorf("expected validation to fail, %v", ok)
+ }
+
+ for _, e := range errors.(Errors) {
+ casted := e.(Error)
+ if validatorMap[casted.Name] != casted.Validator {
+ t.Errorf("expected validator for %s to be %s, but was %s", casted.Name, validatorMap[casted.Name], casted.Validator)
+ }
+ }
+
+ // make sure validators with custom messages don't show up in the validator string
+ type CustomMessage struct {
+ Text string `valid:"length(1|10)~Custom message"`
+ }
+ cs := CustomMessage{Text: "asdfasdfasdfasdf"}
+
+ ok, errors = ValidateStruct(&cs)
+ if ok {
+ t.Errorf("expected validation to fail, %v", ok)
+ }
+
+ validator := errors.(Errors)[0].(Error).Validator
+ if validator != "length" {
+ t.Errorf("expected validator for Text to be length, but was %s", validator)
+ }
+
+}
+
+func TestIsRsaPublicKey(t *testing.T) {
+ var tests = []struct {
+ rsastr string
+ keylen int
+ expected bool
+ }{
+ {`fubar`, 2048, false},
+ {`MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvncDCeibmEkabJLmFec7x9y86RP6dIvkVxxbQoOJo06E+p7tH6vCmiGHKnuu
+XwKYLq0DKUE3t/HHsNdowfD9+NH8caLzmXqGBx45/Dzxnwqz0qYq7idK+Qff34qrk/YFoU7498U1Ee7PkKb7/VE9BmMEcI3uoKbeXCbJRI +HoTp8bUXOpNTSUfwUNwJzbm2nsHo2xu6virKtAZLTsJFzTUmRd11MrWCvj59lWzt1/eIMN+ekjH8aXeLOOl54CL+kWp48C+V9BchyKCShZ +B7ucimFvjHTtuxziXZQRO7HlcsBOa0WwvDJnRnskdyoD31s4F4jpKEYBJNWTo63v6lUvbQIDAQAB`, 2048, true}, + {`MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvncDCeibmEkabJLmFec7x9y86RP6dIvkVxxbQoOJo06E+p7tH6vCmiGHKnuu +XwKYLq0DKUE3t/HHsNdowfD9+NH8caLzmXqGBx45/Dzxnwqz0qYq7idK+Qff34qrk/YFoU7498U1Ee7PkKb7/VE9BmMEcI3uoKbeXCbJRI +HoTp8bUXOpNTSUfwUNwJzbm2nsHo2xu6virKtAZLTsJFzTUmRd11MrWCvj59lWzt1/eIMN+ekjH8aXeLOOl54CL+kWp48C+V9BchyKCShZ +B7ucimFvjHTtuxziXZQRO7HlcsBOa0WwvDJnRnskdyoD31s4F4jpKEYBJNWTo63v6lUvbQIDAQAB`, 1024, false}, + {`-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvncDCeibmEkabJLmFec7 +x9y86RP6dIvkVxxbQoOJo06E+p7tH6vCmiGHKnuuXwKYLq0DKUE3t/HHsNdowfD9 ++NH8caLzmXqGBx45/Dzxnwqz0qYq7idK+Qff34qrk/YFoU7498U1Ee7PkKb7/VE9 +BmMEcI3uoKbeXCbJRIHoTp8bUXOpNTSUfwUNwJzbm2nsHo2xu6virKtAZLTsJFzT +UmRd11MrWCvj59lWzt1/eIMN+ekjH8aXeLOOl54CL+kWp48C+V9BchyKCShZB7uc +imFvjHTtuxziXZQRO7HlcsBOa0WwvDJnRnskdyoD31s4F4jpKEYBJNWTo63v6lUv +bQIDAQAB +-----END PUBLIC KEY-----`, 2048, true}, + {`-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvncDCeibmEkabJLmFec7 +x9y86RP6dIvkVxxbQoOJo06E+p7tH6vCmiGHKnuuXwKYLq0DKUE3t/HHsNdowfD9 ++NH8caLzmXqGBx45/Dzxnwqz0qYq7idK+Qff34qrk/YFoU7498U1Ee7PkKb7/VE9 +BmMEcI3uoKbeXCbJRIHoTp8bUXOpNTSUfwUNwJzbm2nsHo2xu6virKtAZLTsJFzT +UmRd11MrWCvj59lWzt1/eIMN+ekjH8aXeLOOl54CL+kWp48C+V9BchyKCShZB7uc +imFvjHTtuxziXZQRO7HlcsBOa0WwvDJnRnskdyoD31s4F4jpKEYBJNWTo63v6lUv +bQIDAQAB +-----END PUBLIC KEY-----`, 4096, false}, + } + for i, test := range tests { + actual := IsRsaPublicKey(test.rsastr, test.keylen) + if actual != test.expected { + t.Errorf("Expected TestIsRsaPublicKey(%d, %d) to be %v, got %v", i, test.keylen, test.expected, actual) + } + } +} diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml new file mode 100644 index 000000000..cac7a5fcf --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/wercker.yml @@ -0,0 +1,15 @@ +box: golang +build: + steps: + - setup-go-workspace + + - script: + name: go get + code: | + go version + go get -t ./... + + - script: + name: go test + code: | + go test -race ./... 
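The vendored tests above exercise `ValidateStruct` end to end: struct tags drive the validators, `TagMap` and `CustomTypeTagMap` register custom ones, and `ErrorsByField` flattens the failures. As a reading aid, here is a minimal sketch of how the same API is driven from application code; the `Account` struct and its field values are hypothetical, and only the `govalidator` calls and tags come from the tests above.

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

// Account is a hypothetical struct; its tags mirror those used in the tests above.
type Account struct {
	Email string `valid:"email,required"`
	Nick  string `valid:"alphanum,length(3|10)"`
}

func main() {
	// ValidateStruct returns false plus an Errors value when any tag fails.
	ok, err := govalidator.ValidateStruct(Account{Email: "not-an-email", Nick: "x"})
	fmt.Println(ok) // false
	if err != nil {
		// ErrorsByField maps each failing field name to its message,
		// as TestErrorsByField demonstrates above.
		for field, msg := range govalidator.ErrorsByField(err) {
			fmt.Printf("%s: %s\n", field, msg)
		}
	}
}
```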
diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore
new file mode 100644
index 000000000..80bed650e
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+bin
+
+
diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml
new file mode 100644
index 000000000..bde823d8a
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+
+go:
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - tip
diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE
new file mode 100644
index 000000000..df83a9c2f
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/LICENSE
@@ -0,0 +1,8 @@
+Copyright (c) 2012 Dave Grijalva
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
new file mode 100644
index 000000000..fd62e9490
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
@@ -0,0 +1,96 @@
+## Migration Guide from v2 -> v3

+Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
+
+### `Token.Claims` is now an interface type
+
+The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
+
+`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property.
+
+The old example for parsing a token looked like this:
+
+```go
+	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+is now directly mapped to...
+
+```go
+	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+	}
+```
+
+`StandardClaims` is designed to be embedded in your custom type.
You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
+
+```go
+	type MyCustomClaims struct {
+		User string
+		*StandardClaims
+	}
+
+	if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
+		claims := token.Claims.(*MyCustomClaims)
+		fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
+	}
+```
+
+### `ParseFromRequest` has been moved
+
+To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.
+
+`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
+
+This simple parsing example:
+
+```go
+	if token, err := jwt.ParseFromRequest(req, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+is directly mapped to:
+
+```go
+	if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+There are several concrete `Extractor` types provided for your convenience:
+
+* `HeaderExtractor` will search a list of headers until one contains content.
+* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
+* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
+* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
+* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument.
+* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header.
+
+
+### RSA signing methods no longer accept `[]byte` keys
+
+Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
+
+To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
+
+```go
+	func keyLookupFunc(token *jwt.Token) (interface{}, error) {
+		// Don't forget to validate the alg is what you expect:
+		if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
+			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
+		}
+
+		// Look up key
+		key, err := lookupPublicKey(token.Header["kid"])
+		if err != nil {
+			return nil, err
+		}
+
+		// Unpack key from PEM encoded PKCS8
+		return jwt.ParseRSAPublicKeyFromPEM(key)
+	}
+```
diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md
new file mode 100644
index 000000000..f48365faf
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/README.md
@@ -0,0 +1,85 @@
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
+
+[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
+
+**BREAKING CHANGES:** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is affected. There will be an update soon to remedy this, and it will likely require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.
+
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing method was used and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
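Before the linked examples below, a compact round trip may help. This is a sketch rather than text from the library's own docs: it assumes only the API described above (`NewWithClaims`, `MapClaims`, `SignedString`, `Parse`) and checks the signing method before releasing the key, as the NOTICE above advises.

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("secret") // illustrative only; use a real secret in practice

	// Build and sign a token with the HMAC-SHA256 method.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"foo": "bar"})
	signed, err := token.SignedString(key)
	if err != nil {
		panic(err)
	}

	// Parse it back, verifying the alg before handing over the key.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return key, nil
	})
	if err == nil && parsed.Valid {
		fmt.Println(parsed.Claims.(jwt.MapClaims)["foo"]) // bar
	}
}
```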
+
+## Examples
+
+See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
+* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
+* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
+
+Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package path instead: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing with respect to semantic versioning.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC-SHA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens.
This makes it possible to produce tokens with a private key, and to allow any consumer to access the public key for verification.
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+## More
+
+Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
new file mode 100644
index 000000000..b605b4509
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
@@ -0,0 +1,105 @@
+## `jwt-go` Version History
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+ * `ParseFromRequest` has been moved to `request` subpackage and usage has changed
+ * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+ * Added `Claims` interface type to allow users to decode the claims into a custom type
+ * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+ * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+ * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+ * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+ * Added several new, more specific, validation errors to error type bitmask
+ * Moved examples from README to executable example files
+ * Signing method registry is now thread safe
+ * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+ * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+ * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+ * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+ * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+ * `KeyFunc` now returns `interface{}` instead of `[]byte`
+ * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+ * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`.
Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go new file mode 100644 index 000000000..f0228f02e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/claims.go @@ -0,0 +1,134 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud string, cmp string, required bool) bool { + if aud == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/README.md b/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/README.md new file mode 100644 index 000000000..4a68ba40a --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/README.md @@ -0,0 +1,13 @@ +`jwt` command-line tool +======================= + +This is a simple tool to sign, verify and show JSON Web Tokens from +the command line. + +The following will create and sign a token, then verify it and output the original claims: + + echo {\"foo\":\"bar\"} | bin/jwt -key test/sample_key -alg RS256 -sign - | bin/jwt -key test/sample_key.pub -verify - + +To simply display a token, use: + + echo $JWT | jwt -show - diff --git a/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/app.go b/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/app.go new file mode 100644 index 000000000..c03711474 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/app.go @@ -0,0 +1,245 @@ +// A useful example app. You can use this to debug your tokens on the command line. +// This is also a great place to look at how you might use this library. +// +// Example usage: +// The following will create and sign a token, then verify it and output the original claims. 
+// echo {\"foo\":\"bar\"} | bin/jwt -key test/sample_key -alg RS256 -sign - | bin/jwt -key test/sample_key.pub -verify - +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "strings" + + jwt "github.com/dgrijalva/jwt-go" +) + +var ( + // Options + flagAlg = flag.String("alg", "", "signing algorithm identifier") + flagKey = flag.String("key", "", "path to key file or '-' to read from stdin") + flagCompact = flag.Bool("compact", false, "output compact JSON") + flagDebug = flag.Bool("debug", false, "print out all kinds of debug data") + + // Modes - exactly one of these is required + flagSign = flag.String("sign", "", "path to claims object to sign or '-' to read from stdin") + flagVerify = flag.String("verify", "", "path to JWT token to verify or '-' to read from stdin") + flagShow = flag.String("show", "", "path to JWT file or '-' to read from stdin") +) + +func main() { + // Usage message if you ask for -help or if you mess up inputs. + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, " One of the following flags is required: sign, verify\n") + flag.PrintDefaults() + } + + // Parse command line options + flag.Parse() + + // Do the thing. If something goes wrong, print error to stderr + // and exit with a non-zero status code + if err := start(); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } +} + +// Figure out which thing to do and then do that +func start() error { + if *flagSign != "" { + return signToken() + } else if *flagVerify != "" { + return verifyToken() + } else if *flagShow != "" { + return showToken() + } else { + flag.Usage() + return fmt.Errorf("None of the required flags are present. What do you want me to do?") + } +} + +// Helper func: Read input from specified file or stdin +func loadData(p string) ([]byte, error) { + if p == "" { + return nil, fmt.Errorf("No path specified") + } + + var rdr io.Reader + if p == "-" { + rdr = os.Stdin + } else { + if f, err := os.Open(p); err == nil { + rdr = f + defer f.Close() + } else { + return nil, err + } + } + return ioutil.ReadAll(rdr) +} + +// Print a json object in accordance with the prophecy (or the command line options) +func printJSON(j interface{}) error { + var out []byte + var err error + + if *flagCompact == false { + out, err = json.MarshalIndent(j, "", " ") + } else { + out, err = json.Marshal(j) + } + + if err == nil { + fmt.Println(string(out)) + } + + return err +} + +// Verify a token and output the claims. This is a great example +// of how to verify and view a token. +func verifyToken() error { + // get the token + tokData, err := loadData(*flagVerify) + if err != nil { + return fmt.Errorf("Couldn't read token: %v", err) + } + + // trim possible whitespace from token + tokData = regexp.MustCompile(`\s*$`).ReplaceAll(tokData, []byte{}) + if *flagDebug { + fmt.Fprintf(os.Stderr, "Token len: %v bytes\n", len(tokData)) + } + + // Parse the token. 
Load the key from command line option + token, err := jwt.Parse(string(tokData), func(t *jwt.Token) (interface{}, error) { + data, err := loadData(*flagKey) + if err != nil { + return nil, err + } + if isEs() { + return jwt.ParseECPublicKeyFromPEM(data) + } + return data, nil + }) + + // Print some debug data + if *flagDebug && token != nil { + fmt.Fprintf(os.Stderr, "Header:\n%v\n", token.Header) + fmt.Fprintf(os.Stderr, "Claims:\n%v\n", token.Claims) + } + + // Print an error if we can't parse for some reason + if err != nil { + return fmt.Errorf("Couldn't parse token: %v", err) + } + + // Is token invalid? + if !token.Valid { + return fmt.Errorf("Token is invalid") + } + + // Print the token details + if err := printJSON(token.Claims); err != nil { + return fmt.Errorf("Failed to output claims: %v", err) + } + + return nil +} + +// Create, sign, and output a token. This is a great, simple example of +// how to use this library to create and sign a token. +func signToken() error { + // get the token data from command line arguments + tokData, err := loadData(*flagSign) + if err != nil { + return fmt.Errorf("Couldn't read token: %v", err) + } else if *flagDebug { + fmt.Fprintf(os.Stderr, "Token: %v bytes", len(tokData)) + } + + // parse the JSON of the claims + var claims jwt.MapClaims + if err := json.Unmarshal(tokData, &claims); err != nil { + return fmt.Errorf("Couldn't parse claims JSON: %v", err) + } + + // get the key + var key interface{} + key, err = loadData(*flagKey) + if err != nil { + return fmt.Errorf("Couldn't read key: %v", err) + } + + // get the signing alg + alg := jwt.GetSigningMethod(*flagAlg) + if alg == nil { + return fmt.Errorf("Couldn't find signing method: %v", *flagAlg) + } + + // create a new token + token := jwt.NewWithClaims(alg, claims) + + if isEs() { + if k, ok := key.([]byte); !ok { + return fmt.Errorf("Couldn't convert key data to key") + } else { + key, err = jwt.ParseECPrivateKeyFromPEM(k) + if err != nil { + return err + } + } + } + + if out, err := token.SignedString(key); err == nil { + fmt.Println(out) + } else { + return fmt.Errorf("Error signing token: %v", err) + } + + return nil +} + +// showToken pretty-prints the token on the command line. +func showToken() error { + // get the token + tokData, err := loadData(*flagShow) + if err != nil { + return fmt.Errorf("Couldn't read token: %v", err) + } + + // trim possible whitespace from token + tokData = regexp.MustCompile(`\s*$`).ReplaceAll(tokData, []byte{}) + if *flagDebug { + fmt.Fprintf(os.Stderr, "Token len: %v bytes\n", len(tokData)) + } + + token, err := jwt.Parse(string(tokData), nil) + if token == nil { + return fmt.Errorf("malformed token: %v", err) + } + + // Print the token details + fmt.Println("Header:") + if err := printJSON(token.Header); err != nil { + return fmt.Errorf("Failed to output header: %v", err) + } + + fmt.Println("Claims:") + if err := printJSON(token.Claims); err != nil { + return fmt.Errorf("Failed to output claims: %v", err) + } + + return nil +} + +func isEs() bool { + return strings.HasPrefix(*flagAlg, "ES") +} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go new file mode 100644 index 000000000..a86dc1a3b --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. 
+package jwt
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
new file mode 100644
index 000000000..2f59a2223
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
@@ -0,0 +1,147 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "errors"
+ "math/big"
+)
+
+var (
+ // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+ ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// Implements the ECDSA family of signing methods
+type SigningMethodECDSA struct {
+ Name string
+ Hash crypto.Hash
+ KeySize int
+ CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+ SigningMethodES256 *SigningMethodECDSA
+ SigningMethodES384 *SigningMethodECDSA
+ SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+ // ES256
+ SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+ RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+ return SigningMethodES256
+ })
+
+ // ES384
+ SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+ RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+ return SigningMethodES384
+ })
+
+ // ES512
+ SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+ RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+ return SigningMethodES512
+ })
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+ return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an *ecdsa.PublicKey
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ // Get the key
+ var ecdsaKey *ecdsa.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ ecdsaKey = k
+ default:
+ return ErrInvalidKeyType
+ }
+
+ if len(sig) != 2*m.KeySize {
+ return ErrECDSAVerification
+ }
+
+ r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+ s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
+ return nil
+ } else {
+ return ErrECDSAVerification
+ }
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an *ecdsa.PrivateKey
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+ // Get the key
+ var ecdsaKey *ecdsa.PrivateKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ ecdsaKey = k
+ default:
+ return "", ErrInvalidKeyType
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return r, s
+ if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+ curveBits := ecdsaKey.Curve.Params().BitSize
+
+ if m.CurveBits != curveBits {
+ return "", ErrInvalidKey
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes += 1
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays and pad
+ // them with zeros on the left to make sure the sizes work out.
Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go new file mode 100644 index 000000000..753047b1e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go @@ -0,0 +1,100 @@ +package jwt_test + +import ( + "crypto/ecdsa" + "io/ioutil" + "strings" + "testing" + + "github.com/dgrijalva/jwt-go" +) + +var ecdsaTestData = []struct { + name string + keys map[string]string + tokenString string + alg string + claims map[string]interface{} + valid bool +}{ + { + "Basic ES256", + map[string]string{"private": "test/ec256-private.pem", "public": "test/ec256-public.pem"}, + "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiJ9.eyJmb28iOiJiYXIifQ.feG39E-bn8HXAKhzDZq7yEAPWYDhZlwTn3sePJnU9VrGMmwdXAIEyoOnrjreYlVM_Z4N13eK9-TmMTWyfKJtHQ", + "ES256", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic ES384", + map[string]string{"private": "test/ec384-private.pem", "public": "test/ec384-public.pem"}, + "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzM4NCJ9.eyJmb28iOiJiYXIifQ.ngAfKMbJUh0WWubSIYe5GMsA-aHNKwFbJk_wq3lq23aPp8H2anb1rRILIzVR0gUf4a8WzDtrzmiikuPWyCS6CN4-PwdgTk-5nehC7JXqlaBZU05p3toM3nWCwm_LXcld", + "ES384", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic ES512", + map[string]string{"private": "test/ec512-private.pem", "public": "test/ec512-public.pem"}, + "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzUxMiJ9.eyJmb28iOiJiYXIifQ.AAU0TvGQOcdg2OvrwY73NHKgfk26UDekh9Prz-L_iWuTBIBqOFCWwwLsRiHB1JOddfKAls5do1W0jR_F30JpVd-6AJeTjGKA4C1A1H6gIKwRY0o_tFDIydZCl_lMBMeG5VNFAjO86-WCSKwc3hqaGkq1MugPRq_qrF9AVbuEB4JPLyL5", + "ES512", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "basic ES256 invalid: foo => bar", + map[string]string{"private": "test/ec256-private.pem", "public": "test/ec256-public.pem"}, + "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.MEQCIHoSJnmGlPaVQDqacx_2XlXEhhqtWceVopjomc2PJLtdAiAUTeGPoNYxZw0z8mgOnnIcjoxRuNDVZvybRZF3wR1l8W", + "ES256", + map[string]interface{}{"foo": "bar"}, + false, + }, +} + +func TestECDSAVerify(t *testing.T) { + for _, data := range ecdsaTestData { + var err error + + key, _ := ioutil.ReadFile(data.keys["public"]) + + var ecdsaKey *ecdsa.PublicKey + if ecdsaKey, err = jwt.ParseECPublicKeyFromPEM(key); err != nil { + t.Errorf("Unable to parse ECDSA public key: %v", err) + } + + parts := strings.Split(data.tokenString, ".") + + method := jwt.GetSigningMethod(data.alg) + err = method.Verify(strings.Join(parts[0:2], "."), parts[2], ecdsaKey) + if data.valid && err != nil { + t.Errorf("[%v] Error while verifying key: %v", data.name, err) + } + if !data.valid && err == nil { + t.Errorf("[%v] Invalid key passed validation", data.name) + } + } +} + +func TestECDSASign(t *testing.T) { + for _, data := range ecdsaTestData { + var err error + key, _ := ioutil.ReadFile(data.keys["private"]) + + var ecdsaKey *ecdsa.PrivateKey + if ecdsaKey, err = jwt.ParseECPrivateKeyFromPEM(key); err != nil { + t.Errorf("Unable to parse ECDSA private key: %v", err) + } + + if data.valid { + parts := strings.Split(data.tokenString, ".") + method := jwt.GetSigningMethod(data.alg) + sig, err := 
method.Sign(strings.Join(parts[0:2], "."), ecdsaKey) + if err != nil { + t.Errorf("[%v] Error signing token: %v", data.name, err) + } + if sig == parts[2] { + t.Errorf("[%v] Identical signatures\nbefore:\n%v\nafter:\n%v", data.name, parts[2], sig) + } + } + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go new file mode 100644 index 000000000..d19624b72 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go new file mode 100644 index 000000000..662df19d4 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/errors.go @@ -0,0 +1,63 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner 
error  // stores the error returned by external dependencies, i.e.: KeyFunc
+	Errors uint32 // bitfield.  see ValidationError... constants
+	text   string // errors that do not wrap another error just have text
+}
+
+// ValidationError implements the error interface
+func (e ValidationError) Error() string {
+	if e.Inner != nil {
+		return e.Inner.Error()
+	} else if e.text != "" {
+		return e.text
+	} else {
+		return "token is invalid"
+	}
+}
+
+// No errors
+func (e *ValidationError) valid() bool {
+	if e.Errors > 0 {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/example_test.go b/vendor/github.com/dgrijalva/jwt-go/example_test.go
new file mode 100644
index 000000000..ae8b788a0
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/example_test.go
@@ -0,0 +1,114 @@
+package jwt_test
+
+import (
+	"fmt"
+	"github.com/dgrijalva/jwt-go"
+	"time"
+)
+
+// Example (atypical) using the StandardClaims type by itself to parse a token.
+// The StandardClaims type is designed to be embedded into your custom types
+// to provide standard validation features.  You can use it alone, but there's
+// no way to retrieve other fields after parsing.
+// See the CustomClaimsType example for intended usage.
+func ExampleNewWithClaims_standardClaims() {
+	mySigningKey := []byte("AllYourBase")
+
+	// Create the Claims
+	claims := &jwt.StandardClaims{
+		ExpiresAt: 15000,
+		Issuer:    "test",
+	}
+
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
+	ss, err := token.SignedString(mySigningKey)
+	fmt.Printf("%v %v", ss, err)
+	//Output: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.QsODzZu3lUZMVdhbO76u3Jv02iYCvEHcYVUI1kOWEU0
+}
+
+// Example creating a token using a custom claims type.  The StandardClaims is embedded
+// in the custom type to allow for easy encoding, parsing and validation of standard claims.
+func ExampleNewWithClaims_customClaimsType() {
+	mySigningKey := []byte("AllYourBase")
+
+	type MyCustomClaims struct {
+		Foo string `json:"foo"`
+		jwt.StandardClaims
+	}
+
+	// Create the Claims
+	claims := MyCustomClaims{
+		"bar",
+		jwt.StandardClaims{
+			ExpiresAt: 15000,
+			Issuer:    "test",
+		},
+	}
+
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
+	ss, err := token.SignedString(mySigningKey)
+	fmt.Printf("%v %v", ss, err)
+	//Output: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.HE7fK0xOQwFEr4WDgRWj4teRPZ6i3GLwD5YCm6Pwu_c
+}
+
+// Example parsing a token using a custom claims type.  The StandardClaims is embedded
+// in the custom type to allow for easy encoding, parsing and validation of standard claims.
+func ExampleParseWithClaims_customClaimsType() {
+	tokenString := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.HE7fK0xOQwFEr4WDgRWj4teRPZ6i3GLwD5YCm6Pwu_c"
+
+	type MyCustomClaims struct {
+		Foo string `json:"foo"`
+		jwt.StandardClaims
+	}
+
+	// sample token is expired.  override time so it parses as valid
+	at(time.Unix(0, 0), func() {
+		token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, func(token *jwt.Token) (interface{}, error) {
+			return []byte("AllYourBase"), nil
+		})
+
+		if claims, ok := token.Claims.(*MyCustomClaims); ok && token.Valid {
+			fmt.Printf("%v %v", claims.Foo, claims.StandardClaims.ExpiresAt)
+		} else {
+			fmt.Println(err)
+		}
+	})
+
+	// Output: bar 15000
+}
+
+// Override time value for tests.  Restore default value after.
+func at(t time.Time, f func()) {
+	jwt.TimeFunc = func() time.Time {
+		return t
+	}
+	f()
+	jwt.TimeFunc = time.Now
+}
+
+// An example of parsing the error types using bitfield checks
+func ExampleParse_errorChecking() {
+	// Token from another example.  This token is expired
+	var tokenString = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.HE7fK0xOQwFEr4WDgRWj4teRPZ6i3GLwD5YCm6Pwu_c"
+
+	token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
+		return []byte("AllYourBase"), nil
+	})
+
+	if token.Valid {
+		fmt.Println("You look nice today")
+	} else if ve, ok := err.(*jwt.ValidationError); ok {
+		if ve.Errors&jwt.ValidationErrorMalformed != 0 {
+			fmt.Println("That's not even a token")
+		} else if ve.Errors&(jwt.ValidationErrorExpired|jwt.ValidationErrorNotValidYet) != 0 {
+			// Token is either expired or not active yet
+			fmt.Println("Timing is everything")
+		} else {
+			fmt.Println("Couldn't handle this token:", err)
+		}
+	} else {
+		fmt.Println("Couldn't handle this token:", err)
+	}
+
+	// Output: Timing is everything
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go
new file mode 100644
index 000000000..c22991925
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go
@@ -0,0 +1,94 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/hmac"
+	"errors"
+)
+
+// Implements the HMAC-SHA family of signing methods
+type SigningMethodHMAC struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+	SigningMethodHS256  *SigningMethodHMAC
+	SigningMethodHS384  *SigningMethodHMAC
+	SigningMethodHS512  *SigningMethodHMAC
+	ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+	// HS256
+	SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+		return SigningMethodHS256
+	})
+
+	// HS384
+	SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+		return SigningMethodHS384
+	})
+
+	// HS512
+	SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+		return SigningMethodHS512
+	})
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+	return m.Name
+}
+
+// Verify the signature of HSXXX tokens.  Returns nil if the signature is valid.
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
+	// Verify the key is the right type
+	keyBytes, ok := key.([]byte)
+	if !ok {
+		return ErrInvalidKeyType
+	}
+
+	// Decode signature, for comparison
+	sig, err := DecodeSegment(signature)
+	if err != nil {
+		return err
+	}
+
+	// Can we use the specified hashing method?
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+
+	// This signing method is symmetric, so we validate the signature
+	// by reproducing the signature from the signing string and key, then
+	// comparing that against the provided signature.
+	hasher := hmac.New(m.Hash.New, keyBytes)
+	hasher.Write([]byte(signingString))
+	if !hmac.Equal(sig, hasher.Sum(nil)) {
+		return ErrSignatureInvalid
+	}
+
+	// No validation errors.  Signature is good.
+	return nil
+}
+
+// Implements the Sign method from SigningMethod for this signing method.
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKey +} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac_example_test.go b/vendor/github.com/dgrijalva/jwt-go/hmac_example_test.go new file mode 100644 index 000000000..8fb567820 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/hmac_example_test.go @@ -0,0 +1,64 @@ +package jwt_test + +import ( + "fmt" + "github.com/dgrijalva/jwt-go" + "io/ioutil" + "time" +) + +// For HMAC signing method, the key can be any []byte. It is recommended to generate +// a key using crypto/rand or something equivalent. You need the same key for signing +// and validating. +var hmacSampleSecret []byte + +func init() { + // Load sample key data + if keyData, e := ioutil.ReadFile("test/hmacTestKey"); e == nil { + hmacSampleSecret = keyData + } else { + panic(e) + } +} + +// Example creating, signing, and encoding a JWT token using the HMAC signing method +func ExampleNew_hmac() { + // Create a new token object, specifying signing method and the claims + // you would like it to contain. + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "foo": "bar", + "nbf": time.Date(2015, 10, 10, 12, 0, 0, 0, time.UTC).Unix(), + }) + + // Sign and get the complete encoded token as a string using the secret + tokenString, err := token.SignedString(hmacSampleSecret) + + fmt.Println(tokenString, err) + // Output: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.u1riaD1rW97opCoAuRCTy4w58Br-Zk-bh7vLiRIsrpU +} + +// Example parsing and validating a token using the HMAC signing method +func ExampleParse_hmac() { + // sample token string taken from the New example + tokenString := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.u1riaD1rW97opCoAuRCTy4w58Br-Zk-bh7vLiRIsrpU" + + // Parse takes the token string and a function for looking up the key. The latter is especially + // useful if you use multiple keys for your application. The standard is to use 'kid' in the + // head of the token to identify which key to use, but the parsed token (head and claims) is provided + // to the callback, providing flexibility. 
+ token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + return hmacSampleSecret, nil + }) + + if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid { + fmt.Println(claims["foo"], claims["nbf"]) + } else { + fmt.Println(err) + } + + // Output: bar 1.4444784e+09 +} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac_test.go b/vendor/github.com/dgrijalva/jwt-go/hmac_test.go new file mode 100644 index 000000000..c7e114f4f --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/hmac_test.go @@ -0,0 +1,91 @@ +package jwt_test + +import ( + "github.com/dgrijalva/jwt-go" + "io/ioutil" + "strings" + "testing" +) + +var hmacTestData = []struct { + name string + tokenString string + alg string + claims map[string]interface{} + valid bool +}{ + { + "web sample", + "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk", + "HS256", + map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, + true, + }, + { + "HS384", + "eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJleHAiOjEuMzAwODE5MzhlKzA5LCJodHRwOi8vZXhhbXBsZS5jb20vaXNfcm9vdCI6dHJ1ZSwiaXNzIjoiam9lIn0.KWZEuOD5lbBxZ34g7F-SlVLAQ_r5KApWNWlZIIMyQVz5Zs58a7XdNzj5_0EcNoOy", + "HS384", + map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, + true, + }, + { + "HS512", + "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjEuMzAwODE5MzhlKzA5LCJodHRwOi8vZXhhbXBsZS5jb20vaXNfcm9vdCI6dHJ1ZSwiaXNzIjoiam9lIn0.CN7YijRX6Aw1n2jyI2Id1w90ja-DEMYiWixhYCyHnrZ1VfJRaFQz1bEbjjA5Fn4CLYaUG432dEYmSbS4Saokmw", + "HS512", + map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, + true, + }, + { + "web sample: invalid", + "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXo", + "HS256", + map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, + false, + }, +} + +// Sample data from http://tools.ietf.org/html/draft-jones-json-web-signature-04#appendix-A.1 +var hmacTestKey, _ = ioutil.ReadFile("test/hmacTestKey") + +func TestHMACVerify(t *testing.T) { + for _, data := range hmacTestData { + parts := strings.Split(data.tokenString, ".") + + method := jwt.GetSigningMethod(data.alg) + err := method.Verify(strings.Join(parts[0:2], "."), parts[2], hmacTestKey) + if data.valid && err != nil { + t.Errorf("[%v] Error while verifying key: %v", data.name, err) + } + if !data.valid && err == nil { + t.Errorf("[%v] Invalid key passed validation", data.name) + } + } +} + +func TestHMACSign(t *testing.T) { + for _, data := range hmacTestData { + if data.valid { + parts := strings.Split(data.tokenString, ".") + method := jwt.GetSigningMethod(data.alg) + sig, err := method.Sign(strings.Join(parts[0:2], "."), hmacTestKey) + if err != nil { + t.Errorf("[%v] Error signing token: %v", data.name, err) + } + if sig != parts[2] { + t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) + } + } + } +} + +func BenchmarkHS256Signing(b *testing.B) { + benchmarkSigning(b, jwt.SigningMethodHS256, hmacTestKey) +} + +func BenchmarkHS384Signing(b 
*testing.B) {
+	benchmarkSigning(b, jwt.SigningMethodHS384, hmacTestKey)
+}
+
+func BenchmarkHS512Signing(b *testing.B) {
+	benchmarkSigning(b, jwt.SigningMethodHS512, hmacTestKey)
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/http_example_test.go b/vendor/github.com/dgrijalva/jwt-go/http_example_test.go
new file mode 100644
index 000000000..82e9c50a4
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/http_example_test.go
@@ -0,0 +1,216 @@
+package jwt_test
+
+// Example HTTP auth using asymmetric crypto/RSA keys
+// This is based on a (now outdated) example at https://gist.github.com/cryptix/45c33ecf0ae54828e63b
+
+import (
+	"bytes"
+	"crypto/rsa"
+	"fmt"
+	"github.com/dgrijalva/jwt-go"
+	"github.com/dgrijalva/jwt-go/request"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// location of the files used for signing and verification
+const (
+	privKeyPath = "test/sample_key"     // openssl genrsa -out app.rsa keysize
+	pubKeyPath  = "test/sample_key.pub" // openssl rsa -in app.rsa -pubout > app.rsa.pub
+)
+
+var (
+	verifyKey  *rsa.PublicKey
+	signKey    *rsa.PrivateKey
+	serverPort int
+	// storing sample username/password pairs
+	// don't do this on a real server
+	users = map[string]string{
+		"test": "known",
+	}
+)
+
+// read the key files before starting http handlers
+func init() {
+	signBytes, err := ioutil.ReadFile(privKeyPath)
+	fatal(err)
+
+	signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes)
+	fatal(err)
+
+	verifyBytes, err := ioutil.ReadFile(pubKeyPath)
+	fatal(err)
+
+	verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes)
+	fatal(err)
+
+	http.HandleFunc("/authenticate", authHandler)
+	http.HandleFunc("/restricted", restrictedHandler)
+
+	// Setup listener
+	listener, err := net.ListenTCP("tcp", &net.TCPAddr{})
+	fatal(err)
+	serverPort = listener.Addr().(*net.TCPAddr).Port
+
+	log.Println("Listening...")
+	go func() {
+		fatal(http.Serve(listener, nil))
+	}()
+}
+
+var start func()
+
+func fatal(err error) {
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+// Define some custom types we're going to use within our tokens
+type CustomerInfo struct {
+	Name string
+	Kind string
+}
+
+type CustomClaimsExample struct {
+	*jwt.StandardClaims
+	TokenType string
+	CustomerInfo
+}
+
+func Example_getTokenViaHTTP() {
+	// See func authHandler for an example auth handler that produces a token
+	res, err := http.PostForm(fmt.Sprintf("http://localhost:%v/authenticate", serverPort), url.Values{
+		"user": {"test"},
+		"pass": {"known"},
+	})
+	if err != nil {
+		fatal(err)
+	}
+
+	if res.StatusCode != 200 {
+		fmt.Println("Unexpected status code", res.StatusCode)
+	}
+
+	// Read the token out of the response body
+	buf := new(bytes.Buffer)
+	io.Copy(buf, res.Body)
+	res.Body.Close()
+	tokenString := strings.TrimSpace(buf.String())
+
+	// Parse the token
+	token, err := jwt.ParseWithClaims(tokenString, &CustomClaimsExample{}, func(token *jwt.Token) (interface{}, error) {
+		// since we only use the one private key to sign the tokens,
+		// we also only use its public counterpart to verify
+		return verifyKey, nil
+	})
+	fatal(err)
+
+	claims := token.Claims.(*CustomClaimsExample)
+	fmt.Println(claims.CustomerInfo.Name)
+
+	//Output: test
+}
+
+func Example_useTokenViaHTTP() {
+
+	// Make a sample token
+	// In a real world situation, this token will have been acquired from
+	// some other API call (see Example_getTokenViaHTTP)
+	token, err := createToken("foo")
+	fatal(err)
+
+	// Make request.  See func restrictedHandler for example request processor
+	req, err := http.NewRequest("GET", fmt.Sprintf("http://localhost:%v/restricted", serverPort), nil)
+	fatal(err)
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %v", token))
+	res, err := http.DefaultClient.Do(req)
+	fatal(err)
+
+	// Read the response body
+	buf := new(bytes.Buffer)
+	io.Copy(buf, res.Body)
+	res.Body.Close()
+	fmt.Println(buf.String())
+
+	// Output: Welcome, foo
+}
+
+func createToken(user string) (string, error) {
+	// create a signer for rsa 256
+	t := jwt.New(jwt.GetSigningMethod("RS256"))
+
+	// set our claims
+	t.Claims = &CustomClaimsExample{
+		&jwt.StandardClaims{
+			// set the expire time
+			// see http://tools.ietf.org/html/draft-ietf-oauth-json-web-token-20#section-4.1.4
+			ExpiresAt: time.Now().Add(time.Minute * 1).Unix(),
+		},
+		"level1",
+		CustomerInfo{user, "human"},
+	}
+
+	// Create token string
+	return t.SignedString(signKey)
+}
+
+// reads the form values, checks them and creates the token
+func authHandler(w http.ResponseWriter, r *http.Request) {
+	// make sure it's a POST
+	if r.Method != "POST" {
+		w.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintln(w, "No POST", r.Method)
+		return
+	}
+
+	user := r.FormValue("user")
+	pass := r.FormValue("pass")
+
+	log.Printf("Authenticate: user[%s] pass[%s]\n", user, pass)
+
+	// check values
+	if user != "test" || pass != "known" {
+		w.WriteHeader(http.StatusForbidden)
+		fmt.Fprintln(w, "Wrong info")
+		return
+	}
+
+	tokenString, err := createToken(user)
+	if err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+		fmt.Fprintln(w, "Sorry, error while Signing Token!")
+		log.Printf("Token Signing error: %v\n", err)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/jwt")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintln(w, tokenString)
+}
+
+// only accessible with a valid token
+func restrictedHandler(w http.ResponseWriter, r *http.Request) {
+	// Get token from request
+	token, err := request.ParseFromRequestWithClaims(r, request.OAuth2Extractor, &CustomClaimsExample{}, func(token *jwt.Token) (interface{}, error) {
+		// since we only use the one private key to sign the tokens,
+		// we also only use its public counterpart to verify
+		return verifyKey, nil
+	})
+
+	// If the token is missing or invalid, return error
+	if err != nil {
+		w.WriteHeader(http.StatusUnauthorized)
+		fmt.Fprintln(w, "Invalid token:", err)
+		return
+	}
+
+	// Token is valid
+	fmt.Fprintln(w, "Welcome,", token.Claims.(*CustomClaimsExample).Name)
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go
new file mode 100644
index 000000000..291213c46
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/map_claims.go
@@ -0,0 +1,93 @@
+package jwt
+
+import (
+	"encoding/json"
+	"errors"
+)
+
+// Claims type that uses the map[string]interface{} for JSON decoding
+// This is the default claims type if you don't supply one
+type MapClaims map[string]interface{}
+
+// Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+	aud, _ := m["aud"].(string)
+	return verifyAud(aud, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+	switch exp := m["exp"].(type) {
+	case float64:
+		return verifyExp(int64(exp), cmp, req)
+	case json.Number:
+		v, _ := exp.Int64()
+		return verifyExp(v, cmp, req)
+	}
+	return !req
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+	switch iat := m["iat"].(type) {
+	case float64:
+		return verifyIat(int64(iat), cmp, req)
+	case json.Number:
+		v, _ := iat.Int64()
+		return verifyIat(v, cmp, req)
+	}
+	return !req
+}
+
+// Compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+	iss, _ := m["iss"].(string)
+	return verifyIss(iss, cmp, req)
+}
+
+// Compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+	switch nbf := m["nbf"].(type) {
+	case float64:
+		return verifyNbf(int64(nbf), cmp, req)
+	case json.Number:
+		v, _ := nbf.Int64()
+		return verifyNbf(v, cmp, req)
+	}
+	return !req
+}
+
+// Validates time based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (m MapClaims) Valid() error {
+	vErr := new(ValidationError)
+	now := TimeFunc().Unix()
+
+	if !m.VerifyExpiresAt(now, false) {
+		vErr.Inner = errors.New("Token is expired")
+		vErr.Errors |= ValidationErrorExpired
+	}
+
+	if !m.VerifyIssuedAt(now, false) {
+		vErr.Inner = errors.New("Token used before issued")
+		vErr.Errors |= ValidationErrorIssuedAt
+	}
+
+	if !m.VerifyNotBefore(now, false) {
+		vErr.Inner = errors.New("Token is not valid yet")
+		vErr.Errors |= ValidationErrorNotValidYet
+	}
+
+	if vErr.valid() {
+		return nil
+	}
+
+	return vErr
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go
new file mode 100644
index 000000000..f04d189d0
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/none.go
@@ -0,0 +1,52 @@
+package jwt
+
+// Implements the none signing method.  This is required by the spec
+// but you probably should never use it.
+var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/dgrijalva/jwt-go/none_test.go b/vendor/github.com/dgrijalva/jwt-go/none_test.go new file mode 100644 index 000000000..29a69efef --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/none_test.go @@ -0,0 +1,72 @@ +package jwt_test + +import ( + "github.com/dgrijalva/jwt-go" + "strings" + "testing" +) + +var noneTestData = []struct { + name string + tokenString string + alg string + key interface{} + claims map[string]interface{} + valid bool +}{ + { + "Basic", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.", + "none", + jwt.UnsafeAllowNoneSignatureType, + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic - no key", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.", + "none", + nil, + map[string]interface{}{"foo": "bar"}, + false, + }, + { + "Signed", + "eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.W-jEzRfBigtCWsinvVVuldiuilzVdU5ty0MvpLaSaqK9PlAWWlDQ1VIQ_qSKzwL5IXaZkvZFJXT3yL3n7OUVu7zCNJzdwznbC8Z-b0z2lYvcklJYi2VOFRcGbJtXUqgjk2oGsiqUMUMOLP70TTefkpsgqDxbRh9CDUfpOJgW-dU7cmgaoswe3wjUAUi6B6G2YEaiuXC0XScQYSYVKIzgKXJV8Zw-7AN_DBUI4GkTpsvQ9fVVjZM9csQiEXhYekyrKu1nu_POpQonGd8yqkIyXPECNmmqH5jH4sFiF67XhD7_JpkvLziBpI-uh86evBUadmHhb9Otqw3uV3NTaXLzJw", + "none", + jwt.UnsafeAllowNoneSignatureType, + map[string]interface{}{"foo": "bar"}, + false, + }, +} + +func TestNoneVerify(t *testing.T) { + for _, data := range noneTestData { + parts := strings.Split(data.tokenString, ".") + + method := jwt.GetSigningMethod(data.alg) + err := method.Verify(strings.Join(parts[0:2], "."), parts[2], data.key) + if data.valid && err != nil { + t.Errorf("[%v] Error while verifying key: %v", data.name, err) + } + if !data.valid && err == nil { + t.Errorf("[%v] Invalid key passed validation", data.name) + } + } +} + +func TestNoneSign(t *testing.T) { + for _, data := range noneTestData { + if data.valid { + parts := strings.Split(data.tokenString, ".") + method := jwt.GetSigningMethod(data.alg) + sig, 
err := method.Sign(strings.Join(parts[0:2], "."), data.key) + if err != nil { + t.Errorf("[%v] Error signing token: %v", data.name, err) + } + if sig != parts[2] { + t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) + } + } + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 000000000..7bf1c4ea0 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,131 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + parts := strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + var err error + token := &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. 
short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser_test.go b/vendor/github.com/dgrijalva/jwt-go/parser_test.go new file mode 100644 index 000000000..e62714dc1 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/parser_test.go @@ -0,0 +1,261 @@ +package jwt_test + +import ( + "crypto/rsa" + "encoding/json" + "fmt" + "reflect" + "testing" + "time" + + "github.com/dgrijalva/jwt-go" + "github.com/dgrijalva/jwt-go/test" +) + +var keyFuncError error = fmt.Errorf("error loading key") + +var ( + jwtTestDefaultKey *rsa.PublicKey + defaultKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return jwtTestDefaultKey, nil } + emptyKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return nil, nil } + errorKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return nil, keyFuncError } + nilKeyFunc jwt.Keyfunc = nil +) + +func init() { + jwtTestDefaultKey = test.LoadRSAPublicKeyFromDisk("test/sample_key.pub") +} + +var jwtTestData = []struct { + name string + tokenString string + keyfunc jwt.Keyfunc + claims jwt.Claims + valid bool + errors uint32 + parser *jwt.Parser +}{ + { + "basic", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + defaultKeyFunc, + jwt.MapClaims{"foo": "bar"}, + true, + 0, + nil, + }, + { + "basic expired", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "exp": float64(time.Now().Unix() - 100)}, + false, + jwt.ValidationErrorExpired, + nil, + }, + { + "basic nbf", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "nbf": float64(time.Now().Unix() + 100)}, + false, + jwt.ValidationErrorNotValidYet, + nil, + }, + { + "expired and nbf", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "nbf": float64(time.Now().Unix() + 100), "exp": float64(time.Now().Unix() - 100)}, + false, + jwt.ValidationErrorNotValidYet | jwt.ValidationErrorExpired, + nil, + }, + { + "basic invalid", + 
"eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.EhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + defaultKeyFunc, + jwt.MapClaims{"foo": "bar"}, + false, + jwt.ValidationErrorSignatureInvalid, + nil, + }, + { + "basic nokeyfunc", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + nilKeyFunc, + jwt.MapClaims{"foo": "bar"}, + false, + jwt.ValidationErrorUnverifiable, + nil, + }, + { + "basic nokey", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + emptyKeyFunc, + jwt.MapClaims{"foo": "bar"}, + false, + jwt.ValidationErrorSignatureInvalid, + nil, + }, + { + "basic errorkey", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + errorKeyFunc, + jwt.MapClaims{"foo": "bar"}, + false, + jwt.ValidationErrorUnverifiable, + nil, + }, + { + "invalid signing method", + "", + defaultKeyFunc, + jwt.MapClaims{"foo": "bar"}, + false, + jwt.ValidationErrorSignatureInvalid, + &jwt.Parser{ValidMethods: []string{"HS256"}}, + }, + { + "valid signing method", + "", + defaultKeyFunc, + jwt.MapClaims{"foo": "bar"}, + true, + 0, + &jwt.Parser{ValidMethods: []string{"RS256", "HS256"}}, + }, + { + "JSON Number", + "", + defaultKeyFunc, + jwt.MapClaims{"foo": json.Number("123.4")}, + true, + 0, + &jwt.Parser{UseJSONNumber: true}, + }, + { + "Standard Claims", + "", + defaultKeyFunc, + &jwt.StandardClaims{ + ExpiresAt: time.Now().Add(time.Second * 10).Unix(), + }, + true, + 0, + &jwt.Parser{UseJSONNumber: true}, + }, + { + "JSON Number - basic expired", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "exp": json.Number(fmt.Sprintf("%v", time.Now().Unix()-100))}, + false, + jwt.ValidationErrorExpired, + &jwt.Parser{UseJSONNumber: true}, + }, + { + "JSON Number - basic nbf", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", time.Now().Unix()+100))}, + false, + jwt.ValidationErrorNotValidYet, + &jwt.Parser{UseJSONNumber: true}, + }, + { + "JSON Number - expired and nbf", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", time.Now().Unix()+100)), "exp": json.Number(fmt.Sprintf("%v", time.Now().Unix()-100))}, + false, + jwt.ValidationErrorNotValidYet | 
jwt.ValidationErrorExpired,
+		&jwt.Parser{UseJSONNumber: true},
+	},
+	{
+		"SkipClaimsValidation during token parsing",
+		"", // autogen
+		defaultKeyFunc,
+		jwt.MapClaims{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", time.Now().Unix()+100))},
+		true,
+		0,
+		&jwt.Parser{UseJSONNumber: true, SkipClaimsValidation: true},
+	},
+}
+
+func TestParser_Parse(t *testing.T) {
+	privateKey := test.LoadRSAPrivateKeyFromDisk("test/sample_key")
+
+	// Iterate over test data set and run tests
+	for _, data := range jwtTestData {
+		// If the token string is blank, use helper function to generate string
+		if data.tokenString == "" {
+			data.tokenString = test.MakeSampleToken(data.claims, privateKey)
+		}
+
+		// Parse the token
+		var token *jwt.Token
+		var err error
+		var parser = data.parser
+		if parser == nil {
+			parser = new(jwt.Parser)
+		}
+		// Figure out correct claims type
+		switch data.claims.(type) {
+		case jwt.MapClaims:
+			token, err = parser.ParseWithClaims(data.tokenString, jwt.MapClaims{}, data.keyfunc)
+		case *jwt.StandardClaims:
+			token, err = parser.ParseWithClaims(data.tokenString, &jwt.StandardClaims{}, data.keyfunc)
+		}
+
+		// Verify result matches expectation
+		if !reflect.DeepEqual(data.claims, token.Claims) {
+			t.Errorf("[%v] Claims mismatch. Expecting: %v  Got: %v", data.name, data.claims, token.Claims)
+		}
+
+		if data.valid && err != nil {
+			t.Errorf("[%v] Error while verifying token: %T:%v", data.name, err, err)
+		}
+
+		if !data.valid && err == nil {
+			t.Errorf("[%v] Invalid token passed validation", data.name)
+		}
+
+		if (err == nil && !token.Valid) || (err != nil && token.Valid) {
+			t.Errorf("[%v] Inconsistent behavior between returned error and token.Valid", data.name)
+		}
+
+		if data.errors != 0 {
+			if err == nil {
+				t.Errorf("[%v] Expecting error.  Didn't get one.", data.name)
+			} else {
+
+				ve := err.(*jwt.ValidationError)
+				// compare the bitfield part of the error
+				if e := ve.Errors; e != data.errors {
+					t.Errorf("[%v] Errors don't match expectation.  %v != %v", data.name, e, data.errors)
+				}
+
+				if err.Error() == keyFuncError.Error() && ve.Inner != keyFuncError {
+					t.Errorf("[%v] Inner error does not match expectation.  %v != %v", data.name, ve.Inner, keyFuncError)
+				}
+			}
+		}
+		if data.valid && token.Signature == "" {
+			t.Errorf("[%v] Signature is left unpopulated after parsing", data.name)
+		}
+	}
+}
+
+// Helper method for benchmarking various signing methods
+func benchmarkSigning(b *testing.B, method jwt.SigningMethod, key interface{}) {
+	t := jwt.New(method)
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			if _, err := t.SignedString(key); err != nil {
+				b.Fatal(err)
+			}
+		}
+	})
+
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/request/doc.go b/vendor/github.com/dgrijalva/jwt-go/request/doc.go
new file mode 100644
index 000000000..c01069c98
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/request/doc.go
@@ -0,0 +1,7 @@
+// Utility package for extracting JWT tokens from
+// HTTP requests.
+//
+// The main function is ParseFromRequest and its WithClaims variant.
+// See examples for how to use the various Extractor implementations
+// or roll your own.
+package request
diff --git a/vendor/github.com/dgrijalva/jwt-go/request/extractor.go b/vendor/github.com/dgrijalva/jwt-go/request/extractor.go
new file mode 100644
index 000000000..14414fe2f
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/request/extractor.go
@@ -0,0 +1,81 @@
+package request
+
+import (
+	"errors"
+	"net/http"
+)
+
+// Errors
+var (
+	ErrNoTokenInRequest = errors.New("no token present in request")
+)
+
+// Interface for extracting a token from an HTTP request.
+// The ExtractToken method should return a token string or an error.
+// If no token is present, you must return ErrNoTokenInRequest.
+type Extractor interface {
+	ExtractToken(*http.Request) (string, error)
+}
+
+// Extractor for finding a token in a header.  Looks at each specified
+// header in order until there's a match
+type HeaderExtractor []string
+
+func (e HeaderExtractor) ExtractToken(req *http.Request) (string, error) {
+	// loop over header names and return the first one that contains data
+	for _, header := range e {
+		if ah := req.Header.Get(header); ah != "" {
+			return ah, nil
+		}
+	}
+	return "", ErrNoTokenInRequest
+}
+
+// Extract token from request arguments.  This includes a POSTed form or
+// GET URL arguments.  Argument names are tried in order until there's a match.
+// This extractor calls `ParseMultipartForm` on the request
+type ArgumentExtractor []string
+
+func (e ArgumentExtractor) ExtractToken(req *http.Request) (string, error) {
+	// Make sure form is parsed
+	req.ParseMultipartForm(10e6)
+
+	// loop over arg names and return the first one that contains data
+	for _, arg := range e {
+		if ah := req.Form.Get(arg); ah != "" {
+			return ah, nil
+		}
+	}
+
+	return "", ErrNoTokenInRequest
+}
+
+// Tries Extractors in order until one returns a token string or an error occurs
+type MultiExtractor []Extractor
+
+func (e MultiExtractor) ExtractToken(req *http.Request) (string, error) {
+	// loop over extractors and return the first token found
+	for _, extractor := range e {
+		if tok, err := extractor.ExtractToken(req); tok != "" {
+			return tok, nil
+		} else if err != ErrNoTokenInRequest {
+			return "", err
+		}
+	}
+	return "", ErrNoTokenInRequest
+}
+
+// Wrap an Extractor in this to post-process the value before it's handed off.
+// See AuthorizationHeaderExtractor for an example +type PostExtractionFilter struct { + Extractor + Filter func(string) (string, error) +} + +func (e *PostExtractionFilter) ExtractToken(req *http.Request) (string, error) { + if tok, err := e.Extractor.ExtractToken(req); tok != "" { + return e.Filter(tok) + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/request/extractor_example_test.go b/vendor/github.com/dgrijalva/jwt-go/request/extractor_example_test.go new file mode 100644 index 000000000..a994ffe58 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/request/extractor_example_test.go @@ -0,0 +1,32 @@ +package request + +import ( + "fmt" + "net/url" +) + +const ( + exampleTokenA = "A" +) + +func ExampleHeaderExtractor() { + req := makeExampleRequest("GET", "/", map[string]string{"Token": exampleTokenA}, nil) + tokenString, err := HeaderExtractor{"Token"}.ExtractToken(req) + if err == nil { + fmt.Println(tokenString) + } else { + fmt.Println(err) + } + //Output: A +} + +func ExampleArgumentExtractor() { + req := makeExampleRequest("GET", "/", nil, url.Values{"token": {extractorTestTokenA}}) + tokenString, err := ArgumentExtractor{"token"}.ExtractToken(req) + if err == nil { + fmt.Println(tokenString) + } else { + fmt.Println(err) + } + //Output: A +} diff --git a/vendor/github.com/dgrijalva/jwt-go/request/extractor_test.go b/vendor/github.com/dgrijalva/jwt-go/request/extractor_test.go new file mode 100644 index 000000000..e3bbb0a3e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/request/extractor_test.go @@ -0,0 +1,91 @@ +package request + +import ( + "fmt" + "net/http" + "net/url" + "testing" +) + +var extractorTestTokenA = "A" +var extractorTestTokenB = "B" + +var extractorTestData = []struct { + name string + extractor Extractor + headers map[string]string + query url.Values + token string + err error +}{ + { + name: "simple header", + extractor: HeaderExtractor{"Foo"}, + headers: map[string]string{"Foo": extractorTestTokenA}, + query: nil, + token: extractorTestTokenA, + err: nil, + }, + { + name: "simple argument", + extractor: ArgumentExtractor{"token"}, + headers: map[string]string{}, + query: url.Values{"token": {extractorTestTokenA}}, + token: extractorTestTokenA, + err: nil, + }, + { + name: "multiple extractors", + extractor: MultiExtractor{ + HeaderExtractor{"Foo"}, + ArgumentExtractor{"token"}, + }, + headers: map[string]string{"Foo": extractorTestTokenA}, + query: url.Values{"token": {extractorTestTokenB}}, + token: extractorTestTokenA, + err: nil, + }, + { + name: "simple miss", + extractor: HeaderExtractor{"This-Header-Is-Not-Set"}, + headers: map[string]string{"Foo": extractorTestTokenA}, + query: nil, + token: "", + err: ErrNoTokenInRequest, + }, + { + name: "filter", + extractor: AuthorizationHeaderExtractor, + headers: map[string]string{"Authorization": "Bearer " + extractorTestTokenA}, + query: nil, + token: extractorTestTokenA, + err: nil, + }, +} + +func TestExtractor(t *testing.T) { + // Bearer token request + for _, data := range extractorTestData { + // Make request from test struct + r := makeExampleRequest("GET", "/", data.headers, data.query) + + // Test extractor + token, err := data.extractor.ExtractToken(r) + if token != data.token { + t.Errorf("[%v] Expected token '%v'. Got '%v'", data.name, data.token, token) + continue + } + if err != data.err { + t.Errorf("[%v] Expected error '%v'. 
Got '%v'", data.name, data.err, err) + continue + } + } +} + +func makeExampleRequest(method, path string, headers map[string]string, urlArgs url.Values) *http.Request { + r, _ := http.NewRequest(method, fmt.Sprintf("%v?%v", path, urlArgs.Encode()), nil) + for k, v := range headers { + r.Header.Set(k, v) + } + return r +} diff --git a/vendor/github.com/dgrijalva/jwt-go/request/oauth2.go b/vendor/github.com/dgrijalva/jwt-go/request/oauth2.go new file mode 100644 index 000000000..5948694a5 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/request/oauth2.go @@ -0,0 +1,28 @@ +package request + +import ( + "strings" +) + +// Strips 'Bearer ' prefix from bearer token string +func stripBearerPrefixFromTokenString(tok string) (string, error) { + // Should be a bearer token + if len(tok) > 6 && strings.ToUpper(tok[0:7]) == "BEARER " { + return tok[7:], nil + } + return tok, nil +} + +// Extract bearer token from Authorization header +// Uses PostExtractionFilter to strip "Bearer " prefix from header +var AuthorizationHeaderExtractor = &PostExtractionFilter{ + HeaderExtractor{"Authorization"}, + stripBearerPrefixFromTokenString, +} + +// Extractor for OAuth2 access tokens. Looks in 'Authorization' +// header then 'access_token' argument for a token. +var OAuth2Extractor = &MultiExtractor{ + AuthorizationHeaderExtractor, + ArgumentExtractor{"access_token"}, +} diff --git a/vendor/github.com/dgrijalva/jwt-go/request/request.go b/vendor/github.com/dgrijalva/jwt-go/request/request.go new file mode 100644 index 000000000..1807b3965 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/request/request.go @@ -0,0 +1,24 @@ +package request + +import ( + "github.com/dgrijalva/jwt-go" + "net/http" +) + +// Extract and parse a JWT token from an HTTP request. +// This behaves the same as Parse, but accepts a request and an extractor +// instead of a token string. The Extractor interface allows you to define +// the logic for extracting a token. Several useful implementations are provided. 
+func ParseFromRequest(req *http.Request, extractor Extractor, keyFunc jwt.Keyfunc) (token *jwt.Token, err error) { + return ParseFromRequestWithClaims(req, extractor, jwt.MapClaims{}, keyFunc) +} + +// ParseFromRequest but with custom Claims type +func ParseFromRequestWithClaims(req *http.Request, extractor Extractor, claims jwt.Claims, keyFunc jwt.Keyfunc) (token *jwt.Token, err error) { + // Extract token from request + if tokStr, err := extractor.ExtractToken(req); err == nil { + return jwt.ParseWithClaims(tokStr, claims, keyFunc) + } else { + return nil, err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/request/request_test.go b/vendor/github.com/dgrijalva/jwt-go/request/request_test.go new file mode 100644 index 000000000..b4365cd86 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/request/request_test.go @@ -0,0 +1,103 @@ +package request + +import ( + "fmt" + "github.com/dgrijalva/jwt-go" + "github.com/dgrijalva/jwt-go/test" + "net/http" + "net/url" + "reflect" + "strings" + "testing" +) + +var requestTestData = []struct { + name string + claims jwt.MapClaims + extractor Extractor + headers map[string]string + query url.Values + valid bool +}{ + { + "authorization bearer token", + jwt.MapClaims{"foo": "bar"}, + AuthorizationHeaderExtractor, + map[string]string{"Authorization": "Bearer %v"}, + url.Values{}, + true, + }, + { + "oauth bearer token - header", + jwt.MapClaims{"foo": "bar"}, + OAuth2Extractor, + map[string]string{"Authorization": "Bearer %v"}, + url.Values{}, + true, + }, + { + "oauth bearer token - url", + jwt.MapClaims{"foo": "bar"}, + OAuth2Extractor, + map[string]string{}, + url.Values{"access_token": {"%v"}}, + true, + }, + { + "url token", + jwt.MapClaims{"foo": "bar"}, + ArgumentExtractor{"token"}, + map[string]string{}, + url.Values{"token": {"%v"}}, + true, + }, +} + +func TestParseRequest(t *testing.T) { + // load keys from disk + privateKey := test.LoadRSAPrivateKeyFromDisk("../test/sample_key") + publicKey := test.LoadRSAPublicKeyFromDisk("../test/sample_key.pub") + keyfunc := func(*jwt.Token) (interface{}, error) { + return publicKey, nil + } + + // Bearer token request + for _, data := range requestTestData { + // Make token from claims + tokenString := test.MakeSampleToken(data.claims, privateKey) + + // Make query string + for k, vv := range data.query { + for i, v := range vv { + if strings.Contains(v, "%v") { + data.query[k][i] = fmt.Sprintf(v, tokenString) + } + } + } + + // Make request from test struct + r, _ := http.NewRequest("GET", fmt.Sprintf("/?%v", data.query.Encode()), nil) + for k, v := range data.headers { + if strings.Contains(v, "%v") { + r.Header.Set(k, fmt.Sprintf(v, tokenString)) + } else { + r.Header.Set(k, tokenString) + } + } + token, err := ParseFromRequestWithClaims(r, data.extractor, jwt.MapClaims{}, keyfunc) + + if token == nil { + t.Errorf("[%v] Token was not found: %v", data.name, err) + continue + } + if !reflect.DeepEqual(data.claims, token.Claims) { + t.Errorf("[%v] Claims mismatch. 
Expecting: %v Got: %v", data.name, data.claims, token.Claims) + } + if data.valid && err != nil { + t.Errorf("[%v] Error while verifying token: %v", data.name, err) + } + if !data.valid && err == nil { + t.Errorf("[%v] Invalid token passed validation", data.name) + } + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go new file mode 100644 index 000000000..0ae0b1984 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -0,0 +1,100 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, must be an rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be an rsa.PrivateKey structure. 
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go new file mode 100644 index 000000000..10ee9db8a --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go @@ -0,0 +1,126 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific instances for RS/PS and company +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, 
rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go new file mode 100644 index 000000000..9045aaf34 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go @@ -0,0 +1,96 @@ +// +build go1.4 + +package jwt_test + +import ( + "crypto/rsa" + "io/ioutil" + "strings" + "testing" + + "github.com/dgrijalva/jwt-go" +) + +var rsaPSSTestData = []struct { + name string + tokenString string + alg string + claims map[string]interface{} + valid bool +}{ + { + "Basic PS256", + "eyJhbGciOiJQUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.PPG4xyDVY8ffp4CcxofNmsTDXsrVG2npdQuibLhJbv4ClyPTUtR5giNSvuxo03kB6I8VXVr0Y9X7UxhJVEoJOmULAwRWaUsDnIewQa101cVhMa6iR8X37kfFoiZ6NkS-c7henVkkQWu2HtotkEtQvN5hFlk8IevXXPmvZlhQhwzB1sGzGYnoi1zOfuL98d3BIjUjtlwii5w6gYG2AEEzp7HnHCsb3jIwUPdq86Oe6hIFjtBwduIK90ca4UqzARpcfwxHwVLMpatKask00AgGVI0ysdk0BLMjmLutquD03XbThHScC2C2_Pp4cHWgMzvbgLU2RYYZcZRKr46QeNgz9w", + "PS256", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic PS384", + "eyJhbGciOiJQUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.w7-qqgj97gK4fJsq_DCqdYQiylJjzWONvD0qWWWhqEOFk2P1eDULPnqHRnjgTXoO4HAw4YIWCsZPet7nR3Xxq4ZhMqvKW8b7KlfRTb9cH8zqFvzMmybQ4jv2hKc3bXYqVow3AoR7hN_CWXI3Dv6Kd2X5xhtxRHI6IL39oTVDUQ74LACe-9t4c3QRPuj6Pq1H4FAT2E2kW_0KOc6EQhCLWEhm2Z2__OZskDC8AiPpP8Kv4k2vB7l0IKQu8Pr4RcNBlqJdq8dA5D3hk5TLxP8V5nG1Ib80MOMMqoS3FQvSLyolFX-R_jZ3-zfq6Ebsqr0yEb0AH2CfsECF7935Pa0FKQ", + "PS384", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic PS512", + "eyJhbGciOiJQUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.GX1HWGzFaJevuSLavqqFYaW8_TpvcjQ8KfC5fXiSDzSiT9UD9nB_ikSmDNyDILNdtjZLSvVKfXxZJqCfefxAtiozEDDdJthZ-F0uO4SPFHlGiXszvKeodh7BuTWRI2wL9-ZO4mFa8nq3GMeQAfo9cx11i7nfN8n2YNQ9SHGovG7_T_AvaMZB_jT6jkDHpwGR9mz7x1sycckEo6teLdHRnH_ZdlHlxqknmyTu8Odr5Xh0sJFOL8BepWbbvIIn-P161rRHHiDWFv6nhlHwZnVzjx7HQrWSGb6-s2cdLie9QL_8XaMcUpjLkfOMKkDOfHo6AvpL7Jbwi83Z2ZTHjJWB-A", + "PS512", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "basic PS256 invalid: foo => bar", + "eyJhbGciOiJQUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.PPG4xyDVY8ffp4CcxofNmsTDXsrVG2npdQuibLhJbv4ClyPTUtR5giNSvuxo03kB6I8VXVr0Y9X7UxhJVEoJOmULAwRWaUsDnIewQa101cVhMa6iR8X37kfFoiZ6NkS-c7henVkkQWu2HtotkEtQvN5hFlk8IevXXPmvZlhQhwzB1sGzGYnoi1zOfuL98d3BIjUjtlwii5w6gYG2AEEzp7HnHCsb3jIwUPdq86Oe6hIFjtBwduIK90ca4UqzARpcfwxHwVLMpatKask00AgGVI0ysdk0BLMjmLutquD03XbThHScC2C2_Pp4cHWgMzvbgLU2RYYZcZRKr46QeNgz9W", + "PS256", + map[string]interface{}{"foo": "bar"}, + false, + }, +} + +func TestRSAPSSVerify(t *testing.T) { + var err error + + key, _ := ioutil.ReadFile("test/sample_key.pub") + var rsaPSSKey *rsa.PublicKey + if rsaPSSKey, err = jwt.ParseRSAPublicKeyFromPEM(key); err != nil { + t.Errorf("Unable to parse RSA public key: %v", err) + } + + for _, data := range rsaPSSTestData { + parts := strings.Split(data.tokenString, ".") + + method := jwt.GetSigningMethod(data.alg) + err := method.Verify(strings.Join(parts[0:2], "."), parts[2], rsaPSSKey) + if data.valid && err != nil { + t.Errorf("[%v] Error while verifying key: %v", data.name, err) + } + if !data.valid && err == nil { + t.Errorf("[%v] Invalid key passed validation", data.name) + } + } +} + +func TestRSAPSSSign(t *testing.T) { + var err error + + key, _ := ioutil.ReadFile("test/sample_key") + var rsaPSSKey *rsa.PrivateKey + if rsaPSSKey, err = jwt.ParseRSAPrivateKeyFromPEM(key); err != nil { + t.Errorf("Unable to parse 
RSA private key: %v", err) + } + + for _, data := range rsaPSSTestData { + if data.valid { + parts := strings.Split(data.tokenString, ".") + method := jwt.GetSigningMethod(data.alg) + sig, err := method.Sign(strings.Join(parts[0:2], "."), rsaPSSKey) + if err != nil { + t.Errorf("[%v] Error signing token: %v", data.name, err) + } + if sig == parts[2] { + t.Errorf("[%v] Signatures shouldn't match\nnew:\n%v\noriginal:\n%v", data.name, sig, parts[2]) + } + } + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_test.go b/vendor/github.com/dgrijalva/jwt-go/rsa_test.go new file mode 100644 index 000000000..2e0f78536 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_test.go @@ -0,0 +1,176 @@ +package jwt_test + +import ( + "github.com/dgrijalva/jwt-go" + "io/ioutil" + "strings" + "testing" +) + +var rsaTestData = []struct { + name string + tokenString string + alg string + claims map[string]interface{} + valid bool +}{ + { + "Basic RS256", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + "RS256", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic RS384", + "eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.W-jEzRfBigtCWsinvVVuldiuilzVdU5ty0MvpLaSaqK9PlAWWlDQ1VIQ_qSKzwL5IXaZkvZFJXT3yL3n7OUVu7zCNJzdwznbC8Z-b0z2lYvcklJYi2VOFRcGbJtXUqgjk2oGsiqUMUMOLP70TTefkpsgqDxbRh9CDUfpOJgW-dU7cmgaoswe3wjUAUi6B6G2YEaiuXC0XScQYSYVKIzgKXJV8Zw-7AN_DBUI4GkTpsvQ9fVVjZM9csQiEXhYekyrKu1nu_POpQonGd8yqkIyXPECNmmqH5jH4sFiF67XhD7_JpkvLziBpI-uh86evBUadmHhb9Otqw3uV3NTaXLzJw", + "RS384", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic RS512", + "eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.zBlLlmRrUxx4SJPUbV37Q1joRcI9EW13grnKduK3wtYKmDXbgDpF1cZ6B-2Jsm5RB8REmMiLpGms-EjXhgnyh2TSHE-9W2gA_jvshegLWtwRVDX40ODSkTb7OVuaWgiy9y7llvcknFBTIg-FnVPVpXMmeV_pvwQyhaz1SSwSPrDyxEmksz1hq7YONXhXPpGaNbMMeDTNP_1oj8DZaqTIL9TwV8_1wb2Odt_Fy58Ke2RVFijsOLdnyEAjt2n9Mxihu9i3PhNBkkxa2GbnXBfq3kzvZ_xxGGopLdHhJjcGWXO-NiwI9_tiu14NRv4L2xC0ItD9Yz68v2ZIZEp_DuzwRQ", + "RS512", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "basic invalid: foo => bar", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.EhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + "RS256", + map[string]interface{}{"foo": "bar"}, + false, + }, +} + +func TestRSAVerify(t *testing.T) { + keyData, _ := ioutil.ReadFile("test/sample_key.pub") + key, _ := jwt.ParseRSAPublicKeyFromPEM(keyData) + + for _, data := range rsaTestData { + parts := strings.Split(data.tokenString, ".") + + method := jwt.GetSigningMethod(data.alg) + err := method.Verify(strings.Join(parts[0:2], "."), parts[2], key) + if data.valid && err != nil { + t.Errorf("[%v] Error while verifying key: %v", data.name, err) + } + if !data.valid && err == nil { + t.Errorf("[%v] Invalid key passed validation", data.name) + } + } +} + +func TestRSASign(t *testing.T) { + keyData, _ := ioutil.ReadFile("test/sample_key") + 
key, _ := jwt.ParseRSAPrivateKeyFromPEM(keyData) + + for _, data := range rsaTestData { + if data.valid { + parts := strings.Split(data.tokenString, ".") + method := jwt.GetSigningMethod(data.alg) + sig, err := method.Sign(strings.Join(parts[0:2], "."), key) + if err != nil { + t.Errorf("[%v] Error signing token: %v", data.name, err) + } + if sig != parts[2] { + t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) + } + } + } +} + +func TestRSAVerifyWithPreParsedPrivateKey(t *testing.T) { + key, _ := ioutil.ReadFile("test/sample_key.pub") + parsedKey, err := jwt.ParseRSAPublicKeyFromPEM(key) + if err != nil { + t.Fatal(err) + } + testData := rsaTestData[0] + parts := strings.Split(testData.tokenString, ".") + err = jwt.SigningMethodRS256.Verify(strings.Join(parts[0:2], "."), parts[2], parsedKey) + if err != nil { + t.Errorf("[%v] Error while verifying key: %v", testData.name, err) + } +} + +func TestRSAWithPreParsedPrivateKey(t *testing.T) { + key, _ := ioutil.ReadFile("test/sample_key") + parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) + if err != nil { + t.Fatal(err) + } + testData := rsaTestData[0] + parts := strings.Split(testData.tokenString, ".") + sig, err := jwt.SigningMethodRS256.Sign(strings.Join(parts[0:2], "."), parsedKey) + if err != nil { + t.Errorf("[%v] Error signing token: %v", testData.name, err) + } + if sig != parts[2] { + t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", testData.name, sig, parts[2]) + } +} + +func TestRSAKeyParsing(t *testing.T) { + key, _ := ioutil.ReadFile("test/sample_key") + pubKey, _ := ioutil.ReadFile("test/sample_key.pub") + badKey := []byte("All your base are belong to key") + + // Test parsePrivateKey + if _, e := jwt.ParseRSAPrivateKeyFromPEM(key); e != nil { + t.Errorf("Failed to parse valid private key: %v", e) + } + + if k, e := jwt.ParseRSAPrivateKeyFromPEM(pubKey); e == nil { + t.Errorf("Parsed public key as valid private key: %v", k) + } + + if k, e := jwt.ParseRSAPrivateKeyFromPEM(badKey); e == nil { + t.Errorf("Parsed invalid key as valid private key: %v", k) + } + + // Test parsePublicKey + if _, e := jwt.ParseRSAPublicKeyFromPEM(pubKey); e != nil { + t.Errorf("Failed to parse valid public key: %v", e) + } + + if k, e := jwt.ParseRSAPublicKeyFromPEM(key); e == nil { + t.Errorf("Parsed private key as valid public key: %v", k) + } + + if k, e := jwt.ParseRSAPublicKeyFromPEM(badKey); e == nil { + t.Errorf("Parsed invalid key as valid private key: %v", k) + } + +} + +func BenchmarkRS256Signing(b *testing.B) { + key, _ := ioutil.ReadFile("test/sample_key") + parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) + if err != nil { + b.Fatal(err) + } + + benchmarkSigning(b, jwt.SigningMethodRS256, parsedKey) +} + +func BenchmarkRS384Signing(b *testing.B) { + key, _ := ioutil.ReadFile("test/sample_key") + parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) + if err != nil { + b.Fatal(err) + } + + benchmarkSigning(b, jwt.SigningMethodRS384, parsedKey) +} + +func BenchmarkRS512Signing(b *testing.B) { + key, _ := ioutil.ReadFile("test/sample_key") + parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) + if err != nil { + b.Fatal(err) + } + + benchmarkSigning(b, jwt.SigningMethodRS512, parsedKey) +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go new file mode 100644 index 000000000..213a90dbb --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/rsa" + 
"crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 000000000..ed1f212b2 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. 
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec256-private.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec256-private.pem new file mode 100644 index 000000000..a6882b3e5 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/test/ec256-private.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIAh5qA3rmqQQuu0vbKV/+zouz/y/Iy2pLpIcWUSyImSwoAoGCCqGSM49 +AwEHoUQDQgAEYD54V/vp+54P9DXarYqx4MPcm+HKRIQzNasYSoRQHQ/6S6Ps8tpM +cT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec256-public.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec256-public.pem new file mode 100644 index 000000000..7191361e7 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/test/ec256-public.pem @@ -0,0 +1,4 @@ +-----BEGIN PUBLIC KEY----- +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEYD54V/vp+54P9DXarYqx4MPcm+HK +RIQzNasYSoRQHQ/6S6Ps8tpMcT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== +-----END PUBLIC KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec384-private.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec384-private.pem new file mode 100644 index 000000000..a86c823e5 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/test/ec384-private.pem @@ -0,0 +1,6 @@ +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDCaCvMHKhcG/qT7xsNLYnDT7sE/D+TtWIol1ROdaK1a564vx5pHbsRy +SEKcIxISi1igBwYFK4EEACKhZANiAATYa7rJaU7feLMqrAx6adZFNQOpaUH/Uylb +ZLriOLON5YFVwtVUpO1FfEXZUIQpptRPtc5ixIPY658yhBSb6irfIJUSP9aYTflJ +GKk/mDkK4t8mWBzhiD5B6jg9cEGhGgA= +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec384-public.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec384-public.pem new file mode 100644 index 000000000..e80d00564 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/test/ec384-public.pem @@ -0,0 +1,5 @@ +-----BEGIN PUBLIC KEY----- +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE2Gu6yWlO33izKqwMemnWRTUDqWlB/1Mp +W2S64jizjeWBVcLVVKTtRXxF2VCEKabUT7XOYsSD2OufMoQUm+oq3yCVEj/WmE35 +SRipP5g5CuLfJlgc4Yg+Qeo4PXBBoRoA +-----END PUBLIC KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec512-private.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec512-private.pem new file mode 100644 index 000000000..213afaf13 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/test/ec512-private.pem @@ -0,0 +1,7 @@ +-----BEGIN EC PRIVATE KEY----- +MIHcAgEBBEIB0pE4uFaWRx7t03BsYlYvF1YvKaBGyvoakxnodm9ou0R9wC+sJAjH +QZZJikOg4SwNqgQ/hyrOuDK2oAVHhgVGcYmgBwYFK4EEACOhgYkDgYYABAAJXIuw +12MUzpHggia9POBFYXSxaOGKGbMjIyDI+6q7wi7LMw3HgbaOmgIqFG72o8JBQwYN +4IbXHf+f86CRY1AA2wHzbHvt6IhkCXTNxBEffa1yMUgu8n9cKKF2iLgyQKcKqW33 +8fGOw/n3Rm2Yd/EB56u2rnD29qS+nOM9eGS+gy39OQ== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec512-public.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec512-public.pem new file mode 100644 index 000000000..02ea02203 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/test/ec512-public.pem @@ -0,0 +1,6 @@ +-----BEGIN PUBLIC KEY----- +MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQACVyLsNdjFM6R4IImvTzgRWF0sWjh 
+ihmzIyMgyPuqu8IuyzMNx4G2jpoCKhRu9qPCQUMGDeCG1x3/n/OgkWNQANsB82x7 +7eiIZAl0zcQRH32tcjFILvJ/XCihdoi4MkCnCqlt9/HxjsP590ZtmHfxAeertq5w +9vakvpzjPXhkvoMt/Tk= +-----END PUBLIC KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/helpers.go b/vendor/github.com/dgrijalva/jwt-go/test/helpers.go new file mode 100644 index 000000000..f84c3ef63 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/test/helpers.go @@ -0,0 +1,42 @@ +package test + +import ( + "crypto/rsa" + "github.com/dgrijalva/jwt-go" + "io/ioutil" +) + +func LoadRSAPrivateKeyFromDisk(location string) *rsa.PrivateKey { + keyData, e := ioutil.ReadFile(location) + if e != nil { + panic(e.Error()) + } + key, e := jwt.ParseRSAPrivateKeyFromPEM(keyData) + if e != nil { + panic(e.Error()) + } + return key +} + +func LoadRSAPublicKeyFromDisk(location string) *rsa.PublicKey { + keyData, e := ioutil.ReadFile(location) + if e != nil { + panic(e.Error()) + } + key, e := jwt.ParseRSAPublicKeyFromPEM(keyData) + if e != nil { + panic(e.Error()) + } + return key +} + +func MakeSampleToken(c jwt.Claims, key interface{}) string { + token := jwt.NewWithClaims(jwt.SigningMethodRS256, c) + s, e := token.SignedString(key) + + if e != nil { + panic(e.Error()) + } + + return s +} diff --git a/vendor/github.com/dgrijalva/jwt-go/test/hmacTestKey b/vendor/github.com/dgrijalva/jwt-go/test/hmacTestKey new file mode 100644 index 000000000..435b8ddb3 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/test/hmacTestKey @@ -0,0 +1 @@ +#5K+¥¼ƒ~ew{¦Z³(æðTÉ(©„²ÒP.¿ÓûZ’ÒGï–Š´Ãwb="=.!r.OÀÍšõgЀ£ \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/test/sample_key b/vendor/github.com/dgrijalva/jwt-go/test/sample_key new file mode 100644 index 000000000..abdbade31 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/test/sample_key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA4f5wg5l2hKsTeNem/V41fGnJm6gOdrj8ym3rFkEU/wT8RDtn +SgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7mCpz9Er5qLaMXJwZxzHzAahlfA0i +cqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBpHssPnpYGIn20ZZuNlX2BrClciHhC +PUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2XrHhR+1DcKJzQBSTAGnpYVaqpsAR +ap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3bODIRe1AuTyHceAbewn8b462yEWKA +Rdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy7wIDAQABAoIBAQCwia1k7+2oZ2d3 +n6agCAbqIE1QXfCmh41ZqJHbOY3oRQG3X1wpcGH4Gk+O+zDVTV2JszdcOt7E5dAy +MaomETAhRxB7hlIOnEN7WKm+dGNrKRvV0wDU5ReFMRHg31/Lnu8c+5BvGjZX+ky9 +POIhFFYJqwCRlopGSUIxmVj5rSgtzk3iWOQXr+ah1bjEXvlxDOWkHN6YfpV5ThdE +KdBIPGEVqa63r9n2h+qazKrtiRqJqGnOrHzOECYbRFYhexsNFz7YT02xdfSHn7gM +IvabDDP/Qp0PjE1jdouiMaFHYnLBbgvlnZW9yuVf/rpXTUq/njxIXMmvmEyyvSDn +FcFikB8pAoGBAPF77hK4m3/rdGT7X8a/gwvZ2R121aBcdPwEaUhvj/36dx596zvY +mEOjrWfZhF083/nYWE2kVquj2wjs+otCLfifEEgXcVPTnEOPO9Zg3uNSL0nNQghj +FuD3iGLTUBCtM66oTe0jLSslHe8gLGEQqyMzHOzYxNqibxcOZIe8Qt0NAoGBAO+U +I5+XWjWEgDmvyC3TrOSf/KCGjtu0TSv30ipv27bDLMrpvPmD/5lpptTFwcxvVhCs +2b+chCjlghFSWFbBULBrfci2FtliClOVMYrlNBdUSJhf3aYSG2Doe6Bgt1n2CpNn +/iu37Y3NfemZBJA7hNl4dYe+f+uzM87cdQ214+jrAoGAXA0XxX8ll2+ToOLJsaNT +OvNB9h9Uc5qK5X5w+7G7O998BN2PC/MWp8H+2fVqpXgNENpNXttkRm1hk1dych86 +EunfdPuqsX+as44oCyJGFHVBnWpm33eWQw9YqANRI+pCJzP08I5WK3osnPiwshd+ +hR54yjgfYhBFNI7B95PmEQkCgYBzFSz7h1+s34Ycr8SvxsOBWxymG5zaCsUbPsL0 +4aCgLScCHb9J+E86aVbbVFdglYa5Id7DPTL61ixhl7WZjujspeXZGSbmq0Kcnckb +mDgqkLECiOJW2NHP/j0McAkDLL4tysF8TLDO8gvuvzNC+WQ6drO2ThrypLVZQ+ry +eBIPmwKBgEZxhqa0gVvHQG/7Od69KWj4eJP28kq13RhKay8JOoN0vPmspXJo1HY3 +CKuHRG+AP579dncdUnOMvfXOtkdM4vk0+hWASBQzM9xzVcztCa+koAugjVaLS9A+ +9uQoqEeVNTckxx0S2bYevRy7hGQmUJTyQm3j1zEUR5jpdbL83Fbq +-----END RSA PRIVATE KEY----- diff --git 
a/vendor/github.com/dgrijalva/jwt-go/test/sample_key.pub b/vendor/github.com/dgrijalva/jwt-go/test/sample_key.pub new file mode 100644 index 000000000..03dc982ac --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/test/sample_key.pub @@ -0,0 +1,9 @@ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4f5wg5l2hKsTeNem/V41 +fGnJm6gOdrj8ym3rFkEU/wT8RDtnSgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7 +mCpz9Er5qLaMXJwZxzHzAahlfA0icqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBp +HssPnpYGIn20ZZuNlX2BrClciHhCPUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2 +XrHhR+1DcKJzQBSTAGnpYVaqpsARap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3b +ODIRe1AuTyHceAbewn8b462yEWKARdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy +7wIDAQAB +-----END PUBLIC KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 000000000..d637e0867 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. 
+// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml index 3cb77dc9a..4a237eafa 100644 --- a/vendor/github.com/go-ini/ini/.travis.yml +++ b/vendor/github.com/go-ini/ini/.travis.yml @@ -1,7 +1,6 @@ sudo: false language: go go: - - 1.5.x - 1.6.x - 1.7.x - 1.8.x diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 15ebc8f72..2f0776bc3 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -1,3 +1,5 @@ +// +build go1.6 + // Copyright 2014 Unknwon // // Licensed under the Apache License, Version 2.0 (the "License"): you may @@ -32,7 +34,7 @@ const ( // Maximum allowed depth when recursively substituting variable names. _DEPTH_VALUES = 99 - _VERSION = "1.37.0" + _VERSION = "1.38.1" ) // Version returns current package version literal. @@ -132,6 +134,8 @@ type LoadOptions struct { IgnoreContinuation bool // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. IgnoreInlineComment bool + // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. + SkipUnrecognizableLines bool // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. // This type of key is mostly used in my.cnf. AllowBooleanKeys bool @@ -157,7 +161,7 @@ type LoadOptions struct { // when value is NOT surrounded by any quotes. // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may not be necessary at all. UnescapeValueCommentSymbols bool - // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise + // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise // conform to key/value pairs. Specify the names of those blocks here. UnparseableSections []string } @@ -200,7 +204,7 @@ func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { return LoadSources(LoadOptions{Insensitive: true}, source, others...) } -// InsensitiveLoad has exactly same functionality as Load function +// ShadowLoad has exactly the same functionality as Load function // except it allows having shadow keys. func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { return LoadSources(LoadOptions{AllowShadows: true}, source, others...) 
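Taken together, the jwt-go files vendored earlier in this diff (`rsa_utils.go`, `signing_method.go`, `token.go`) support a complete sign-and-verify round trip. Below is a minimal sketch, not part of the patch itself; the `main` package and key paths are illustrative (they reuse the package's own test fixtures `test/sample_key` and `test/sample_key.pub`):

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/dgrijalva/jwt-go"
)

func main() {
	// Load the RSA key pair; error handling on ReadFile is elided for brevity.
	privPEM, _ := ioutil.ReadFile("test/sample_key")
	privKey, err := jwt.ParseRSAPrivateKeyFromPEM(privPEM)
	if err != nil {
		panic(err)
	}
	pubPEM, _ := ioutil.ReadFile("test/sample_key.pub")
	pubKey, err := jwt.ParseRSAPublicKeyFromPEM(pubPEM)
	if err != nil {
		panic(err)
	}

	// Sign: header and claims are JSON-encoded, base64url-encoded with the
	// padding stripped (EncodeSegment), then signed by the RS256 method.
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"foo": "bar"})
	signed, err := token.SignedString(privKey)
	if err != nil {
		panic(err)
	}

	// Verify: the Keyfunc receives the parsed-but-unverified token, so it can
	// pin the expected "alg" from the header before handing back a key.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if t.Method.Alg() != "RS256" {
			return nil, fmt.Errorf("unexpected signing method %q", t.Header["alg"])
		}
		return pubKey, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Valid, parsed.Claims.(jwt.MapClaims)["foo"]) // true bar
}
```

The `Keyfunc` indirection is the notable design point: because it sees the unverified header, the caller decides which algorithms to accept rather than trusting whatever the token claims.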
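The registry in `signing_method.go` above is how the concrete methods make themselves discoverable by "alg" name. A hedged sketch of that registration pattern follows; `demoMethod` and the `"DEMO"` name are entirely hypothetical, and the method deliberately does no real signing:

```go
package jwtdemo

import "github.com/dgrijalva/jwt-go"

// demoMethod is a do-nothing SigningMethod that exists only to illustrate the
// registry wiring; it must never be used for real tokens.
type demoMethod struct{}

func (m *demoMethod) Alg() string { return "DEMO" }

func (m *demoMethod) Sign(signingString string, key interface{}) (string, error) {
	return jwt.EncodeSegment([]byte("demo")), nil // not a real signature
}

func (m *demoMethod) Verify(signingString, signature string, key interface{}) error {
	return nil // accepts anything; demo only
}

func init() {
	// Register a factory under the "alg" name, as the built-in methods do in
	// their own init() functions; lookups then go through GetSigningMethod.
	jwt.RegisterSigningMethod("DEMO", func() jwt.SigningMethod {
		return &demoMethod{}
	})
}
```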
diff --git a/vendor/github.com/go-ini/ini/ini_test.go b/vendor/github.com/go-ini/ini/ini_test.go index d68c7caa3..9ff8f5899 100644 --- a/vendor/github.com/go-ini/ini/ini_test.go +++ b/vendor/github.com/go-ini/ini/ini_test.go @@ -363,6 +363,29 @@ key2=value2 #comment2`)) }) }) + Convey("Skip unrecognizable lines", func() { + f, err := ini.LoadSources(ini.LoadOptions{ + SkipUnrecognizableLines: true, + }, []byte(` +GenerationDepth: 13 + +BiomeRarityScale: 100 + +################ +# Biome Groups # +################ + +BiomeGroup(NormalBiomes, 3, 99, RoofedForestEnchanted, ForestSakura, FloatingJungle +BiomeGroup(IceBiomes, 4, 85, Ice Plains) +`)) + So(err, ShouldBeNil) + So(f, ShouldNotBeNil) + + So(f.Section("").Key("GenerationDepth").String(), ShouldEqual, "13") + So(f.Section("").Key("BiomeRarityScale").String(), ShouldEqual, "100") + So(f.Section("").HasKey("BiomeGroup"), ShouldBeFalse) + }) + Convey("Allow boolean type keys", func() { f, err := ini.LoadSources(ini.LoadOptions{ AllowPythonMultilineValues: true, @@ -764,7 +787,7 @@ GITHUB = U;n;k;n;w;o;n `)) So(err, ShouldBeNil) So(f, ShouldNotBeNil) - sec := f.Section("author") + sec := f.Section("author") nameValue := sec.Key("NAME").String() githubValue := sec.Key("GITHUB").String() So(nameValue, ShouldEqual, "U") diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go index d5aa2db60..3daf54c38 100644 --- a/vendor/github.com/go-ini/ini/parser.go +++ b/vendor/github.com/go-ini/ini/parser.go @@ -339,8 +339,7 @@ func (f *File) parse(reader io.Reader) (err error) { // NOTE: Iterate and increase `currentPeekSize` until // the size of the parser buffer is found. - // TODO: When Golang 1.10 is the lowest version supported, - // replace with `parserBufferSize := p.buf.Size()`. + // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. parserBufferSize := 0 // NOTE: Peek 1kb at a time. currentPeekSize := 1024 @@ -390,8 +389,7 @@ func (f *File) parse(reader io.Reader) (err error) { // Section if line[0] == '[' { // Read to the next ']' (TODO: support quoted strings) - // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 - closeIdx := bytes.LastIndex(line, []byte("]")) + closeIdx := bytes.LastIndexByte(line, ']') if closeIdx == -1 { return fmt.Errorf("unclosed section: %s", line) } @@ -433,25 +431,31 @@ func (f *File) parse(reader io.Reader) (err error) { kname, offset, err := readKeyName(line) if err != nil { // Treat as boolean key when desired, and whole line is key name. 
- if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { - kname, err := p.readValue(line, - parserBufferSize, - f.options.IgnoreContinuation, - f.options.IgnoreInlineComment, - f.options.UnescapeValueDoubleQuotes, - f.options.UnescapeValueCommentSymbols, - f.options.AllowPythonMultilineValues, - f.options.SpaceBeforeInlineComment) - if err != nil { - return err - } - key, err := section.NewBooleanKey(kname) - if err != nil { - return err + if IsErrDelimiterNotFound(err) { + switch { + case f.options.AllowBooleanKeys: + kname, err := p.readValue(line, + parserBufferSize, + f.options.IgnoreContinuation, + f.options.IgnoreInlineComment, + f.options.UnescapeValueDoubleQuotes, + f.options.UnescapeValueCommentSymbols, + f.options.AllowPythonMultilineValues, + f.options.SpaceBeforeInlineComment) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + + case f.options.SkipUnrecognizableLines: + continue } - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - continue } return err } diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go index d8a402619..340a1efad 100644 --- a/vendor/github.com/go-ini/ini/section.go +++ b/vendor/github.com/go-ini/ini/section.go @@ -82,6 +82,7 @@ func (s *Section) NewKey(name, val string) (*Key, error) { } } else { s.keys[name].value = val + s.keysHash[name] = val } return s.keys[name], nil } diff --git a/vendor/github.com/go-ini/ini/section_test.go b/vendor/github.com/go-ini/ini/section_test.go index e9c347881..fd5e43a37 100644 --- a/vendor/github.com/go-ini/ini/section_test.go +++ b/vendor/github.com/go-ini/ini/section_test.go @@ -272,24 +272,26 @@ func TestSection_KeyStrings(t *testing.T) { func TestSection_KeyHash(t *testing.T) { Convey("Get clone of key hash", t, func() { - f := ini.Empty() + f, err := ini.Load([]byte(` +key = one +[log] +name = app +file = a.log +`), []byte(` +key = two +[log] +name = app2 +file = b.log +`)) + So(err, ShouldBeNil) So(f, ShouldNotBeNil) - k, err := f.Section("").NewKey("NAME", "ini") - So(err, ShouldBeNil) - So(k, ShouldNotBeNil) - k, err = f.Section("").NewKey("VERSION", "v1") - So(err, ShouldBeNil) - So(k, ShouldNotBeNil) - k, err = f.Section("").NewKey("IMPORT_PATH", "gopkg.in/ini.v1") - So(err, ShouldBeNil) - So(k, ShouldNotBeNil) + So(f.Section("").Key("key").String(), ShouldEqual, "two") - hash := f.Section("").KeysHash() + hash := f.Section("log").KeysHash() relation := map[string]string{ - "NAME": "ini", - "VERSION": "v1", - "IMPORT_PATH": "gopkg.in/ini.v1", + "name": "app2", + "file": "b.log", } for k, v := range hash { So(v, ShouldEqual, relation[k]) diff --git a/vendor/github.com/go-openapi/analysis/.github/CONTRIBUTING.md b/vendor/github.com/go-openapi/analysis/.github/CONTRIBUTING.md new file mode 100644 index 000000000..7dea4240d --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.github/CONTRIBUTING.md @@ -0,0 +1,117 @@ +## Contribution Guidelines + +### Pull requests are always welcome + +We are always thrilled to receive pull requests, and do our best to +process them as fast as possible. Not sure if that typo is worth a pull +request? Do it! We will appreciate it. + +If your pull request is not accepted on the first try, don't be +discouraged! If there's a problem with the implementation, hopefully you +received feedback on what to improve. 
+ +We're trying very hard to keep go-swagger lean and focused. We don't want it +to do everything for everybody. This means that we might decide against +incorporating a new feature. However, there might be a way to implement +that feature *on top of* go-swagger. + + +### Conventions + +Fork the repo and make changes on your fork in a feature branch: + +- If it's a bugfix branch, name it XXX-something where XXX is the number of the + issue +- If it's a feature branch, create an enhancement issue to announce your + intentions, and name it XXX-something where XXX is the number of the issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. Run the full test suite on +your branch before submitting a pull request. + +Update the documentation when creating or modifying features. Test +your documentation changes for clarity, concision, and correctness, as +well as a clean documentation build. See ``docs/README.md`` for more +information on building the docs and how docs get released. + +Write clean code. Universally formatted code promotes ease of writing, reading, +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plugins that do this automatically. + +Pull request descriptions should be as clear as possible and include a +reference to all the issues that they address. + +Pull requests must not contain commits from other users or branches. + +Commit messages must start with a capitalized and short summary (max. 50 +chars) written in the imperative, followed by an optional, more detailed +explanatory text which is separated from the summary by an empty line. + +Code review comments may be added to your pull request. Discuss, then make the +suggested modifications and push additional commits to your feature branch. Be +sure to post a comment after pushing. The new commits will show up in the pull +request automatically, but the reviewers will not be notified unless you +comment. + +Before the pull request is merged, make sure that you squash your commits into +logical units of work using `git rebase -i` and `git push -f`. After every +commit the test suite should be passing. Include documentation changes in the +same commit so that a revert would remove all traces of the feature or fix. + +Commits that fix or close an issue should include a reference like `Closes #XXX` +or `Fixes #XXX`, which will automatically close the issue when merged. + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the +patch, which certifies that you wrote it or otherwise have the right to +pass it on as an open-source patch. The rules are pretty simple: if you +can certify the below (from +[developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore new file mode 100644 index 000000000..c96f0b231 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.gitignore @@ -0,0 +1,3 @@ +secrets.yml +coverage.out +.idea diff --git a/vendor/github.com/go-openapi/analysis/.travis.yml b/vendor/github.com/go-openapi/analysis/.travis.yml new file mode 100644 index 000000000..3aa42ab3a --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.travis.yml @@ -0,0 +1,18 @@ +language: go +go: +- 1.7 +install: +- go get -u github.com/stretchr/testify/assert +- go get -u gopkg.in/yaml.v2 +- go get -u github.com/go-openapi/swag +- go get -u github.com/go-openapi/jsonpointer +- go get -u github.com/go-openapi/spec +- go get -u github.com/go-openapi/strfmt +- go get -u github.com/go-openapi/loads/fmts +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
+after_success: +- bash <(curl -s https://codecov.io/bash) +notifications: + slack: + secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/go-openapi/analysis/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md new file mode 100644 index 000000000..d32c30d45 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/README.md @@ -0,0 +1,6 @@ +# OpenAPI initiative analysis [![Build Status](https://travis-ci.org/go-openapi/analysis.svg?branch=master)](https://travis-ci.org/go-openapi/analysis) [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis) + + +A foundational library to analyze an OAI specification document for easier reasoning about the content. 
\ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go new file mode 100644 index 000000000..77323a58e --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/analyzer.go @@ -0,0 +1,785 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "fmt" + slashpath "path" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +type referenceAnalysis struct { + schemas map[string]spec.Ref + responses map[string]spec.Ref + parameters map[string]spec.Ref + items map[string]spec.Ref + headerItems map[string]spec.Ref + parameterItems map[string]spec.Ref + allRefs map[string]spec.Ref + pathItems map[string]spec.Ref +} + +func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { + r.allRefs["#"+key] = ref +} + +func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { + r.items["#"+key] = items.Ref + r.addRef(key, items.Ref) + if location == "header" { + r.headerItems["#"+key] = items.Ref + } else { + r.parameterItems["#"+key] = items.Ref + } +} + +func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { + r.schemas["#"+key] = ref.Schema.Ref + r.addRef(key, ref.Schema.Ref) +} + +func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { + r.responses["#"+key] = resp.Ref + r.addRef(key, resp.Ref) +} + +func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { + r.parameters["#"+key] = param.Ref + r.addRef(key, param.Ref) +} + +func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { + r.pathItems["#"+key] = pathItem.Ref + r.addRef(key, pathItem.Ref) +} + +type patternAnalysis struct { + parameters map[string]string + headers map[string]string + items map[string]string + schemas map[string]string + allPatterns map[string]string +} + +func (p *patternAnalysis) addPattern(key, pattern string) { + p.allPatterns["#"+key] = pattern +} + +func (p *patternAnalysis) addParameterPattern(key, pattern string) { + p.parameters["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addHeaderPattern(key, pattern string) { + p.headers["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addItemsPattern(key, pattern string) { + p.items["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addSchemaPattern(key, pattern string) { + p.schemas["#"+key] = pattern + p.addPattern(key, pattern) +} + +// New takes a swagger spec object and returns an analyzed spec document. +// The analyzed document contains a number of indices that make it easier to +// reason about semantics of a swagger specification for use in code generation +// or validation etc. 
+func New(doc *spec.Swagger) *Spec { + a := &Spec{ + spec: doc, + consumes: make(map[string]struct{}, 150), + produces: make(map[string]struct{}, 150), + authSchemes: make(map[string]struct{}, 150), + operations: make(map[string]map[string]*spec.Operation, 150), + allSchemas: make(map[string]SchemaRef, 150), + allOfs: make(map[string]SchemaRef, 150), + references: referenceAnalysis{ + schemas: make(map[string]spec.Ref, 150), + pathItems: make(map[string]spec.Ref, 150), + responses: make(map[string]spec.Ref, 150), + parameters: make(map[string]spec.Ref, 150), + items: make(map[string]spec.Ref, 150), + headerItems: make(map[string]spec.Ref, 150), + parameterItems: make(map[string]spec.Ref, 150), + allRefs: make(map[string]spec.Ref, 150), + }, + patterns: patternAnalysis{ + parameters: make(map[string]string, 150), + headers: make(map[string]string, 150), + items: make(map[string]string, 150), + schemas: make(map[string]string, 150), + allPatterns: make(map[string]string, 150), + }, + } + a.initialize() + return a +} + +// Spec takes a swagger spec object and turns it into a registry +// with a bunch of utility methods to act on the information in the spec +type Spec struct { + spec *spec.Swagger + consumes map[string]struct{} + produces map[string]struct{} + authSchemes map[string]struct{} + operations map[string]map[string]*spec.Operation + references referenceAnalysis + patterns patternAnalysis + allSchemas map[string]SchemaRef + allOfs map[string]SchemaRef +} + +func (s *Spec) reset() { + s.consumes = make(map[string]struct{}, 150) + s.produces = make(map[string]struct{}, 150) + s.authSchemes = make(map[string]struct{}, 150) + s.operations = make(map[string]map[string]*spec.Operation, 150) + s.allSchemas = make(map[string]SchemaRef, 150) + s.allOfs = make(map[string]SchemaRef, 150) + s.references.schemas = make(map[string]spec.Ref, 150) + s.references.pathItems = make(map[string]spec.Ref, 150) + s.references.responses = make(map[string]spec.Ref, 150) + s.references.parameters = make(map[string]spec.Ref, 150) + s.references.items = make(map[string]spec.Ref, 150) + s.references.headerItems = make(map[string]spec.Ref, 150) + s.references.parameterItems = make(map[string]spec.Ref, 150) + s.references.allRefs = make(map[string]spec.Ref, 150) + s.patterns.parameters = make(map[string]string, 150) + s.patterns.headers = make(map[string]string, 150) + s.patterns.items = make(map[string]string, 150) + s.patterns.schemas = make(map[string]string, 150) + s.patterns.allPatterns = make(map[string]string, 150) +} + +func (s *Spec) reload() { + s.reset() + s.initialize() +} + +func (s *Spec) initialize() { + for _, c := range s.spec.Consumes { + s.consumes[c] = struct{}{} + } + for _, c := range s.spec.Produces { + s.produces[c] = struct{}{} + } + for _, ss := range s.spec.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + for path, pathItem := range s.AllPaths() { + s.analyzeOperations(path, &pathItem) + } + + for name, parameter := range s.spec.Parameters { + refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) + if parameter.Items != nil { + s.analyzeItems("items", parameter.Items, refPref, "parameter") + } + if parameter.In == "body" && parameter.Schema != nil { + s.analyzeSchema("schema", *parameter.Schema, refPref) + } + if parameter.Pattern != "" { + s.patterns.addParameterPattern(refPref, parameter.Pattern) + } + } + + for name, response := range s.spec.Responses { + refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) + for k, v := range 
response.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + if v.Items != nil { + s.analyzeItems("items", v.Items, hRefPref, "header") + } + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + } + if response.Schema != nil { + s.analyzeSchema("schema", *response.Schema, refPref) + } + } + + for name, schema := range s.spec.Definitions { + s.analyzeSchema(name, schema, "/definitions") + } + // TODO: after analyzing all things and flattening schemas etc + // resolve all the collected references to their final representations + // best put in a separate method because this could get expensive +} + +func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { + // TODO: resolve refs here? + op := pi + if pi.Ref.String() != "" { + key := slashpath.Join("/paths", jsonpointer.Escape(path)) + s.references.addPathItemRef(key, pi) + } + s.analyzeOperation("GET", path, op.Get) + s.analyzeOperation("PUT", path, op.Put) + s.analyzeOperation("POST", path, op.Post) + s.analyzeOperation("PATCH", path, op.Patch) + s.analyzeOperation("DELETE", path, op.Delete) + s.analyzeOperation("HEAD", path, op.Head) + s.analyzeOperation("OPTIONS", path, op.Options) + for i, param := range op.Parameters { + refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, ¶m) + } + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + if param.Items != nil { + s.analyzeItems("items", param.Items, refPref, "parameter") + } + if param.Schema != nil { + s.analyzeSchema("schema", *param.Schema, refPref) + } + } +} + +func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { + if items == nil { + return + } + refPref := slashpath.Join(prefix, name) + s.analyzeItems(name, items.Items, refPref, location) + if items.Ref.String() != "" { + s.references.addItemsRef(refPref, items, location) + } + if items.Pattern != "" { + s.patterns.addItemsPattern(refPref, items.Pattern) + } +} + +func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { + if op == nil { + return + } + + for _, c := range op.Consumes { + s.consumes[c] = struct{}{} + } + for _, c := range op.Produces { + s.produces[c] = struct{}{} + } + for _, ss := range op.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + if _, ok := s.operations[method]; !ok { + s.operations[method] = make(map[string]*spec.Operation) + } + s.operations[method][path] = op + prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) + for i, param := range op.Parameters { + refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, ¶m) + } + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + s.analyzeItems("items", param.Items, refPref, "parameter") + if param.In == "body" && param.Schema != nil { + s.analyzeSchema("schema", *param.Schema, refPref) + } + } + if op.Responses != nil { + if op.Responses.Default != nil { + refPref := slashpath.Join(prefix, "responses", "default") + if op.Responses.Default.Ref.String() != "" { + s.references.addResponseRef(refPref, op.Responses.Default) + } + for k, v := range op.Responses.Default.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + 
} + if op.Responses.Default.Schema != nil { + s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref) + } + } + for k, res := range op.Responses.StatusCodeResponses { + refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, &res) + } + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + } + if res.Schema != nil { + s.analyzeSchema("schema", *res.Schema, refPref) + } + } + } +} + +func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) { + refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) + schRef := SchemaRef{ + Name: name, + Schema: &schema, + Ref: spec.MustCreateRef("#" + refURI), + TopLevel: prefix == "/definitions", + } + + s.allSchemas["#"+refURI] = schRef + + if schema.Ref.String() != "" { + s.references.addSchemaRef(refURI, schRef) + } + if schema.Pattern != "" { + s.patterns.addSchemaPattern(refURI, schema.Pattern) + } + + for k, v := range schema.Definitions { + s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions")) + } + for k, v := range schema.Properties { + s.analyzeSchema(k, v, slashpath.Join(refURI, "properties")) + } + for k, v := range schema.PatternProperties { + s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties")) + } + for i, v := range schema.AllOf { + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) + } + if len(schema.AllOf) > 0 { + s.allOfs["#"+refURI] = schRef + } + for i, v := range schema.AnyOf { + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) + } + for i, v := range schema.OneOf { + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) + } + if schema.Not != nil { + s.analyzeSchema("not", *schema.Not, refURI) + } + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI) + } + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI) + } + if schema.Items != nil { + if schema.Items.Schema != nil { + s.analyzeSchema("items", *schema.Items.Schema, refURI) + } + for i, sch := range schema.Items.Schemas { + s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) + } + } +} + +// SecurityRequirement is a representation of a security requirement for an operation +type SecurityRequirement struct { + Name string + Scopes []string +} + +// SecurityRequirementsFor gets the security requirements for the operation +func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement { + if s.spec.Security == nil && operation.Security == nil { + return nil + } + + schemes := s.spec.Security + if operation.Security != nil { + schemes = operation.Security + } + + unique := make(map[string]SecurityRequirement) + for _, scheme := range schemes { + for k, v := range scheme { + if _, ok := unique[k]; !ok { + unique[k] = SecurityRequirement{Name: k, Scopes: v} + } + } + } + + var result []SecurityRequirement + for _, v := range unique { + result = append(result, v) + } + return result +} + +// SecurityDefinitionsFor gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { + 
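+	// (editorial note, not upstream) each requirement name returned by
+	// SecurityRequirementsFor is resolved against the spec's
+	// securityDefinitions; names without a matching definition are
+	// silently skipped.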
requirements := s.SecurityRequirementsFor(operation) + if len(requirements) == 0 { + return nil + } + result := make(map[string]spec.SecurityScheme) + for _, v := range requirements { + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + return result +} + +// ConsumesFor gets the mediatypes for the operation +func (s *Spec) ConsumesFor(operation *spec.Operation) []string { + + if len(operation.Consumes) == 0 { + cons := make(map[string]struct{}, len(s.spec.Consumes)) + for _, k := range s.spec.Consumes { + cons[k] = struct{}{} + } + return s.structMapKeys(cons) + } + + cons := make(map[string]struct{}, len(operation.Consumes)) + for _, c := range operation.Consumes { + cons[c] = struct{}{} + } + return s.structMapKeys(cons) +} + +// ProducesFor gets the mediatypes for the operation +func (s *Spec) ProducesFor(operation *spec.Operation) []string { + if len(operation.Produces) == 0 { + prod := make(map[string]struct{}, len(s.spec.Produces)) + for _, k := range s.spec.Produces { + prod[k] = struct{}{} + } + return s.structMapKeys(prod) + } + + prod := make(map[string]struct{}, len(operation.Produces)) + for _, c := range operation.Produces { + prod[c] = struct{}{} + } + return s.structMapKeys(prod) +} + +func mapKeyFromParam(param *spec.Parameter) string { + return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) +} + +func fieldNameFromParam(param *spec.Parameter) string { + if nm, ok := param.Extensions.GetString("go-name"); ok { + return nm + } + return swag.ToGoName(param.Name) +} + +func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) { + for _, param := range parameters { + pr := param + if pr.Ref.String() != "" { + obj, _, err := pr.Ref.GetPointer().Get(s.spec) + if err != nil { + panic(err) + } + pr = obj.(spec.Parameter) + } + res[mapKeyFromParam(&pr)] = pr + } +} + +// ParametersFor the specified operation id +func (s *Spec) ParametersFor(operationID string) []spec.Parameter { + gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { + bag := make(map[string]spec.Parameter) + s.paramsAsMap(pi.Parameters, bag) + s.paramsAsMap(op.Parameters, bag) + + var res []spec.Parameter + for _, v := range bag { + res = append(res, v) + } + return res + } + for _, pi := range s.spec.Paths.Paths { + if pi.Get != nil && pi.Get.ID == operationID { + return gatherParams(&pi, pi.Get) + } + if pi.Head != nil && pi.Head.ID == operationID { + return gatherParams(&pi, pi.Head) + } + if pi.Options != nil && pi.Options.ID == operationID { + return gatherParams(&pi, pi.Options) + } + if pi.Post != nil && pi.Post.ID == operationID { + return gatherParams(&pi, pi.Post) + } + if pi.Patch != nil && pi.Patch.ID == operationID { + return gatherParams(&pi, pi.Patch) + } + if pi.Put != nil && pi.Put.ID == operationID { + return gatherParams(&pi, pi.Put) + } + if pi.Delete != nil && pi.Delete.ID == operationID { + return gatherParams(&pi, pi.Delete) + } + } + return nil +} + +// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that +// apply for the method and path. 
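+//
+// For example (editorial sketch, not from the upstream source; an is the
+// *Spec returned by New): if the path item declares a shared "limit" query
+// parameter and its GET operation adds a "skip" parameter, both are
+// returned, with operation-level parameters overriding path-level ones that
+// share the same location and (go-)name key:
+//
+//	for _, p := range an.ParamsFor("GET", "/items") {
+//		fmt.Printf("%s param %q\n", p.In, p.Name)
+//	}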
+func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { + res := make(map[string]spec.Parameter) + if pi, ok := s.spec.Paths.Paths[path]; ok { + s.paramsAsMap(pi.Parameters, res) + s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res) + } + return res +} + +// OperationForName gets the operation for the given id +func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { + for method, pathItem := range s.operations { + for path, op := range pathItem { + if operationID == op.ID { + return method, path, op, true + } + } + } + return "", "", nil, false +} + +// OperationFor the given method and path +func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { + if mp, ok := s.operations[strings.ToUpper(method)]; ok { + op, fn := mp[path] + return op, fn + } + return nil, false +} + +// Operations gathers all the operations specified in the spec document +func (s *Spec) Operations() map[string]map[string]*spec.Operation { + return s.operations +} + +func (s *Spec) structMapKeys(mp map[string]struct{}) []string { + if len(mp) == 0 { + return nil + } + + result := make([]string, 0, len(mp)) + for k := range mp { + result = append(result, k) + } + return result +} + +// AllPaths returns all the paths in the swagger spec +func (s *Spec) AllPaths() map[string]spec.PathItem { + if s.spec == nil || s.spec.Paths == nil { + return nil + } + return s.spec.Paths.Paths +} + +// OperationIDs gets all the operation ids based on method an dpath +func (s *Spec) OperationIDs() []string { + if len(s.operations) == 0 { + return nil + } + result := make([]string, 0, len(s.operations)) + for method, v := range s.operations { + for p, o := range v { + if o.ID != "" { + result = append(result, o.ID) + } else { + result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) + } + } + } + return result +} + +// OperationMethodPaths gets all the operation ids based on method an dpath +func (s *Spec) OperationMethodPaths() []string { + if len(s.operations) == 0 { + return nil + } + result := make([]string, 0, len(s.operations)) + for method, v := range s.operations { + for p := range v { + result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) + } + } + return result +} + +// RequiredConsumes gets all the distinct consumes that are specified in the specification document +func (s *Spec) RequiredConsumes() []string { + return s.structMapKeys(s.consumes) +} + +// RequiredProduces gets all the distinct produces that are specified in the specification document +func (s *Spec) RequiredProduces() []string { + return s.structMapKeys(s.produces) +} + +// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec +func (s *Spec) RequiredSecuritySchemes() []string { + return s.structMapKeys(s.authSchemes) +} + +// SchemaRef is a reference to a schema +type SchemaRef struct { + Name string + Ref spec.Ref + Schema *spec.Schema + TopLevel bool +} + +// SchemasWithAllOf returns schema references to all schemas that are defined +// with an allOf key +func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { + for _, v := range s.allOfs { + result = append(result, v) + } + return +} + +// AllDefinitions returns schema references for all the definitions that were discovered +func (s *Spec) AllDefinitions() (result []SchemaRef) { + for _, v := range s.allSchemas { + result = append(result, v) + } + return +} + +// AllDefinitionReferences returns json refs for all the discovered 
schemas +func (s *Spec) AllDefinitionReferences() (result []string) { + for _, v := range s.references.schemas { + result = append(result, v.String()) + } + return +} + +// AllParameterReferences returns json refs for all the discovered parameters +func (s *Spec) AllParameterReferences() (result []string) { + for _, v := range s.references.parameters { + result = append(result, v.String()) + } + return +} + +// AllResponseReferences returns json refs for all the discovered responses +func (s *Spec) AllResponseReferences() (result []string) { + for _, v := range s.references.responses { + result = append(result, v.String()) + } + return +} + +// AllPathItemReferences returns the references for all the items +func (s *Spec) AllPathItemReferences() (result []string) { + for _, v := range s.references.pathItems { + result = append(result, v.String()) + } + return +} + +// AllItemsReferences returns the references for all the items +func (s *Spec) AllItemsReferences() (result []string) { + for _, v := range s.references.items { + result = append(result, v.String()) + } + return +} + +// AllReferences returns all the references found in the document +func (s *Spec) AllReferences() (result []string) { + for _, v := range s.references.allRefs { + result = append(result, v.String()) + } + return +} + +// AllRefs returns all the unique references found in the document +func (s *Spec) AllRefs() (result []spec.Ref) { + set := make(map[string]struct{}) + for _, v := range s.references.allRefs { + a := v.String() + if a == "" { + continue + } + if _, ok := set[a]; !ok { + set[a] = struct{}{} + result = append(result, v) + } + } + return +} + +func cloneStringMap(source map[string]string) map[string]string { + res := make(map[string]string, len(source)) + for k, v := range source { + res[k] = v + } + return res +} + +// ParameterPatterns returns all the patterns found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterPatterns() map[string]string { + return cloneStringMap(s.patterns.parameters) +} + +// HeaderPatterns returns all the patterns found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderPatterns() map[string]string { + return cloneStringMap(s.patterns.headers) +} + +// ItemsPatterns returns all the patterns found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsPatterns() map[string]string { + return cloneStringMap(s.patterns.items) +} + +// SchemaPatterns returns all the patterns found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaPatterns() map[string]string { + return cloneStringMap(s.patterns.schemas) +} + +// AllPatterns returns all the patterns found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllPatterns() map[string]string { + return cloneStringMap(s.patterns.allPatterns) +} diff --git a/vendor/github.com/go-openapi/analysis/analyzer_test.go b/vendor/github.com/go-openapi/analysis/analyzer_test.go new file mode 100644 index 000000000..70b80d7f9 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/analyzer_test.go @@ -0,0 +1,284 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "encoding/json" + "fmt" + "path/filepath" + "sort" + "testing" + + "github.com/go-openapi/loads/fmts" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" + "github.com/stretchr/testify/assert" +) + +func schemeNames(schemes []SecurityRequirement) []string { + var names []string + for _, v := range schemes { + names = append(names, v.Name) + } + sort.Sort(sort.StringSlice(names)) + return names +} + +func TestAnalyzer(t *testing.T) { + formatParam := spec.QueryParam("format").Typed("string", "") + + limitParam := spec.QueryParam("limit").Typed("integer", "int32") + limitParam.Extensions = spec.Extensions(map[string]interface{}{}) + limitParam.Extensions.Add("go-name", "Limit") + + skipParam := spec.QueryParam("skip").Typed("integer", "int32") + pi := spec.PathItem{} + pi.Parameters = []spec.Parameter{*limitParam} + + op := &spec.Operation{} + op.Consumes = []string{"application/x-yaml"} + op.Produces = []string{"application/x-yaml"} + op.Security = []map[string][]string{ + map[string][]string{"oauth2": []string{}}, + map[string][]string{"basic": nil}, + } + op.ID = "someOperation" + op.Parameters = []spec.Parameter{*skipParam} + pi.Get = op + + pi2 := spec.PathItem{} + pi2.Parameters = []spec.Parameter{*limitParam} + op2 := &spec.Operation{} + op2.ID = "anotherOperation" + op2.Parameters = []spec.Parameter{*skipParam} + pi2.Get = op2 + + spec := &spec.Swagger{ + SwaggerProps: spec.SwaggerProps{ + Consumes: []string{"application/json"}, + Produces: []string{"application/json"}, + Security: []map[string][]string{ + map[string][]string{"apikey": nil}, + }, + SecurityDefinitions: map[string]*spec.SecurityScheme{ + "basic": spec.BasicAuth(), + "apiKey": spec.APIKeyAuth("api_key", "query"), + "oauth2": spec.OAuth2AccessToken("http://authorize.com", "http://token.com"), + }, + Parameters: map[string]spec.Parameter{"format": *formatParam}, + Paths: &spec.Paths{ + Paths: map[string]spec.PathItem{ + "/": pi, + "/items": pi2, + }, + }, + }, + } + analyzer := New(spec) + + assert.Len(t, analyzer.consumes, 2) + assert.Len(t, analyzer.produces, 2) + assert.Len(t, analyzer.operations, 1) + assert.Equal(t, analyzer.operations["GET"]["/"], spec.Paths.Paths["/"].Get) + + expected := []string{"application/x-yaml"} + sort.Sort(sort.StringSlice(expected)) + consumes := analyzer.ConsumesFor(spec.Paths.Paths["/"].Get) + sort.Sort(sort.StringSlice(consumes)) + assert.Equal(t, expected, consumes) + + produces := analyzer.ProducesFor(spec.Paths.Paths["/"].Get) + sort.Sort(sort.StringSlice(produces)) + assert.Equal(t, expected, produces) + + expected = []string{"application/json"} + sort.Sort(sort.StringSlice(expected)) + consumes = analyzer.ConsumesFor(spec.Paths.Paths["/items"].Get) + sort.Sort(sort.StringSlice(consumes)) + assert.Equal(t, expected, consumes) + + produces = analyzer.ProducesFor(spec.Paths.Paths["/items"].Get) + sort.Sort(sort.StringSlice(produces)) + assert.Equal(t, expected, produces) + + expectedSchemes := []SecurityRequirement{SecurityRequirement{"oauth2", []string{}}, SecurityRequirement{"basic", nil}} + schemes := 
analyzer.SecurityRequirementsFor(spec.Paths.Paths["/"].Get) + assert.Equal(t, schemeNames(expectedSchemes), schemeNames(schemes)) + + securityDefinitions := analyzer.SecurityDefinitionsFor(spec.Paths.Paths["/"].Get) + assert.Equal(t, securityDefinitions["basic"], *spec.SecurityDefinitions["basic"]) + assert.Equal(t, securityDefinitions["oauth2"], *spec.SecurityDefinitions["oauth2"]) + + parameters := analyzer.ParamsFor("GET", "/") + assert.Len(t, parameters, 2) + + operations := analyzer.OperationIDs() + assert.Len(t, operations, 2) + + producers := analyzer.RequiredProduces() + assert.Len(t, producers, 2) + consumers := analyzer.RequiredConsumes() + assert.Len(t, consumers, 2) + authSchemes := analyzer.RequiredSecuritySchemes() + assert.Len(t, authSchemes, 3) + + ops := analyzer.Operations() + assert.Len(t, ops, 1) + assert.Len(t, ops["GET"], 2) + + op, ok := analyzer.OperationFor("get", "/") + assert.True(t, ok) + assert.NotNil(t, op) + + op, ok = analyzer.OperationFor("delete", "/") + assert.False(t, ok) + assert.Nil(t, op) +} + +func TestDefinitionAnalysis(t *testing.T) { + doc, err := loadSpec(filepath.Join("fixtures", "definitions.yml")) + if assert.NoError(t, err) { + analyzer := New(doc) + definitions := analyzer.allSchemas + // parameters + assertSchemaRefExists(t, definitions, "#/parameters/someParam/schema") + assertSchemaRefExists(t, definitions, "#/paths/~1some~1where~1{id}/parameters/1/schema") + assertSchemaRefExists(t, definitions, "#/paths/~1some~1where~1{id}/get/parameters/1/schema") + // responses + assertSchemaRefExists(t, definitions, "#/responses/someResponse/schema") + assertSchemaRefExists(t, definitions, "#/paths/~1some~1where~1{id}/get/responses/default/schema") + assertSchemaRefExists(t, definitions, "#/paths/~1some~1where~1{id}/get/responses/200/schema") + // definitions + assertSchemaRefExists(t, definitions, "#/definitions/tag") + assertSchemaRefExists(t, definitions, "#/definitions/tag/properties/id") + assertSchemaRefExists(t, definitions, "#/definitions/tag/properties/value") + assertSchemaRefExists(t, definitions, "#/definitions/tag/definitions/category") + assertSchemaRefExists(t, definitions, "#/definitions/tag/definitions/category/properties/id") + assertSchemaRefExists(t, definitions, "#/definitions/tag/definitions/category/properties/value") + assertSchemaRefExists(t, definitions, "#/definitions/withAdditionalProps") + assertSchemaRefExists(t, definitions, "#/definitions/withAdditionalProps/additionalProperties") + assertSchemaRefExists(t, definitions, "#/definitions/withAdditionalItems") + assertSchemaRefExists(t, definitions, "#/definitions/withAdditionalItems/items/0") + assertSchemaRefExists(t, definitions, "#/definitions/withAdditionalItems/items/1") + assertSchemaRefExists(t, definitions, "#/definitions/withAdditionalItems/additionalItems") + assertSchemaRefExists(t, definitions, "#/definitions/withNot") + assertSchemaRefExists(t, definitions, "#/definitions/withNot/not") + assertSchemaRefExists(t, definitions, "#/definitions/withAnyOf") + assertSchemaRefExists(t, definitions, "#/definitions/withAnyOf/anyOf/0") + assertSchemaRefExists(t, definitions, "#/definitions/withAnyOf/anyOf/1") + assertSchemaRefExists(t, definitions, "#/definitions/withAllOf") + assertSchemaRefExists(t, definitions, "#/definitions/withAllOf/allOf/0") + assertSchemaRefExists(t, definitions, "#/definitions/withAllOf/allOf/1") + allOfs := analyzer.allOfs + assert.Len(t, allOfs, 1) + assert.Contains(t, allOfs, "#/definitions/withAllOf") + } +} + +func loadSpec(path string) 
(*spec.Swagger, error) { + spec.PathLoader = func(path string) (json.RawMessage, error) { + ext := filepath.Ext(path) + if ext == ".yml" || ext == ".yaml" { + return fmts.YAMLDoc(path) + } + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil + } + data, err := fmts.YAMLDoc(path) + if err != nil { + return nil, err + } + + var sw spec.Swagger + if err := json.Unmarshal(data, &sw); err != nil { + return nil, err + } + return &sw, nil +} + +func TestReferenceAnalysis(t *testing.T) { + doc, err := loadSpec(filepath.Join("fixtures", "references.yml")) + if assert.NoError(t, err) { + definitions := New(doc).references + + // parameters + assertRefExists(t, definitions.parameters, "#/paths/~1some~1where~1{id}/parameters/0") + assertRefExists(t, definitions.parameters, "#/paths/~1some~1where~1{id}/get/parameters/0") + + // path items + assertRefExists(t, definitions.pathItems, "#/paths/~1other~1place") + + // responses + assertRefExists(t, definitions.responses, "#/paths/~1some~1where~1{id}/get/responses/404") + + // definitions + assertRefExists(t, definitions.schemas, "#/responses/notFound/schema") + assertRefExists(t, definitions.schemas, "#/paths/~1some~1where~1{id}/get/responses/200/schema") + assertRefExists(t, definitions.schemas, "#/definitions/tag/properties/audit") + + // items + assertRefExists(t, definitions.allRefs, "#/paths/~1some~1where~1{id}/get/parameters/1/items") + } +} + +func assertRefExists(t testing.TB, data map[string]spec.Ref, key string) bool { + if _, ok := data[key]; !ok { + return assert.Fail(t, fmt.Sprintf("expected %q to exist in the ref bag", key)) + } + return true +} + +func assertSchemaRefExists(t testing.TB, data map[string]SchemaRef, key string) bool { + if _, ok := data[key]; !ok { + return assert.Fail(t, fmt.Sprintf("expected %q to exist in schema ref bag", key)) + } + return true +} + +func TestPatternAnalysis(t *testing.T) { + doc, err := loadSpec(filepath.Join("fixtures", "patterns.yml")) + if assert.NoError(t, err) { + pt := New(doc).patterns + + // parameters + assertPattern(t, pt.parameters, "#/parameters/idParam", "a[A-Za-Z0-9]+") + assertPattern(t, pt.parameters, "#/paths/~1some~1where~1{id}/parameters/1", "b[A-Za-z0-9]+") + assertPattern(t, pt.parameters, "#/paths/~1some~1where~1{id}/get/parameters/0", "[abc][0-9]+") + + // responses + assertPattern(t, pt.headers, "#/responses/notFound/headers/ContentLength", "[0-9]+") + assertPattern(t, pt.headers, "#/paths/~1some~1where~1{id}/get/responses/200/headers/X-Request-Id", "d[A-Za-z0-9]+") + + // definitions + assertPattern(t, pt.schemas, "#/paths/~1other~1place/post/parameters/0/schema/properties/value", "e[A-Za-z0-9]+") + assertPattern(t, pt.schemas, "#/paths/~1other~1place/post/responses/200/schema/properties/data", "[0-9]+[abd]") + assertPattern(t, pt.schemas, "#/definitions/named", "f[A-Za-z0-9]+") + assertPattern(t, pt.schemas, "#/definitions/tag/properties/value", "g[A-Za-z0-9]+") + + // items + assertPattern(t, pt.items, "#/paths/~1some~1where~1{id}/get/parameters/1/items", "c[A-Za-z0-9]+") + assertPattern(t, pt.items, "#/paths/~1other~1place/post/responses/default/headers/Via/items", "[A-Za-z]+") + } +} + +func assertPattern(t testing.TB, data map[string]string, key, pattern string) bool { + if assert.Contains(t, data, key) { + return assert.Equal(t, pattern, data[key]) + } + return false +} diff --git a/vendor/github.com/go-openapi/analysis/fixtures/allOf.yml b/vendor/github.com/go-openapi/analysis/fixtures/allOf.yml new file 
mode 100644 index 000000000..531c33aa6 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/allOf.yml @@ -0,0 +1,43 @@ +--- +swagger: "2.0" +info: + version: "0.1.0" + title: allOf analysis +paths: + "/some/where/{id}": + parameters: + - name: id + in: path + type: integer + format: int32 + - name: bodyId + in: body + schema: + type: object + get: + parameters: + - name: limit + in: query + type: integer + format: int32 + required: false + - name: body + in: body + schema: + type: object + responses: + default: + schema: + type: object + 200: + schema: + type: object +definitions: + tag: + type: object + properties: + id: + type: integer + format: int64 + value: + type: string diff --git a/vendor/github.com/go-openapi/analysis/fixtures/bar-crud.yml b/vendor/github.com/go-openapi/analysis/fixtures/bar-crud.yml new file mode 100644 index 000000000..90000f46b --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/bar-crud.yml @@ -0,0 +1,180 @@ +--- +swagger: '2.0' +info: + title: bar CRUD API + version: 4.2.0 +schemes: + - http +basePath: /api +consumes: + - application/json +produces: + - application/json +paths: + /common: + get: + operationId: commonGet + summary: here to test path collisons + responses: + '200': + description: OK + schema: + $ref: "#/definitions/bar" + /bars: + post: + operationId: create + summary: Create a new bar + parameters: + - name: info + in: body + schema: + $ref: "#/definitions/bar" + responses: + '201': + description: created + schema: + $ref: "#/definitions/barId" + default: + description: error + schema: + $ref: "#/definitions/error" + /bars/{barid}: + get: + operationId: get + summary: Get a bar by id + parameters: + - $ref: "#/parameters/barid" + responses: + '200': + description: OK + schema: + $ref: "#/definitions/bar" + '401': + $ref: "#/responses/401" + '404': + $ref: "#/responses/404" + default: + description: error + schema: + $ref: "#/definitions/error" + delete: + operationId: delete + summary: delete a bar by id + parameters: + - name: barid + in: path + required: true + type: string + responses: + '200': + description: OK + '401': + description: unauthorized + schema: + $ref: "#/definitions/error" + '404': + description: resource not found + schema: + $ref: "#/definitions/error" + default: + description: error + schema: + $ref: "#/definitions/error" + post: + operationId: update + summary: update a bar by id + parameters: + - name: barid + in: path + required: true + type: string + - name: info + in: body + schema: + $ref: "#/definitions/bar" + responses: + '200': + description: OK + '401': + description: unauthorized + schema: + $ref: "#/definitions/error" + '404': + description: resource not found + schema: + $ref: "#/definitions/error" + default: + description: error + schema: + $ref: "#/definitions/error" + +definitions: + common: + type: object + required: + - id + properties: + id: + type: string + format: string + minLength: 1 + bar: + type: object + required: + - name + - description + properties: + id: + type: string + format: string + readOnly: true + name: + type: string + format: string + minLength: 1 + description: + type: string + format: string + minLength: 1 + barId: + type: object + required: + - id + properties: + id: + type: string + format: string + minLength: 1 + error: + type: object + required: + - message + properties: + code: + type: string + format: string + message: + type: string + fields: + type: string + +parameters: + common: + name: common + in: query + type: string + barid: + name: barid + 
in: path + required: true + type: string + +responses: + 401: + description: bar unauthorized + schema: + $ref: "#/definitions/error" + 404: + description: bar resource not found + schema: + $ref: "#/definitions/error" diff --git a/vendor/github.com/go-openapi/analysis/fixtures/definitions.yml b/vendor/github.com/go-openapi/analysis/fixtures/definitions.yml new file mode 100644 index 000000000..63cb17a65 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/definitions.yml @@ -0,0 +1,86 @@ +--- +swagger: "2.0" +info: + version: "0.1.0" + title: Definition analysis +parameters: + someParam: + name: someParam + in: body + schema: + type: object +responses: + someResponse: + schema: + type: object +paths: + "/some/where/{id}": + parameters: + - name: id + in: path + type: integer + format: int32 + - name: bodyId + in: body + schema: + type: object + get: + parameters: + - name: limit + in: query + type: integer + format: int32 + required: false + - name: body + in: body + schema: + type: object + responses: + default: + schema: + type: object + 200: + schema: + type: object +definitions: + tag: + type: object + properties: + id: + type: integer + format: int64 + value: + type: string + definitions: + category: + type: object + properties: + id: + type: integer + format: int32 + value: + type: string + withAdditionalProps: + type: object + additionalProperties: + type: boolean + withAdditionalItems: + type: array + items: + - type: string + - type: bool + additionalItems: + type: integer + format: int32 + withNot: + type: object + not: + $ref: "#/definitions/tag" + withAnyOf: + anyOf: + - type: object + - type: string + withAllOf: + allOf: + - type: object + - type: string diff --git a/vendor/github.com/go-openapi/analysis/fixtures/empty-paths.json b/vendor/github.com/go-openapi/analysis/fixtures/empty-paths.json new file mode 100644 index 000000000..30e600923 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/empty-paths.json @@ -0,0 +1,8 @@ +{ + "swagger": "2.0", + "info": { + "title": "empty-paths", + "version": "79.2.1" + }, + "paths": {} +} diff --git a/vendor/github.com/go-openapi/analysis/fixtures/external/definitions.yml b/vendor/github.com/go-openapi/analysis/fixtures/external/definitions.yml new file mode 100644 index 000000000..009cba27c --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/external/definitions.yml @@ -0,0 +1,50 @@ +definitions: + named: + type: string + tag: + type: object + properties: + id: + type: integer + format: int64 + value: + type: string + audit: + $ref: "#/definitions/record" + record: + type: object + properties: + createdAt: + type: string + format: date-time + + nestedThing: + type: object + properties: + record: + type: array + items: + - type: string + format: date-time + - type: object + properties: + createdAt: + type: string + format: date-time + - allOf: + - type: string + format: date + - type: object + additionalProperties: + type: object + properties: + id: + type: integer + format: int64 + value: + type: string + properties: + value: + type: string + name: + $ref: "definitions2.yml#/coordinate" \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/fixtures/external/definitions2.yml b/vendor/github.com/go-openapi/analysis/fixtures/external/definitions2.yml new file mode 100644 index 000000000..08f70c0ea --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/external/definitions2.yml @@ -0,0 +1,9 @@ +coordinate: + type: object + properties: + id: + type: integer + 
format: int64 + createdAt: + type: string + format: date-time \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/fixtures/external/errors.yml b/vendor/github.com/go-openapi/analysis/fixtures/external/errors.yml new file mode 100644 index 000000000..d37a1bdd4 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/external/errors.yml @@ -0,0 +1,13 @@ +error: + type: object + required: + - id + - message + properties: + id: + type: integer + format: int64 + readOnly: true + message: + type: string + readOnly: true diff --git a/vendor/github.com/go-openapi/analysis/fixtures/external/nestedParams.yml b/vendor/github.com/go-openapi/analysis/fixtures/external/nestedParams.yml new file mode 100644 index 000000000..c11c0d8f0 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/external/nestedParams.yml @@ -0,0 +1,35 @@ +bodyParam: + name: body + in: body + schema: + type: object + properties: + record: + type: array + items: + - type: string + format: date-time + - type: object + properties: + createdAt: + type: string + format: date-time + - allOf: + - type: string + format: date + - type: object + properties: + id: + type: integer + format: int64 + value: + type: string + name: + type: object + properties: + id: + type: integer + format: int64 + createdAt: + type: string + format: date-time \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/fixtures/external/nestedResponses.yml b/vendor/github.com/go-openapi/analysis/fixtures/external/nestedResponses.yml new file mode 100644 index 000000000..2d6583c05 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/external/nestedResponses.yml @@ -0,0 +1,32 @@ +genericResponse: + type: object + properties: + record: + type: array + items: + - type: string + format: date-time + - type: object + properties: + createdAt: + type: string + format: date-time + - allOf: + - type: string + format: date + - type: object + properties: + id: + type: integer + format: int64 + value: + type: string + name: + type: object + properties: + id: + type: integer + format: int64 + createdAt: + type: string + format: date-time \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/fixtures/external/parameters.yml b/vendor/github.com/go-openapi/analysis/fixtures/external/parameters.yml new file mode 100644 index 000000000..b31a4485a --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/external/parameters.yml @@ -0,0 +1,12 @@ +parameters: + idParam: + name: id + in: path + type: integer + format: int32 + limitParam: + name: limit + in: query + type: integer + format: int32 + required: false \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/fixtures/external/pathItem.yml b/vendor/github.com/go-openapi/analysis/fixtures/external/pathItem.yml new file mode 100644 index 000000000..22a7aa634 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/external/pathItem.yml @@ -0,0 +1,9 @@ +get: + operationId: modelOp + summary: many model variations + description: Used to see if a codegen can render all the possible parameter variations for a header param + tags: + - testcgen + responses: + default: + description: Generic Out \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/fixtures/external/responses.yml b/vendor/github.com/go-openapi/analysis/fixtures/external/responses.yml new file mode 100644 index 000000000..8730d64c1 --- /dev/null +++ 
b/vendor/github.com/go-openapi/analysis/fixtures/external/responses.yml @@ -0,0 +1,4 @@ +responses: + notFound: + schema: + $ref: "errors.yml#/error" \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/fixtures/external_definitions.yml b/vendor/github.com/go-openapi/analysis/fixtures/external_definitions.yml new file mode 100644 index 000000000..032f2f320 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/external_definitions.yml @@ -0,0 +1,95 @@ +--- +swagger: "2.0" +info: + version: "0.1.0" + title: reference analysis + +parameters: + someParam: + name: someParam + in: body + schema: + $ref: "external/definitions.yml#/definitions/record" +responses: + someResponse: + schema: + $ref: "external/definitions.yml#/definitions/record" +paths: + "/some/where/{id}": + parameters: + - $ref: "external/parameters.yml#/parameters/idParam" + + - name: bodyId + in: body + schema: + $ref: "external/definitions.yml#/definitions/record" + get: + parameters: + - $ref: "external/parameters.yml#/parameters/limitParam" + - name: other + in: query + type: array + items: + $ref: "external/definitions.yml#/definitions/named" + - name: body + in: body + schema: + $ref: "external/definitions.yml#/definitions/record" + responses: + default: + schema: + $ref: "external/definitions.yml#/definitions/record" + 404: + $ref: "external/responses.yml#/responses/notFound" + 200: + schema: + $ref: "external/definitions.yml#/definitions/tag" + "/other/place": + $ref: "external/pathItem.yml" + +definitions: + namedAgain: + $ref: "external/definitions.yml#/definitions/named" + + datedTag: + allOf: + - type: string + format: date + - $ref: "external/definitions.yml#/definitions/tag" + + records: + type: array + items: + - $ref: "external/definitions.yml#/definitions/record" + + datedRecords: + type: array + items: + - type: string + format: date-time + - $ref: "external/definitions.yml#/definitions/record" + + datedTaggedRecords: + type: array + items: + - type: string + format: date-time + - $ref: "external/definitions.yml#/definitions/record" + additionalItems: + $ref: "external/definitions.yml#/definitions/tag" + + otherRecords: + type: array + items: + $ref: "external/definitions.yml#/definitions/record" + + tags: + type: object + additionalProperties: + $ref: "external/definitions.yml#/definitions/tag" + + namedThing: + type: object + properties: + name: + $ref: "external/definitions.yml#/definitions/named" \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/fixtures/flatten.yml b/vendor/github.com/go-openapi/analysis/fixtures/flatten.yml new file mode 100644 index 000000000..d3bd8d9a5 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/flatten.yml @@ -0,0 +1,85 @@ +--- +swagger: "2.0" +info: + version: "0.1.0" + title: reference analysis + +parameters: + someParam: + name: some + in: query + type: string +responses: + notFound: + description: "Not Found" + schema: + $ref: "external/errors.yml#/error" + +paths: + "/some/where/{id}": + parameters: + - $ref: "external/parameters.yml#/parameters/idParam" + + get: + parameters: + - $ref: "external/parameters.yml#/parameters/limitParam" + - $ref: "#/parameters/someParam" + - name: other + in: query + type: string + - $ref: "external/nestedParams.yml#/bodyParam" + + responses: + default: + $ref: "external/nestedResponses.yml#/genericResponse" + 404: + $ref: "#/responses/notFound" + 200: + description: "RecordHolder" + schema: + type: object + properties: + record: + $ref: 
"external/definitions.yml#/definitions/nestedThing" + "/other/place": + $ref: "external/pathItem.yml" + +definitions: + namedAgain: + $ref: "external/definitions.yml#/definitions/named" + + datedTag: + allOf: + - type: string + format: date + - $ref: "external/definitions.yml#/definitions/tag" + + records: + type: array + items: + - $ref: "external/definitions.yml#/definitions/record" + + datedRecords: + type: array + items: + - type: string + format: date-time + - $ref: "external/definitions.yml#/definitions/record" + + otherRecords: + type: array + items: + $ref: "external/definitions.yml#/definitions/record" + + tags: + type: object + additionalProperties: + $ref: "external/definitions.yml#/definitions/tag" + + namedThing: + type: object + properties: + name: + $ref: "external/definitions.yml#/definitions/named" + namedAgain: + $ref: "#/definitions/namedAgain" diff --git a/vendor/github.com/go-openapi/analysis/fixtures/foo-crud.yml b/vendor/github.com/go-openapi/analysis/fixtures/foo-crud.yml new file mode 100644 index 000000000..2e3d1f60f --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/foo-crud.yml @@ -0,0 +1,180 @@ +--- +swagger: '2.0' +info: + title: foo CRUD API + version: 4.2.0 +schemes: + - http +basePath: /api +consumes: + - application/json +produces: + - application/json +paths: + /common: + get: + operationId: commonGet + summary: here to test path collisons + responses: + '200': + description: OK + schema: + $ref: "#/definitions/foo" + /foos: + post: + operationId: create + summary: Create a new foo + parameters: + - name: info + in: body + schema: + $ref: "#/definitions/foo" + responses: + '201': + description: created + schema: + $ref: "#/definitions/fooId" + default: + description: error + schema: + $ref: "#/definitions/error" + /foos/{fooid}: + get: + operationId: get + summary: Get a foo by id + parameters: + - $ref: "#/parameters/fooid" + responses: + '200': + description: OK + schema: + $ref: "#/definitions/foo" + '401': + $ref: "#/responses/401" + '404': + $ref: "#/responses/404" + default: + description: error + schema: + $ref: "#/definitions/error" + delete: + operationId: delete + summary: delete a foo by id + parameters: + - name: fooid + in: path + required: true + type: string + responses: + '200': + description: OK + '401': + description: unauthorized + schema: + $ref: "#/definitions/error" + '404': + description: resource not found + schema: + $ref: "#/definitions/error" + default: + description: error + schema: + $ref: "#/definitions/error" + post: + operationId: update + summary: update a foo by id + parameters: + - name: fooid + in: path + required: true + type: string + - name: info + in: body + schema: + $ref: "#/definitions/foo" + responses: + '200': + description: OK + '401': + description: unauthorized + schema: + $ref: "#/definitions/error" + '404': + description: resource not found + schema: + $ref: "#/definitions/error" + default: + description: error + schema: + $ref: "#/definitions/error" + +definitions: + common: + type: object + required: + - id + properties: + id: + type: string + format: string + minLength: 1 + foo: + type: object + required: + - name + - description + properties: + id: + type: string + format: string + readOnly: true + name: + type: string + format: string + minLength: 1 + description: + type: string + format: string + minLength: 1 + fooId: + type: object + required: + - id + properties: + id: + type: string + format: string + minLength: 1 + error: + type: object + required: + - message + properties: + 
code: + type: string + format: string + message: + type: string + fields: + type: string + +parameters: + common: + name: common + in: query + type: string + fooid: + name: fooid + in: path + required: true + type: string + +responses: + 401: + description: foo unauthorized + schema: + $ref: "#/definitions/error" + 404: + description: foo resource not found + schema: + $ref: "#/definitions/error" diff --git a/vendor/github.com/go-openapi/analysis/fixtures/inline_schemas.yml b/vendor/github.com/go-openapi/analysis/fixtures/inline_schemas.yml new file mode 100644 index 000000000..59699571e --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/inline_schemas.yml @@ -0,0 +1,187 @@ +--- +swagger: "2.0" +info: + version: "0.1.0" + title: reference analysis + +parameters: + someParam: + name: someParam + in: body + schema: + type: object + properties: + createdAt: + type: string + format: date-time +responses: + someResponse: + schema: + type: object + properties: + createdAt: + type: string + format: date-time +paths: + "/some/where/{id}": + parameters: + - name: id + in: path + type: integer + format: int32 + + - name: bodyId + in: body + schema: + type: object + properties: + createdAt: + type: string + format: date-time + post: + responses: + default: + description: all good + get: + parameters: + - name: limit + in: query + type: integer + format: int32 + required: false + - name: other + in: query + type: array + items: + type: object + properties: + id: + type: integer + format: int64 + - name: body + in: body + schema: + type: object + properties: + createdAt: + type: string + format: date-time + responses: + default: + schema: + type: object + properties: + createdAt: + type: string + format: date-time + 404: + schema: + $ref: "errors.yml#/error" + 200: + schema: + type: object + properties: + id: + type: integer + format: int64 + value: + type: string + "/other/place": + $ref: "external/pathItem.yml" + +definitions: + namedAgain: + type: object + properties: + id: + type: integer + format: int64 + + datedTag: + allOf: + - type: string + format: date + - type: object + properties: + id: + type: integer + format: int64 + value: + type: string + + records: + type: array + items: + - type: object + properties: + createdAt: + type: string + format: date-time + + datedRecords: + type: array + items: + - type: string + format: date-time + - type: object + properties: + createdAt: + type: string + format: date-time + + datedTaggedRecords: + type: array + items: + - type: string + format: date-time + - type: object + properties: + createdAt: + type: string + format: date-time + additionalItems: + type: object + properties: + id: + type: integer + format: int64 + value: + type: string + + otherRecords: + type: array + items: + type: object + properties: + createdAt: + type: string + format: date-time + + tags: + type: object + additionalProperties: + type: object + properties: + id: + type: integer + format: int64 + value: + type: string + + namedThing: + type: object + properties: + name: + type: object + properties: + id: + type: integer + format: int64 + + # depth first should have this at the bottom, it's just a very long name + pneumonoultramicroscopicsilicovolcanoconiosisAntidisestablishmentarianism: + type: object + properties: + floccinaucinihilipilificationCreatedAt: + type: integer + format: int64 diff --git a/vendor/github.com/go-openapi/analysis/fixtures/nested_inline_schemas.yml b/vendor/github.com/go-openapi/analysis/fixtures/nested_inline_schemas.yml new file mode 
100644 index 000000000..6acf3b340 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/nested_inline_schemas.yml @@ -0,0 +1,298 @@ +--- +swagger: "2.0" +info: + version: "0.1.0" + title: reference analysis + +parameters: + someParam: + name: someParam + in: body + schema: + type: object + properties: + createdAt: + type: string + format: date-time +responses: + someResponse: + schema: + type: object + properties: + createdAt: + type: string + format: date-time +paths: + "/some/where/{id}": + parameters: + - name: id + in: path + type: integer + format: int32 + + - name: bodyId + in: body + schema: + type: array + items: + type: object + properties: + createdAt: + type: string + format: date-time + post: + responses: + default: + description: all good + get: + parameters: + - name: limit + in: query + type: integer + format: int32 + required: false + - name: other + in: query + type: array + items: + type: object + properties: + id: + type: integer + format: int64 + - name: body + in: body + schema: + type: object + properties: + record: + type: array + items: + - type: string + format: date-time + - type: object + properties: + createdAt: + type: string + format: date-time + - allOf: + - type: string + format: date + - type: object + properties: + id: + type: integer + format: int64 + value: + type: string + name: + type: object + properties: + id: + type: integer + format: int64 + createdAt: + type: string + format: date-time + responses: + default: + schema: + type: object + properties: + record: + type: array + items: + - type: string + format: date-time + - type: object + properties: + createdAt: + type: string + format: date-time + - allOf: + - type: string + format: date + - type: object + properties: + id: + type: integer + format: int64 + value: + type: string + name: + type: object + properties: + id: + type: integer + format: int64 + createdAt: + type: string + format: date-time + 404: + schema: + $ref: "errors.yml#/error" + 200: + schema: + type: object + properties: + record: + type: array + items: + - type: string + format: date-time + - type: object + properties: + createdAt: + type: string + format: date-time + - allOf: + - type: string + format: date + - type: object + properties: + id: + type: integer + format: int64 + value: + type: string + name: + type: object + properties: + id: + type: integer + format: int64 + createdAt: + type: string + format: date-time + "/other/place": + $ref: "external/pathItem.yml" + +definitions: + namedAgain: + type: object + properties: + id: + type: integer + format: int64 + + datedTag: + allOf: + - type: string + format: date + - type: object + properties: + id: + type: integer + format: int64 + value: + type: string + + records: + type: array + items: + - type: object + properties: + createdAt: + type: string + format: date-time + + datedRecords: + type: array + items: + - type: string + format: date-time + - type: object + properties: + createdAt: + type: string + format: date-time + + datedTaggedRecords: + type: array + items: + - type: string + format: date-time + - type: object + properties: + createdAt: + type: string + format: date-time + additionalItems: + type: object + properties: + id: + type: integer + format: int64 + value: + type: string + + otherRecords: + type: array + items: + type: object + properties: + createdAt: + type: string + format: date-time + + tags: + type: object + additionalProperties: + type: object + properties: + id: + type: integer + format: int64 + value: + type: string + + namedThing: + 
type: object + properties: + name: + type: object + properties: + id: + type: integer + format: int64 + + nestedThing: + type: object + properties: + record: + type: array + items: + - type: string + format: date-time + - type: object + properties: + createdAt: + type: string + format: date-time + - allOf: + - type: string + format: date + - type: object + additionalProperties: + type: object + properties: + id: + type: integer + format: int64 + value: + type: string + properties: + id: + type: integer + format: int64 + value: + type: string + name: + type: object + properties: + id: + type: integer + format: int64 + createdAt: + type: string + format: date-time \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/fixtures/no-paths.yml b/vendor/github.com/go-openapi/analysis/fixtures/no-paths.yml new file mode 100644 index 000000000..783f59c42 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/no-paths.yml @@ -0,0 +1,38 @@ +--- +swagger: '2.0' +info: + title: no paths API + version: 4.1.7 +schemes: + - http +basePath: /wooble +consumes: + - application/json +produces: + - application/json +paths: +definitions: + common: + type: object + required: + - id + properties: + id: + type: string + format: string + minLength: 1 +parameters: + common: + name: common + in: query + type: string + +responses: + 401: + description: bar unauthorized + schema: + $ref: "#/definitions/error" + 404: + description: bar resource not found + schema: + $ref: "#/definitions/error" diff --git a/vendor/github.com/go-openapi/analysis/fixtures/patterns.yml b/vendor/github.com/go-openapi/analysis/fixtures/patterns.yml new file mode 100644 index 000000000..3fe1ab516 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/patterns.yml @@ -0,0 +1,124 @@ +--- +swagger: "2.0" +info: + version: "0.1.0" + title: reference analysis + +parameters: + idParam: + name: id + in: path + type: string + pattern: 'a[A-Za-Z0-9]+' + +responses: + notFound: + headers: + ContentLength: + type: string + pattern: '[0-9]+' + schema: + $ref: "#/definitions/error" + +paths: + "/some/where/{id}": + parameters: + - $ref: "#/parameters/idParam" + - name: name + in: query + pattern: 'b[A-Za-z0-9]+' + - name: bodyId + in: body + schema: + type: object + get: + parameters: + - name: filter + in: query + type: string + pattern: "[abc][0-9]+" + - name: other + in: query + type: array + items: + type: string + pattern: 'c[A-Za-z0-9]+' + - name: body + in: body + schema: + type: object + + responses: + default: + schema: + type: object + 404: + $ref: "#/responses/notFound" + 200: + headers: + X-Request-Id: + type: string + pattern: 'd[A-Za-z0-9]+' + schema: + $ref: "#/definitions/tag" + "/other/place": + post: + parameters: + - name: body + in: body + schema: + type: object + properties: + value: + type: string + pattern: 'e[A-Za-z0-9]+' + responses: + default: + headers: + Via: + type: array + items: + type: string + pattern: '[A-Za-z]+' + 200: + schema: + type: object + properties: + data: + type: string + pattern: "[0-9]+[abd]" + +definitions: + named: + type: string + pattern: 'f[A-Za-z0-9]+' + tag: + type: object + properties: + id: + type: integer + format: int64 + value: + type: string + pattern: 'g[A-Za-z0-9]+' + audit: + $ref: "#/definitions/record" + record: + type: object + properties: + createdAt: + type: string + format: date-time + error: + type: object + required: + - id + - message + properties: + id: + type: integer + format: int64 + readOnly: true + message: + type: string + 
readOnly: true diff --git a/vendor/github.com/go-openapi/analysis/fixtures/references.yml b/vendor/github.com/go-openapi/analysis/fixtures/references.yml new file mode 100644 index 000000000..b57176635 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/references.yml @@ -0,0 +1,89 @@ +--- +swagger: "2.0" +info: + version: "0.1.0" + title: reference analysis + +parameters: + idParam: + name: id + in: path + type: integer + format: int32 + limitParam: + name: limit + in: query + type: integer + format: int32 + required: false + +responses: + notFound: + schema: + $ref: "#/definitions/error" + +paths: + "/some/where/{id}": + parameters: + - $ref: "#/parameters/idParam" + + - name: bodyId + in: body + schema: + type: object + get: + parameters: + - $ref: "#/parameters/limitParam" + - name: other + in: query + type: array + items: + $ref: "#/definitions/named" + - name: body + in: body + schema: + type: object + responses: + default: + schema: + type: object + 404: + $ref: "#/responses/notFound" + 200: + schema: + $ref: "#/definitions/tag" + "/other/place": + $ref: "#/x-shared-path/getItems" + +definitions: + named: + type: string + tag: + type: object + properties: + id: + type: integer + format: int64 + value: + type: string + audit: + $ref: "#/definitions/record" + record: + type: object + properties: + createdAt: + type: string + format: date-time + error: + type: object + required: + - id + - message + properties: + id: + type: integer + format: int64 + readOnly: true + message: + type: string + readOnly: true diff --git a/vendor/github.com/go-openapi/analysis/fixtures/widget-crud.yml b/vendor/github.com/go-openapi/analysis/fixtures/widget-crud.yml new file mode 100644 index 000000000..d1acb9277 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixtures/widget-crud.yml @@ -0,0 +1,181 @@ +--- +swagger: '2.0' +info: + title: widget CRUD API + version: 4.2.0 +schemes: + - http +basePath: /api +consumes: + - application/json +produces: + - application/json +paths: + /common: + get: + operationId: commonGet + summary: here to test path collisons + responses: + '200': + description: OK + schema: + $ref: "#/definitions/widget" + + /widgets: + post: + operationId: create + summary: Create a new widget + parameters: + - name: info + in: body + schema: + $ref: "#/definitions/widget" + responses: + '201': + description: created + schema: + $ref: "#/definitions/widgetId" + default: + description: error + schema: + $ref: "#/definitions/error" + /widgets/{widgetid}: + get: + operationId: get + summary: Get a widget by id + parameters: + - $ref: "#/parameters/widgetid" + responses: + '200': + description: OK + schema: + $ref: "#/definitions/widget" + '401': + $ref: "#/responses/401" + '404': + $ref: "#/responses/404" + default: + description: error + schema: + $ref: "#/definitions/error" + delete: + operationId: delete + summary: delete a widget by id + parameters: + - name: widgetid + in: path + required: true + type: string + responses: + '200': + description: OK + '401': + description: unauthorized + schema: + $ref: "#/definitions/error" + '404': + description: resource not found + schema: + $ref: "#/definitions/error" + default: + description: error + schema: + $ref: "#/definitions/error" + post: + operationId: update + summary: update a widget by id + parameters: + - name: widgetid + in: path + required: true + type: string + - name: info + in: body + schema: + $ref: "#/definitions/widget" + responses: + '200': + description: OK + '401': + description: unauthorized + schema: 
+ $ref: "#/definitions/error" + '404': + description: resource not found + schema: + $ref: "#/definitions/error" + default: + description: error + schema: + $ref: "#/definitions/error" + +definitions: + common: + type: object + required: + - id + properties: + id: + type: string + format: string + minLength: 1 + widget: + type: object + required: + - name + - description + properties: + id: + type: string + format: string + readOnly: true + name: + type: string + format: string + minLength: 1 + description: + type: string + format: string + minLength: 1 + widgetId: + type: object + required: + - id + properties: + id: + type: string + format: string + minLength: 1 + error: + type: object + required: + - message + properties: + code: + type: string + format: string + message: + type: string + fields: + type: string + +parameters: + common: + name: common + in: query + type: string + widgetid: + name: widgetid + in: path + required: true + type: string + +responses: + 401: + description: widget unauthorized + schema: + $ref: "#/definitions/error" + 404: + description: widget resource not found + schema: + $ref: "#/definitions/error" diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go new file mode 100644 index 000000000..23d7242f4 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/flatten.go @@ -0,0 +1,756 @@ +package analysis + +import ( + "fmt" + "log" + "net/http" + "path" + "sort" + "strings" + + "strconv" + + "github.com/go-openapi/jsonpointer" + swspec "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// FlattenOpts configuration for flattening a swagger specification. +type FlattenOpts struct { + Spec *Spec + BasePath string + + _ struct{} // require keys +} + +// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. +func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *swspec.ExpandOptions { + return &swspec.ExpandOptions{RelativeBase: f.BasePath, SkipSchemas: skipSchemas} +} + +// Swagger gets the swagger specification for this flatten operation +func (f *FlattenOpts) Swagger() *swspec.Swagger { + return f.Spec.spec +} + +// Flatten an analyzed spec. +// +// To flatten a spec means: +// +// Expand the parameters, responses, path items, parameter items and header items. +// Import external (http, file) references so they become internal to the document. +// Move every inline schema to be a definition with an auto-generated name in a depth-first fashion. +// Rewritten schemas get a vendor extension x-go-gen-location so we know in which package they need to be rendered. +func Flatten(opts FlattenOpts) error { + // recursively expand responses, parameters, path items and items + err := swspec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(true)) + if err != nil { + return err + } + opts.Spec.reload() // re-analyze + + // at this point there are no other references left but schemas + if err := importExternalReferences(&opts); err != nil { + return err + } + opts.Spec.reload() // re-analyze + + // rewrite the inline schemas (schemas that aren't simple types or arrays of simple types) + if err := nameInlinedSchemas(&opts); err != nil { + return err + } + opts.Spec.reload() // re-analyze + + // TODO: simplifiy known schema patterns to flat objects with properties? 
+ return nil +} + +func nameInlinedSchemas(opts *FlattenOpts) error { + namer := &inlineSchemaNamer{Spec: opts.Swagger(), Operations: opRefsByRef(gatherOperations(opts.Spec, nil))} + depthFirst := sortDepthFirst(opts.Spec.allSchemas) + + for _, key := range depthFirst { + sch := opts.Spec.allSchemas[key] + if sch.Schema != nil && sch.Schema.Ref.String() == "" && !sch.TopLevel { // inline schema + asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if err != nil { + return fmt.Errorf("schema analysis [%s]: %v", sch.Ref.String(), err) + } + + if !asch.IsSimpleSchema { // complex schemas get moved + if err := namer.Name(key, sch.Schema, asch); err != nil { + return err + } + } + } + } + return nil +} + +var depthGroupOrder = []string{"sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition"} + +func sortDepthFirst(data map[string]SchemaRef) (sorted []string) { + // group by category (shared params, op param, statuscode response, default response, definitions) + // sort groups internally by number of parts in the key and lexical names + // flatten groups into a single list of keys + grouped := make(map[string]keys, len(data)) + for k := range data { + split := keyParts(k) + var pk string + if split.IsSharedOperationParam() { + pk = "sharedOpParam" + } + if split.IsOperationParam() { + pk = "opParam" + } + if split.IsStatusCodeResponse() { + pk = "codeResponse" + } + if split.IsDefaultResponse() { + pk = "defaultResponse" + } + if split.IsDefinition() { + pk = "definition" + } + grouped[pk] = append(grouped[pk], key{len(split), k}) + } + + for _, pk := range depthGroupOrder { + res := grouped[pk] + sort.Sort(res) + for _, v := range res { + sorted = append(sorted, v.Key) + } + } + + return +} + +type key struct { + Segments int + Key string +} +type keys []key + +func (k keys) Len() int { return len(k) } +func (k keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k keys) Less(i, j int) bool { + return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key) +} + +type inlineSchemaNamer struct { + Spec *swspec.Swagger + Operations map[string]opRef +} + +func opRefsByRef(oprefs map[string]opRef) map[string]opRef { + result := make(map[string]opRef, len(oprefs)) + for _, v := range oprefs { + result[v.Ref.String()] = v + } + return result +} + +func (isn *inlineSchemaNamer) Name(key string, schema *swspec.Schema, aschema *AnalyzedSchema) error { + if swspec.Debug { + log.Printf("naming inlined schema at %s", key) + } + + parts := keyParts(key) + for _, name := range namesFromKey(parts, aschema, isn.Operations) { + if name != "" { + // create unique name + newName := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name)) + + // clone schema + sch, err := cloneSchema(schema) + if err != nil { + return err + } + + // replace values on schema + if err := rewriteSchemaToRef(isn.Spec, key, swspec.MustCreateRef("#/definitions/"+newName)); err != nil { + return fmt.Errorf("name inlined schema: %v", err) + } + + sch.AddExtension("x-go-gen-location", genLocation(parts)) + // fmt.Printf("{\n %q,\n \"\",\n spec.MustCreateRef(%q),\n \"\",\n},\n", key, "#/definitions/"+newName) + // save cloned schema to definitions + saveSchema(isn.Spec, newName, sch) + } + } + return nil +} + +func genLocation(parts splitKey) string { + if parts.IsOperation() { + return "operations" + } + if parts.IsDefinition() { + return "models" + } + return "" +} + +func uniqifyName(definitions swspec.Definitions, name string) string 
{ + if name == "" { + name = "oaiGen" + } + if len(definitions) == 0 { + return name + } + + if _, ok := definitions[name]; !ok { + return name + } + name += "OAIGen" + var idx int + unique := name + _, known := definitions[unique] + for known { + idx++ + unique = fmt.Sprintf("%s%d", name, idx) + _, known = definitions[unique] + } + return unique +} + +func namesFromKey(parts splitKey, aschema *AnalyzedSchema, operations map[string]opRef) []string { + var baseNames [][]string + var startIndex int + if parts.IsOperation() { + // params + if parts.IsOperationParam() || parts.IsSharedOperationParam() { + piref := parts.PathItemRef() + if piref.String() != "" && parts.IsOperationParam() { + if op, ok := operations[piref.String()]; ok { + startIndex = 5 + baseNames = append(baseNames, []string{op.ID, "params", "body"}) + } + } else if parts.IsSharedOperationParam() { + pref := parts.PathRef() + for k, v := range operations { + if strings.HasPrefix(k, pref.String()) { + startIndex = 4 + baseNames = append(baseNames, []string{v.ID, "params", "body"}) + } + } + } + } + // responses + if parts.IsOperationResponse() { + piref := parts.PathItemRef() + if piref.String() != "" { + if op, ok := operations[piref.String()]; ok { + startIndex = 6 + baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"}) + } + } + } + } + + // definitions + if parts.IsDefinition() { + nm := parts.DefinitionName() + if nm != "" { + startIndex = 2 + baseNames = append(baseNames, []string{parts.DefinitionName()}) + } + } + + var result []string + for _, segments := range baseNames { + nm := parts.BuildName(segments, startIndex, aschema) + if nm != "" { + result = append(result, nm) + } + } + sort.Strings(result) + return result +} + +const ( + pths = "paths" + responses = "responses" + parameters = "parameters" + definitions = "definitions" +) + +var ignoredKeys map[string]struct{} + +func init() { + ignoredKeys = map[string]struct{}{ + "schema": {}, + "properties": {}, + "not": {}, + "anyOf": {}, + "oneOf": {}, + } +} + +type splitKey []string + +func (s splitKey) IsDefinition() bool { + return len(s) > 1 && s[0] == definitions +} + +func (s splitKey) DefinitionName() string { + if !s.IsDefinition() { + return "" + } + return s[1] +} + +func (s splitKey) BuildName(segments []string, startIndex int, aschema *AnalyzedSchema) string { + for _, part := range s[startIndex:] { + if _, ignored := ignoredKeys[part]; !ignored { + if part == "items" || part == "additionalItems" { + if aschema.IsTuple || aschema.IsTupleWithExtra { + segments = append(segments, "tuple") + } else { + segments = append(segments, "items") + } + if part == "additionalItems" { + segments = append(segments, part) + } + continue + } + segments = append(segments, part) + } + } + return strings.Join(segments, " ") +} + +func (s splitKey) IsOperation() bool { + return len(s) > 1 && s[0] == pths +} + +func (s splitKey) IsSharedOperationParam() bool { + return len(s) > 2 && s[0] == pths && s[2] == parameters +} + +func (s splitKey) IsOperationParam() bool { + return len(s) > 3 && s[0] == pths && s[3] == parameters +} + +func (s splitKey) IsOperationResponse() bool { + return len(s) > 3 && s[0] == pths && s[3] == responses +} + +func (s splitKey) IsDefaultResponse() bool { + return len(s) > 4 && s[0] == pths && s[3] == responses && s[4] == "default" +} + +func (s splitKey) IsStatusCodeResponse() bool { + isInt := func() bool { + _, err := strconv.Atoi(s[4]) + return err == nil + } + return len(s) > 4 && s[0] == pths && s[3] == responses && 
isInt() +} + +func (s splitKey) ResponseName() string { + if s.IsStatusCodeResponse() { + code, _ := strconv.Atoi(s[4]) + return http.StatusText(code) + } + if s.IsDefaultResponse() { + return "Default" + } + return "" +} + +var validMethods map[string]struct{} + +func init() { + validMethods = map[string]struct{}{ + "GET": {}, + "HEAD": {}, + "OPTIONS": {}, + "PATCH": {}, + "POST": {}, + "PUT": {}, + "DELETE": {}, + } +} + +func (s splitKey) PathItemRef() swspec.Ref { + if len(s) < 3 { + return swspec.Ref{} + } + pth, method := s[1], s[2] + if _, validMethod := validMethods[strings.ToUpper(method)]; !validMethod && !strings.HasPrefix(method, "x-") { + return swspec.Ref{} + } + return swspec.MustCreateRef("#" + path.Join("/", pths, jsonpointer.Escape(pth), strings.ToUpper(method))) +} + +func (s splitKey) PathRef() swspec.Ref { + if !s.IsOperation() { + return swspec.Ref{} + } + return swspec.MustCreateRef("#" + path.Join("/", pths, jsonpointer.Escape(s[1]))) +} + +func keyParts(key string) splitKey { + var res []string + for _, part := range strings.Split(key[1:], "/") { + if part != "" { + res = append(res, jsonpointer.Unescape(part)) + } + } + return res +} + +func rewriteSchemaToRef(spec *swspec.Swagger, key string, ref swspec.Ref) error { + if swspec.Debug { + log.Printf("rewriting schema to ref for %s with %s", key, ref.String()) + } + pth := key[1:] + ptr, err := jsonpointer.New(pth) + if err != nil { + return err + } + + value, _, err := ptr.Get(spec) + if err != nil { + return err + } + + switch refable := value.(type) { + case *swspec.Schema: + return rewriteParentRef(spec, key, ref) + case *swspec.SchemaOrBool: + if refable.Schema != nil { + refable.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + } + case *swspec.SchemaOrArray: + if refable.Schema != nil { + refable.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + } + case swspec.Schema: + return rewriteParentRef(spec, key, ref) + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +func rewriteParentRef(spec *swspec.Swagger, key string, ref swspec.Ref) error { + pth := key[1:] + parent, entry := path.Dir(pth), path.Base(pth) + if swspec.Debug { + log.Println("getting schema holder at:", parent) + } + + pptr, err := jsonpointer.New(parent) + if err != nil { + return err + } + pvalue, _, err := pptr.Get(spec) + if err != nil { + return fmt.Errorf("can't get parent for %s: %v", parent, err) + } + if swspec.Debug { + log.Printf("rewriting holder for %T", pvalue) + } + + switch container := pvalue.(type) { + case swspec.Response: + if err := rewriteParentRef(spec, "#"+parent, ref); err != nil { + return err + } + + case *swspec.Response: + container.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case *swspec.Responses: + statusCode, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", pth, err) + } + resp := container.StatusCodeResponses[statusCode] + resp.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + container.StatusCodeResponses[statusCode] = resp + + case map[string]swspec.Response: + resp := container[entry] + resp.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + container[entry] = resp + + case swspec.Parameter: + if err := rewriteParentRef(spec, "#"+parent, ref); err != nil { + return err + } + + case map[string]swspec.Parameter: + param := container[entry] + param.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + 
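// map values are struct copies, so the updated parameter must be written back
+			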
container[entry] = param + + case []swspec.Parameter: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", pth, err) + } + param := container[idx] + param.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + container[idx] = param + + case swspec.Definitions: + container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case map[string]swspec.Schema: + container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case []swspec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", pth, err) + } + container[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case *swspec.SchemaOrArray: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", pth, err) + } + container.Schemas[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + default: + return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue) + } + return nil +} + +func cloneSchema(schema *swspec.Schema) (*swspec.Schema, error) { + var sch swspec.Schema + if err := swag.FromDynamicJSON(schema, &sch); err != nil { + return nil, fmt.Errorf("name inlined schema: %v", err) + } + return &sch, nil +} + +func importExternalReferences(opts *FlattenOpts) error { + groupedRefs := reverseIndexForSchemaRefs(opts) + + for refStr, entry := range groupedRefs { + if !entry.Ref.HasFragmentOnly { + if swspec.Debug { + log.Printf("importing external schema for [%s] from %s", strings.Join(entry.Keys, ", "), refStr) + } + // resolve to actual schema + sch, err := swspec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) + if err != nil { + return err + } + if sch == nil { + return fmt.Errorf("no schema found at %s for [%s]", refStr, strings.Join(entry.Keys, ", ")) + } + if swspec.Debug { + log.Printf("importing external schema for [%s] from %s", strings.Join(entry.Keys, ", "), refStr) + } + + // generate a unique name + newName := uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref)) + if swspec.Debug { + log.Printf("new name for [%s]: %s", strings.Join(entry.Keys, ", "), newName) + } + + // rewrite the external refs to local ones + for _, key := range entry.Keys { + if err := updateRef(opts.Swagger(), key, swspec.MustCreateRef("#"+path.Join("/definitions", newName))); err != nil { + return err + } + } + + // add the resolved schema to the definitions + saveSchema(opts.Swagger(), newName, sch) + } + } + return nil +} + +type refRevIdx struct { + Ref swspec.Ref + Keys []string +} + +func reverseIndexForSchemaRefs(opts *FlattenOpts) map[string]refRevIdx { + collected := make(map[string]refRevIdx) + for key, schRef := range opts.Spec.references.schemas { + if entry, ok := collected[schRef.String()]; ok { + entry.Keys = append(entry.Keys, key) + collected[schRef.String()] = entry + } else { + collected[schRef.String()] = refRevIdx{ + Ref: schRef, + Keys: []string{key}, + } + } + } + return collected +} + +func nameFromRef(ref swspec.Ref) string { + u := ref.GetURL() + if u.Fragment != "" { + return swag.ToJSONName(path.Base(u.Fragment)) + } + if u.Path != "" { + bn := path.Base(u.Path) + if bn != "" && bn != "/" { + ext := path.Ext(bn) + if ext != "" { + return swag.ToJSONName(bn[:len(bn)-len(ext)]) + } + return swag.ToJSONName(bn) + } + } + return swag.ToJSONName(strings.Replace(u.Host, ".", " ", -1)) +} + +func saveSchema(spec *swspec.Swagger, name string, schema *swspec.Schema) { + if schema == nil { + 
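// nothing to store for a nil schema
+		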
return + } + if spec.Definitions == nil { + spec.Definitions = make(map[string]swspec.Schema, 150) + } + spec.Definitions[name] = *schema +} + +func updateRef(spec *swspec.Swagger, key string, ref swspec.Ref) error { + if swspec.Debug { + log.Printf("updating ref for %s with %s", key, ref.String()) + } + pth := key[1:] + ptr, err := jsonpointer.New(pth) + if err != nil { + return err + } + + value, _, err := ptr.Get(spec) + if err != nil { + return err + } + + switch refable := value.(type) { + case *swspec.Schema: + refable.Ref = ref + case *swspec.SchemaOrBool: + if refable.Schema != nil { + refable.Schema.Ref = ref + } + case *swspec.SchemaOrArray: + if refable.Schema != nil { + refable.Schema.Ref = ref + } + case swspec.Schema: + parent, entry := path.Dir(pth), path.Base(pth) + if swspec.Debug { + log.Println("getting schema holder at:", parent) + } + + pptr, err := jsonpointer.New(parent) + if err != nil { + return err + } + pvalue, _, err := pptr.Get(spec) + if err != nil { + return fmt.Errorf("can't get parent for %s: %v", parent, err) + } + + switch container := pvalue.(type) { + case swspec.Definitions: + container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case map[string]swspec.Schema: + container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case []swspec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", pth, err) + } + container[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case *swspec.SchemaOrArray: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", pth, err) + } + container.Schemas[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + } + + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +func containsString(names []string, name string) bool { + for _, nm := range names { + if nm == name { + return true + } + } + return false +} + +type opRef struct { + Method string + Path string + Key string + ID string + Op *swspec.Operation + Ref swspec.Ref +} + +type opRefs []opRef + +func (o opRefs) Len() int { return len(o) } +func (o opRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o opRefs) Less(i, j int) bool { return o[i].Key < o[j].Key } + +func gatherOperations(specDoc *Spec, operationIDs []string) map[string]opRef { + var oprefs opRefs + + for method, pathItem := range specDoc.Operations() { + for pth, operation := range pathItem { + vv := *operation + oprefs = append(oprefs, opRef{ + Key: swag.ToGoName(strings.ToLower(method) + " " + pth), + Method: method, + Path: pth, + ID: vv.ID, + Op: &vv, + Ref: swspec.MustCreateRef("#" + path.Join("/paths", jsonpointer.Escape(pth), method)), + }) + } + } + + sort.Sort(oprefs) + + operations := make(map[string]opRef) + for _, opr := range oprefs { + nm := opr.ID + if nm == "" { + nm = opr.Key + } + + oo, found := operations[nm] + if found && oo.Method != opr.Method && oo.Path != opr.Path { + nm = opr.Key + } + if len(operationIDs) == 0 || containsString(operationIDs, opr.ID) || containsString(operationIDs, nm) { + opr.ID = nm + opr.Op.ID = nm + operations[nm] = opr + } + } + + return operations +} diff --git a/vendor/github.com/go-openapi/analysis/flatten_test.go b/vendor/github.com/go-openapi/analysis/flatten_test.go new file mode 100644 index 000000000..6ccdf7751 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/flatten_test.go @@ -0,0 +1,805 @@ +package analysis + +import ( + 
"path/filepath" + "strings" + "testing" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" + "github.com/stretchr/testify/assert" +) + +func TestSaveDefinition(t *testing.T) { + sp := &spec.Swagger{} + saveSchema(sp, "theName", spec.StringProperty()) + assert.Contains(t, sp.Definitions, "theName") +} + +func TestNameFromRef(t *testing.T) { + values := []struct{ Source, Expected string }{ + {"#/definitions/errorModel", "errorModel"}, + {"http://somewhere.com/definitions/errorModel", "errorModel"}, + {"http://somewhere.com/definitions/errorModel.json", "errorModel"}, + {"/definitions/errorModel", "errorModel"}, + {"/definitions/errorModel.json", "errorModel"}, + {"http://somewhere.com", "somewhereCom"}, + {"#", ""}, + } + + for _, v := range values { + assert.Equal(t, v.Expected, nameFromRef(spec.MustCreateRef(v.Source))) + } +} + +func TestDefinitionName(t *testing.T) { + values := []struct { + Source, Expected string + Definitions spec.Definitions + }{ + {"#/definitions/errorModel", "errorModel", map[string]spec.Schema(nil)}, + {"http://somewhere.com/definitions/errorModel", "errorModel", map[string]spec.Schema(nil)}, + {"#/definitions/errorModel", "errorModel", map[string]spec.Schema{"apples": *spec.StringProperty()}}, + {"#/definitions/errorModel", "errorModelOAIGen", map[string]spec.Schema{"errorModel": *spec.StringProperty()}}, + {"#/definitions/errorModel", "errorModelOAIGen1", map[string]spec.Schema{"errorModel": *spec.StringProperty(), "errorModelOAIGen": *spec.StringProperty()}}, + {"#", "oaiGen", nil}, + } + + for _, v := range values { + assert.Equal(t, v.Expected, uniqifyName(v.Definitions, nameFromRef(spec.MustCreateRef(v.Source)))) + } +} + +func TestUpdateRef(t *testing.T) { + bp := filepath.Join("fixtures", "external_definitions.yml") + sp, err := loadSpec(bp) + if assert.NoError(t, err) { + + values := []struct { + Key string + Ref spec.Ref + }{ + {"#/parameters/someParam/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/paths/~1some~1where~1{id}/parameters/1/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/paths/~1some~1where~1{id}/get/parameters/2/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/responses/someResponse/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/paths/~1some~1where~1{id}/get/responses/default/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/paths/~1some~1where~1{id}/get/responses/200/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/definitions/namedAgain", spec.MustCreateRef("#/definitions/named")}, + {"#/definitions/datedTag/allOf/1", spec.MustCreateRef("#/definitions/tag")}, + {"#/definitions/datedRecords/items/1", spec.MustCreateRef("#/definitions/record")}, + {"#/definitions/datedTaggedRecords/items/1", spec.MustCreateRef("#/definitions/record")}, + {"#/definitions/datedTaggedRecords/additionalItems", spec.MustCreateRef("#/definitions/tag")}, + {"#/definitions/otherRecords/items", spec.MustCreateRef("#/definitions/record")}, + {"#/definitions/tags/additionalProperties", spec.MustCreateRef("#/definitions/tag")}, + {"#/definitions/namedThing/properties/name", spec.MustCreateRef("#/definitions/named")}, + } + + for _, v := range values { + err := updateRef(sp, v.Key, v.Ref) + if assert.NoError(t, err) { + ptr, err := jsonpointer.New(v.Key[1:]) + if assert.NoError(t, err) { + vv, _, err := ptr.Get(sp) + + if assert.NoError(t, err) { + switch tv := vv.(type) { + case *spec.Schema: + assert.Equal(t, v.Ref.String(), tv.Ref.String()) + case spec.Schema: + assert.Equal(t, 
v.Ref.String(), tv.Ref.String()) + case *spec.SchemaOrBool: + assert.Equal(t, v.Ref.String(), tv.Schema.Ref.String()) + case *spec.SchemaOrArray: + assert.Equal(t, v.Ref.String(), tv.Schema.Ref.String()) + default: + assert.Fail(t, "unknown type", "got %T", vv) + } + } + } + } + } + } +} + +func TestImportExternalReferences(t *testing.T) { + bp := filepath.Join(".", "fixtures", "external_definitions.yml") + sp, err := loadSpec(bp) + if assert.NoError(t, err) { + + values := []struct { + Key string + Ref spec.Ref + }{ + {"#/parameters/someParam/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/paths/~1some~1where~1{id}/parameters/1/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/paths/~1some~1where~1{id}/get/parameters/2/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/responses/someResponse/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/paths/~1some~1where~1{id}/get/responses/default/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/paths/~1some~1where~1{id}/get/responses/200/schema", spec.MustCreateRef("#/definitions/tag")}, + {"#/definitions/namedAgain", spec.MustCreateRef("#/definitions/named")}, + {"#/definitions/datedTag/allOf/1", spec.MustCreateRef("#/definitions/tag")}, + {"#/definitions/datedRecords/items/1", spec.MustCreateRef("#/definitions/record")}, + {"#/definitions/datedTaggedRecords/items/1", spec.MustCreateRef("#/definitions/record")}, + {"#/definitions/datedTaggedRecords/additionalItems", spec.MustCreateRef("#/definitions/tag")}, + {"#/definitions/otherRecords/items", spec.MustCreateRef("#/definitions/record")}, + {"#/definitions/tags/additionalProperties", spec.MustCreateRef("#/definitions/tag")}, + {"#/definitions/namedThing/properties/name", spec.MustCreateRef("#/definitions/named")}, + } + for _, v := range values { + // technically not necessary to run for each value, but if things go right + // this is idempotent, so having it repeat shouldn't matter + // this validates that behavior + err := importExternalReferences(&FlattenOpts{ + Spec: New(sp), + BasePath: bp, + }) + + if assert.NoError(t, err) { + + ptr, err := jsonpointer.New(v.Key[1:]) + if assert.NoError(t, err) { + vv, _, err := ptr.Get(sp) + + if assert.NoError(t, err) { + switch tv := vv.(type) { + case *spec.Schema: + assert.Equal(t, v.Ref.String(), tv.Ref.String(), "for %s", v.Key) + case spec.Schema: + assert.Equal(t, v.Ref.String(), tv.Ref.String(), "for %s", v.Key) + case *spec.SchemaOrBool: + assert.Equal(t, v.Ref.String(), tv.Schema.Ref.String(), "for %s", v.Key) + case *spec.SchemaOrArray: + assert.Equal(t, v.Ref.String(), tv.Schema.Ref.String(), "for %s", v.Key) + default: + assert.Fail(t, "unknown type", "got %T", vv) + } + } + } + } + } + assert.Len(t, sp.Definitions, 11) + assert.Contains(t, sp.Definitions, "tag") + assert.Contains(t, sp.Definitions, "named") + assert.Contains(t, sp.Definitions, "record") + } +} + +func TestRewriteSchemaRef(t *testing.T) { + bp := filepath.Join("fixtures", "inline_schemas.yml") + sp, err := loadSpec(bp) + if assert.NoError(t, err) { + + values := []struct { + Key string + Ref spec.Ref + }{ + {"#/parameters/someParam/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/paths/~1some~1where~1{id}/parameters/1/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/paths/~1some~1where~1{id}/get/parameters/2/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/responses/someResponse/schema", spec.MustCreateRef("#/definitions/record")}, + 
{"#/paths/~1some~1where~1{id}/get/responses/default/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/paths/~1some~1where~1{id}/get/responses/200/schema", spec.MustCreateRef("#/definitions/record")}, + {"#/definitions/namedAgain", spec.MustCreateRef("#/definitions/named")}, + {"#/definitions/datedTag/allOf/1", spec.MustCreateRef("#/definitions/tag")}, + {"#/definitions/datedRecords/items/1", spec.MustCreateRef("#/definitions/record")}, + {"#/definitions/datedTaggedRecords/items/1", spec.MustCreateRef("#/definitions/record")}, + {"#/definitions/datedTaggedRecords/additionalItems", spec.MustCreateRef("#/definitions/tag")}, + {"#/definitions/otherRecords/items", spec.MustCreateRef("#/definitions/record")}, + {"#/definitions/tags/additionalProperties", spec.MustCreateRef("#/definitions/tag")}, + {"#/definitions/namedThing/properties/name", spec.MustCreateRef("#/definitions/named")}, + } + + for i, v := range values { + err := rewriteSchemaToRef(sp, v.Key, v.Ref) + if assert.NoError(t, err) { + ptr, err := jsonpointer.New(v.Key[1:]) + if assert.NoError(t, err) { + vv, _, err := ptr.Get(sp) + + if assert.NoError(t, err) { + switch tv := vv.(type) { + case *spec.Schema: + assert.Equal(t, v.Ref.String(), tv.Ref.String(), "at %d for %s", i, v.Key) + case spec.Schema: + assert.Equal(t, v.Ref.String(), tv.Ref.String(), "at %d for %s", i, v.Key) + case *spec.SchemaOrBool: + assert.Equal(t, v.Ref.String(), tv.Schema.Ref.String(), "at %d for %s", i, v.Key) + case *spec.SchemaOrArray: + assert.Equal(t, v.Ref.String(), tv.Schema.Ref.String(), "at %d for %s", i, v.Key) + default: + assert.Fail(t, "unknown type", "got %T", vv) + } + } + } + } + } + } +} + +func TestSplitKey(t *testing.T) { + + type KeyFlag uint64 + + const ( + isOperation KeyFlag = 1 << iota + isDefinition + isSharedOperationParam + isOperationParam + isOperationResponse + isDefaultResponse + isStatusCodeResponse + ) + + values := []struct { + Key string + Flags KeyFlag + PathItemRef spec.Ref + PathRef spec.Ref + Name string + }{ + { + "#/paths/~1some~1where~1{id}/parameters/1/schema", + isOperation | isSharedOperationParam, + spec.Ref{}, + spec.MustCreateRef("#/paths/~1some~1where~1{id}"), + "", + }, + { + "#/paths/~1some~1where~1{id}/get/parameters/2/schema", + isOperation | isOperationParam, + spec.MustCreateRef("#/paths/~1some~1where~1{id}/GET"), + spec.MustCreateRef("#/paths/~1some~1where~1{id}"), + "", + }, + { + "#/paths/~1some~1where~1{id}/get/responses/default/schema", + isOperation | isOperationResponse | isDefaultResponse, + spec.MustCreateRef("#/paths/~1some~1where~1{id}/GET"), + spec.MustCreateRef("#/paths/~1some~1where~1{id}"), + "Default", + }, + { + "#/paths/~1some~1where~1{id}/get/responses/200/schema", + isOperation | isOperationResponse | isStatusCodeResponse, + spec.MustCreateRef("#/paths/~1some~1where~1{id}/GET"), + spec.MustCreateRef("#/paths/~1some~1where~1{id}"), + "OK", + }, + { + "#/definitions/namedAgain", + isDefinition, + spec.Ref{}, + spec.Ref{}, + "namedAgain", + }, + { + "#/definitions/datedRecords/items/1", + isDefinition, + spec.Ref{}, + spec.Ref{}, + "datedRecords", + }, + { + "#/definitions/datedRecords/items/1", + isDefinition, + spec.Ref{}, + spec.Ref{}, + "datedRecords", + }, + { + "#/definitions/datedTaggedRecords/items/1", + isDefinition, + spec.Ref{}, + spec.Ref{}, + "datedTaggedRecords", + }, + { + "#/definitions/datedTaggedRecords/additionalItems", + isDefinition, + spec.Ref{}, + spec.Ref{}, + "datedTaggedRecords", + }, + { + "#/definitions/otherRecords/items", + isDefinition, + 
spec.Ref{}, + spec.Ref{}, + "otherRecords", + }, + { + "#/definitions/tags/additionalProperties", + isDefinition, + spec.Ref{}, + spec.Ref{}, + "tags", + }, + { + "#/definitions/namedThing/properties/name", + isDefinition, + spec.Ref{}, + spec.Ref{}, + "namedThing", + }, + } + + for i, v := range values { + parts := keyParts(v.Key) + pref := parts.PathRef() + piref := parts.PathItemRef() + assert.Equal(t, v.PathRef.String(), pref.String(), "pathRef: %s at %d", v.Key, i) + assert.Equal(t, v.PathItemRef.String(), piref.String(), "pathItemRef: %s at %d", v.Key, i) + + if v.Flags&isOperation != 0 { + assert.True(t, parts.IsOperation(), "isOperation: %s at %d", v.Key, i) + } else { + assert.False(t, parts.IsOperation(), "isOperation: %s at %d", v.Key, i) + } + if v.Flags&isDefinition != 0 { + assert.True(t, parts.IsDefinition(), "isDefinition: %s at %d", v.Key, i) + assert.Equal(t, v.Name, parts.DefinitionName(), "definition name: %s at %d", v.Key, i) + } else { + assert.False(t, parts.IsDefinition(), "isDefinition: %s at %d", v.Key, i) + if v.Name != "" { + assert.Equal(t, v.Name, parts.ResponseName(), "response name: %s at %d", v.Key, i) + } + } + if v.Flags&isOperationParam != 0 { + assert.True(t, parts.IsOperationParam(), "isOperationParam: %s at %d", v.Key, i) + } else { + assert.False(t, parts.IsOperationParam(), "isOperationParam: %s at %d", v.Key, i) + } + if v.Flags&isSharedOperationParam != 0 { + assert.True(t, parts.IsSharedOperationParam(), "isSharedOperationParam: %s at %d", v.Key, i) + } else { + assert.False(t, parts.IsSharedOperationParam(), "isSharedOperationParam: %s at %d", v.Key, i) + } + if v.Flags&isOperationResponse != 0 { + assert.True(t, parts.IsOperationResponse(), "isOperationResponse: %s at %d", v.Key, i) + } else { + assert.False(t, parts.IsOperationResponse(), "isOperationResponse: %s at %d", v.Key, i) + } + if v.Flags&isDefaultResponse != 0 { + assert.True(t, parts.IsDefaultResponse(), "isDefaultResponse: %s at %d", v.Key, i) + } else { + assert.False(t, parts.IsDefaultResponse(), "isDefaultResponse: %s at %d", v.Key, i) + } + if v.Flags&isStatusCodeResponse != 0 { + assert.True(t, parts.IsStatusCodeResponse(), "isStatusCodeResponse: %s at %d", v.Key, i) + } else { + assert.False(t, parts.IsStatusCodeResponse(), "isStatusCodeResponse: %s at %d", v.Key, i) + } + } +} + +func definitionPtr(key string) string { + if !strings.HasPrefix(key, "#/definitions") { + return key + } + return strings.Join(strings.Split(key, "/")[:3], "/") +} + +func TestNamesFromKey(t *testing.T) { + bp := filepath.Join("fixtures", "inline_schemas.yml") + sp, err := loadSpec(bp) + if assert.NoError(t, err) { + + values := []struct { + Key string + Names []string + }{ + {"#/paths/~1some~1where~1{id}/parameters/1/schema", []string{"GetSomeWhereID params body", "PostSomeWhereID params body"}}, + {"#/paths/~1some~1where~1{id}/get/parameters/2/schema", []string{"GetSomeWhereID params body"}}, + {"#/paths/~1some~1where~1{id}/get/responses/default/schema", []string{"GetSomeWhereID Default body"}}, + {"#/paths/~1some~1where~1{id}/get/responses/200/schema", []string{"GetSomeWhereID OK body"}}, + {"#/definitions/namedAgain", []string{"namedAgain"}}, + {"#/definitions/datedTag/allOf/1", []string{"datedTag allOf 1"}}, + {"#/definitions/datedRecords/items/1", []string{"datedRecords tuple 1"}}, + {"#/definitions/datedTaggedRecords/items/1", []string{"datedTaggedRecords tuple 1"}}, + {"#/definitions/datedTaggedRecords/additionalItems", []string{"datedTaggedRecords tuple additionalItems"}}, + 
{"#/definitions/otherRecords/items", []string{"otherRecords items"}}, + {"#/definitions/tags/additionalProperties", []string{"tags additionalProperties"}}, + {"#/definitions/namedThing/properties/name", []string{"namedThing name"}}, + } + + for i, v := range values { + ptr, err := jsonpointer.New(definitionPtr(v.Key)[1:]) + if assert.NoError(t, err) { + vv, _, err := ptr.Get(sp) + if assert.NoError(t, err) { + switch tv := vv.(type) { + case *spec.Schema: + aschema, err := Schema(SchemaOpts{Schema: tv, Root: sp, BasePath: bp}) + if assert.NoError(t, err) { + names := namesFromKey(keyParts(v.Key), aschema, opRefsByRef(gatherOperations(New(sp), nil))) + assert.Equal(t, v.Names, names, "for %s at %d", v.Key, i) + } + case spec.Schema: + aschema, err := Schema(SchemaOpts{Schema: &tv, Root: sp, BasePath: bp}) + if assert.NoError(t, err) { + names := namesFromKey(keyParts(v.Key), aschema, opRefsByRef(gatherOperations(New(sp), nil))) + assert.Equal(t, v.Names, names, "for %s at %d", v.Key, i) + } + default: + assert.Fail(t, "unknown type", "got %T", vv) + } + } + } + } + } +} + +func TestDepthFirstSort(t *testing.T) { + bp := filepath.Join("fixtures", "inline_schemas.yml") + sp, err := loadSpec(bp) + values := []string{ + "#/paths/~1some~1where~1{id}/parameters/1/schema/properties/createdAt", + "#/paths/~1some~1where~1{id}/parameters/1/schema", + "#/paths/~1some~1where~1{id}/get/parameters/2/schema/properties/createdAt", + "#/paths/~1some~1where~1{id}/get/parameters/2/schema", + "#/paths/~1some~1where~1{id}/get/responses/200/schema/properties/id", + "#/paths/~1some~1where~1{id}/get/responses/200/schema/properties/value", + "#/paths/~1some~1where~1{id}/get/responses/200/schema", + "#/paths/~1some~1where~1{id}/get/responses/404/schema", + "#/paths/~1some~1where~1{id}/get/responses/default/schema/properties/createdAt", + "#/paths/~1some~1where~1{id}/get/responses/default/schema", + "#/definitions/datedRecords/items/1/properties/createdAt", + "#/definitions/datedTaggedRecords/items/1/properties/createdAt", + "#/definitions/namedThing/properties/name/properties/id", + "#/definitions/records/items/0/properties/createdAt", + "#/definitions/datedTaggedRecords/additionalItems/properties/id", + "#/definitions/datedTaggedRecords/additionalItems/properties/value", + "#/definitions/otherRecords/items/properties/createdAt", + "#/definitions/tags/additionalProperties/properties/id", + "#/definitions/tags/additionalProperties/properties/value", + "#/definitions/datedRecords/items/0", + "#/definitions/datedRecords/items/1", + "#/definitions/datedTag/allOf/0", + "#/definitions/datedTag/allOf/1", + "#/definitions/datedTag/properties/id", + "#/definitions/datedTag/properties/value", + "#/definitions/datedTaggedRecords/items/0", + "#/definitions/datedTaggedRecords/items/1", + "#/definitions/namedAgain/properties/id", + "#/definitions/namedThing/properties/name", + "#/definitions/pneumonoultramicroscopicsilicovolcanoconiosisAntidisestablishmentarianism/properties/floccinaucinihilipilificationCreatedAt", + "#/definitions/records/items/0", + "#/definitions/datedTaggedRecords/additionalItems", + "#/definitions/otherRecords/items", + "#/definitions/tags/additionalProperties", + "#/definitions/datedRecords", + "#/definitions/datedTag", + "#/definitions/datedTaggedRecords", + "#/definitions/namedAgain", + "#/definitions/namedThing", + "#/definitions/otherRecords", + "#/definitions/pneumonoultramicroscopicsilicovolcanoconiosisAntidisestablishmentarianism", + "#/definitions/records", + "#/definitions/tags", + } + if 
assert.NoError(t, err) { + a := New(sp) + result := sortDepthFirst(a.allSchemas) + assert.Equal(t, values, result) + } +} + +func TestNameInlinedSchemas(t *testing.T) { + bp := filepath.Join(".", "fixtures", "nested_inline_schemas.yml") + sp, err := loadSpec(bp) + values := []struct { + Key string + Location string + Ref spec.Ref + }{ + {"#/paths/~1some~1where~1{id}/parameters/1/schema/items", "#/definitions/postSomeWhereIdParamsBody/items", spec.MustCreateRef("#/definitions/postSomeWhereIdParamsBodyItems")}, + {"#/paths/~1some~1where~1{id}/parameters/1/schema", "#/paths/~1some~1where~1{id}/parameters/1/schema", spec.MustCreateRef("#/definitions/postSomeWhereIdParamsBody")}, + {"#/paths/~1some~1where~1{id}/get/parameters/2/schema/properties/record/items/2/properties/name", "#/definitions/getSomeWhereIdParamsBodyRecordItems2/properties/name", spec.MustCreateRef("#/definitions/getSomeWhereIdParamsBodyRecordItems2Name")}, + {"#/paths/~1some~1where~1{id}/get/parameters/2/schema/properties/record/items/1", "#/definitions/getSomeWhereIdParamsBodyRecord/items/1", spec.MustCreateRef("#/definitions/getSomeWhereIdParamsBodyRecordItems1")}, + {"#/paths/~1some~1where~1{id}/get/parameters/2/schema/properties/record/items/2", "#/definitions/getSomeWhereIdParamsBodyRecord/items/2", spec.MustCreateRef("#/definitions/getSomeWhereIdParamsBodyRecordItems2")}, + {"#/paths/~1some~1where~1{id}/get/parameters/2/schema/properties/record", "#/definitions/getSomeWhereIdParamsBodyOAIGen/properties/record", spec.MustCreateRef("#/definitions/getSomeWhereIdParamsBodyRecord")}, + {"#/paths/~1some~1where~1{id}/get/parameters/2/schema", "#/paths/~1some~1where~1{id}/get/parameters/2/schema", spec.MustCreateRef("#/definitions/getSomeWhereIdParamsBodyOAIGen")}, + {"#/paths/~1some~1where~1{id}/get/responses/200/schema/properties/record/items/2/properties/name", "#/definitions/getSomeWhereIdOKBodyRecordItems2/properties/name", spec.MustCreateRef("#/definitions/getSomeWhereIdOKBodyRecordItems2Name")}, + {"#/paths/~1some~1where~1{id}/get/responses/200/schema/properties/record/items/1", "#/definitions/getSomeWhereIdOKBodyRecord/items/1", spec.MustCreateRef("#/definitions/getSomeWhereIdOKBodyRecordItems1")}, + {"#/paths/~1some~1where~1{id}/get/responses/200/schema/properties/record/items/2", "#/definitions/getSomeWhereIdOKBodyRecord/items/2", spec.MustCreateRef("#/definitions/getSomeWhereIdOKBodyRecordItems2")}, + {"#/paths/~1some~1where~1{id}/get/responses/200/schema/properties/record", "#/definitions/getSomeWhereIdOKBody/properties/record", spec.MustCreateRef("#/definitions/getSomeWhereIdOKBodyRecord")}, + {"#/paths/~1some~1where~1{id}/get/responses/200/schema", "#/paths/~1some~1where~1{id}/get/responses/200/schema", spec.MustCreateRef("#/definitions/getSomeWhereIdOKBody")}, + {"#/paths/~1some~1where~1{id}/get/responses/default/schema/properties/record/items/2/properties/name", "#/definitions/getSomeWhereIdDefaultBodyRecordItems2/properties/name", spec.MustCreateRef("#/definitions/getSomeWhereIdDefaultBodyRecordItems2Name")}, + {"#/paths/~1some~1where~1{id}/get/responses/default/schema/properties/record/items/1", "#/definitions/getSomeWhereIdDefaultBodyRecord/items/1", spec.MustCreateRef("#/definitions/getSomeWhereIdDefaultBodyRecordItems1")}, + {"#/paths/~1some~1where~1{id}/get/responses/default/schema/properties/record/items/2", "#/definitions/getSomeWhereIdDefaultBodyRecord/items/2", spec.MustCreateRef("#/definitions/getSomeWhereIdDefaultBodyRecordItems2")}, + 
{"#/paths/~1some~1where~1{id}/get/responses/default/schema/properties/record", "#/definitions/getSomeWhereIdDefaultBody/properties/record", spec.MustCreateRef("#/definitions/getSomeWhereIdDefaultBodyRecord")}, + {"#/paths/~1some~1where~1{id}/get/responses/default/schema", "#/paths/~1some~1where~1{id}/get/responses/default/schema", spec.MustCreateRef("#/definitions/getSomeWhereIdDefaultBody")}, + {"#/definitions/nestedThing/properties/record/items/2/allOf/1/additionalProperties", "#/definitions/nestedThingRecordItems2AllOf1/additionalProperties", spec.MustCreateRef("#/definitions/nestedThingRecordItems2AllOf1AdditionalProperties")}, + {"#/definitions/nestedThing/properties/record/items/2/allOf/1", "#/definitions/nestedThingRecordItems2/allOf/1", spec.MustCreateRef("#/definitions/nestedThingRecordItems2AllOf1")}, + {"#/definitions/nestedThing/properties/record/items/2/properties/name", "#/definitions/nestedThingRecordItems2/properties/name", spec.MustCreateRef("#/definitions/nestedThingRecordItems2Name")}, + {"#/definitions/nestedThing/properties/record/items/1", "#/definitions/nestedThingRecord/items/1", spec.MustCreateRef("#/definitions/nestedThingRecordItems1")}, + {"#/definitions/nestedThing/properties/record/items/2", "#/definitions/nestedThingRecord/items/2", spec.MustCreateRef("#/definitions/nestedThingRecordItems2")}, + {"#/definitions/datedRecords/items/1", "#/definitions/datedRecords/items/1", spec.MustCreateRef("#/definitions/datedRecordsItems1")}, + {"#/definitions/datedTaggedRecords/items/1", "#/definitions/datedTaggedRecords/items/1", spec.MustCreateRef("#/definitions/datedTaggedRecordsItems1")}, + {"#/definitions/namedThing/properties/name", "#/definitions/namedThing/properties/name", spec.MustCreateRef("#/definitions/namedThingName")}, + {"#/definitions/nestedThing/properties/record", "#/definitions/nestedThing/properties/record", spec.MustCreateRef("#/definitions/nestedThingRecord")}, + {"#/definitions/records/items/0", "#/definitions/records/items/0", spec.MustCreateRef("#/definitions/recordsItems0")}, + {"#/definitions/datedTaggedRecords/additionalItems", "#/definitions/datedTaggedRecords/additionalItems", spec.MustCreateRef("#/definitions/datedTaggedRecordsItemsAdditionalItems")}, + {"#/definitions/otherRecords/items", "#/definitions/otherRecords/items", spec.MustCreateRef("#/definitions/otherRecordsItems")}, + {"#/definitions/tags/additionalProperties", "#/definitions/tags/additionalProperties", spec.MustCreateRef("#/definitions/tagsAdditionalProperties")}, + } + if assert.NoError(t, err) { + err := nameInlinedSchemas(&FlattenOpts{ + Spec: New(sp), + BasePath: bp, + }) + + if assert.NoError(t, err) { + for i, v := range values { + ptr, err := jsonpointer.New(v.Location[1:]) + if assert.NoError(t, err, "at %d for %s", i, v.Key) { + vv, _, err := ptr.Get(sp) + + if assert.NoError(t, err, "at %d for %s", i, v.Key) { + switch tv := vv.(type) { + case *spec.Schema: + assert.Equal(t, v.Ref.String(), tv.Ref.String(), "at %d for %s", i, v.Key) + case spec.Schema: + assert.Equal(t, v.Ref.String(), tv.Ref.String(), "at %d for %s", i, v.Key) + case *spec.SchemaOrBool: + var sRef spec.Ref + if tv != nil && tv.Schema != nil { + sRef = tv.Schema.Ref + } + assert.Equal(t, v.Ref.String(), sRef.String(), "at %d for %s", i, v.Key) + case *spec.SchemaOrArray: + var sRef spec.Ref + if tv != nil && tv.Schema != nil { + sRef = tv.Schema.Ref + } + assert.Equal(t, v.Ref.String(), sRef.String(), "at %d for %s", i, v.Key) + default: + assert.Fail(t, "unknown type", "got %T", vv) + } + } + } + } + 
} + + for k, rr := range New(sp).allSchemas { + if !strings.HasPrefix(k, "#/responses") && !strings.HasPrefix(k, "#/parameters") { + if rr.Schema != nil && rr.Schema.Ref.String() == "" && !rr.TopLevel { + asch, err := Schema(SchemaOpts{Schema: rr.Schema, Root: sp, BasePath: bp}) + if assert.NoError(t, err, "for key: %s", k) { + if !asch.IsSimpleSchema { + assert.Fail(t, "not a top level schema", "for key: %s", k) + } + } + } + } + } + } +} + +func TestFlatten(t *testing.T) { + bp := filepath.Join(".", "fixtures", "flatten.yml") + sp, err := loadSpec(bp) + values := []struct { + Key string + Location string + Ref spec.Ref + Expected interface{} + }{ + { + "#/responses/notFound/schema", + "#/responses/notFound/schema", + spec.MustCreateRef("#/definitions/error"), + nil, + }, + { + "#/paths/~1some~1where~1{id}/parameters/0", + "#/paths/~1some~1where~1{id}/parameters/0/name", + spec.Ref{}, + "id", + }, + { + "#/paths/~1other~1place", + "#/paths/~1other~1place/get/operationId", + spec.Ref{}, + "modelOp", + }, + { + "#/paths/~1some~1where~1{id}/get/parameters/0", + "#/paths/~1some~1where~1{id}/get/parameters/0/name", + spec.Ref{}, + "limit", + }, + { + "#/paths/~1some~1where~1{id}/get/parameters/1", + "#/paths/~1some~1where~1{id}/get/parameters/1/name", + spec.Ref{}, + "some", + }, + { + "#/paths/~1some~1where~1{id}/get/parameters/2", + "#/paths/~1some~1where~1{id}/get/parameters/2/name", + spec.Ref{}, + "other", + }, + { + "#/paths/~1some~1where~1{id}/get/parameters/3", + "#/paths/~1some~1where~1{id}/get/parameters/3/schema", + spec.MustCreateRef("#/definitions/getSomeWhereIdParamsBody"), + "", + }, + { + "#/paths/~1some~1where~1{id}/get/responses/200", + "#/paths/~1some~1where~1{id}/get/responses/200/schema", + spec.MustCreateRef("#/definitions/getSomeWhereIdOKBody"), + "", + }, + { + "#/definitions/namedAgain", + "", + spec.MustCreateRef("#/definitions/named"), + "", + }, + { + "#/definitions/namedThing/properties/name", + "", + spec.MustCreateRef("#/definitions/named"), + "", + }, + { + "#/definitions/namedThing/properties/namedAgain", + "", + spec.MustCreateRef("#/definitions/namedAgain"), + "", + }, + { + "#/definitions/datedRecords/items/1", + "", + spec.MustCreateRef("#/definitions/record"), + "", + }, + { + "#/definitions/otherRecords/items", + "", + spec.MustCreateRef("#/definitions/record"), + "", + }, + { + "#/definitions/tags/additionalProperties", + "", + spec.MustCreateRef("#/definitions/tag"), + "", + }, + { + "#/definitions/datedTag/allOf/1", + "", + spec.MustCreateRef("#/definitions/tag"), + "", + }, + { + "#/definitions/nestedThingRecordItems2/allOf/1", + "", + spec.MustCreateRef("#/definitions/nestedThingRecordItems2AllOf1"), + "", + }, + { + "#/definitions/nestedThingRecord/items/1", + "", + spec.MustCreateRef("#/definitions/nestedThingRecordItems1"), + "", + }, + { + "#/definitions/nestedThingRecord/items/2", + "", + spec.MustCreateRef("#/definitions/nestedThingRecordItems2"), + "", + }, + { + "#/definitions/nestedThing/properties/record", + "", + spec.MustCreateRef("#/definitions/nestedThingRecord"), + "", + }, + { + "#/definitions/named", + "#/definitions/named/type", + spec.Ref{}, + spec.StringOrArray{"string"}, + }, + { + "#/definitions/error", + "#/definitions/error/properties/id/type", + spec.Ref{}, + spec.StringOrArray{"integer"}, + }, + { + "#/definitions/record", + "#/definitions/record/properties/createdAt/format", + spec.Ref{}, + "date-time", + }, + { + "#/definitions/getSomeWhereIdOKBody", + "#/definitions/getSomeWhereIdOKBody/properties/record", + 
spec.MustCreateRef("#/definitions/nestedThing"), + nil, + }, + { + "#/definitions/getSomeWhereIdParamsBody", + "#/definitions/getSomeWhereIdParamsBody/properties/record", + spec.MustCreateRef("#/definitions/getSomeWhereIdParamsBodyRecord"), + nil, + }, + { + "#/definitions/getSomeWhereIdParamsBodyRecord", + "#/definitions/getSomeWhereIdParamsBodyRecord/items/1", + spec.MustCreateRef("#/definitions/getSomeWhereIdParamsBodyRecordItems1"), + nil, + }, + { + "#/definitions/getSomeWhereIdParamsBodyRecord", + "#/definitions/getSomeWhereIdParamsBodyRecord/items/2", + spec.MustCreateRef("#/definitions/getSomeWhereIdParamsBodyRecordItems2"), + nil, + }, + { + "#/definitions/getSomeWhereIdParamsBodyRecordItems2", + "#/definitions/getSomeWhereIdParamsBodyRecordItems2/allOf/0/format", + spec.Ref{}, + "date", + }, + { + "#/definitions/getSomeWhereIdParamsBodyRecordItems2Name", + "#/definitions/getSomeWhereIdParamsBodyRecordItems2Name/properties/createdAt/format", + spec.Ref{}, + "date-time", + }, + { + "#/definitions/getSomeWhereIdParamsBodyRecordItems2", + "#/definitions/getSomeWhereIdParamsBodyRecordItems2/properties/name", + spec.MustCreateRef("#/definitions/getSomeWhereIdParamsBodyRecordItems2Name"), + "date", + }, + } + if assert.NoError(t, err) { + err := Flatten(FlattenOpts{Spec: New(sp), BasePath: bp}) + if assert.NoError(t, err) { + for i, v := range values { + pk := v.Key[1:] + if v.Location != "" { + pk = v.Location[1:] + } + ptr, err := jsonpointer.New(pk) + if assert.NoError(t, err, "at %d for %s", i, v.Key) { + d, _, err := ptr.Get(sp) + if assert.NoError(t, err) { + if v.Ref.String() != "" { + switch s := d.(type) { + case *spec.Schema: + assert.Equal(t, v.Ref.String(), s.Ref.String(), "at %d for %s", i, v.Key) + case spec.Schema: + assert.Equal(t, v.Ref.String(), s.Ref.String(), "at %d for %s", i, v.Key) + case *spec.SchemaOrArray: + var sRef spec.Ref + if s != nil && s.Schema != nil { + sRef = s.Schema.Ref + } + assert.Equal(t, v.Ref.String(), sRef.String(), "at %d for %s", i, v.Key) + case *spec.SchemaOrBool: + var sRef spec.Ref + if s != nil && s.Schema != nil { + sRef = s.Schema.Ref + } + assert.Equal(t, v.Ref.String(), sRef.String(), "at %d for %s", i, v.Key) + default: + assert.Fail(t, "unknown type", "got %T at %d for %s", d, i, v.Key) + } + } else { + assert.Equal(t, v.Expected, d) + } + } + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go new file mode 100644 index 000000000..a547433ba --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/mixin.go @@ -0,0 +1,199 @@ +package analysis + +import ( + "fmt" + + "github.com/go-openapi/spec" +) + +// Mixin modifies the primary swagger spec by adding the paths and +// definitions from the mixin specs. Top level parameters and +// responses from the mixins are also carried over. Operation id +// collisions are avoided by appending "Mixin" but only if +// needed. No other parts of primary are modified. Consider calling +// FixEmptyResponseDescriptions() on the modified primary if you read +// them from storage and they are valid to start with. +// +// Entries in "paths", "definitions", "parameters" and "responses" are +// added to the primary in the order of the given mixins. If the entry +// already exists in primary it is skipped with a warning message. +// +// The count of skipped entries (from collisions) is returned so any +// deviation from the number expected can flag warning in your build +// scripts. 
Carefully review the collisions before accepting them;
+// consider renaming things if possible.
+//
+// No normalization of any keys takes place (paths, type defs,
+// etc). Ensure they are canonical if your downstream tools do
+// key normalization of any form.
+func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
+	var skipped []string
+	opIds := getOpIds(primary)
+	if primary.Paths == nil {
+		primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)}
+	}
+	if primary.Paths.Paths == nil {
+		primary.Paths.Paths = make(map[string]spec.PathItem)
+	}
+	if primary.Definitions == nil {
+		primary.Definitions = make(spec.Definitions)
+	}
+	if primary.Parameters == nil {
+		primary.Parameters = make(map[string]spec.Parameter)
+	}
+	if primary.Responses == nil {
+		primary.Responses = make(map[string]spec.Response)
+	}
+
+	for i, m := range mixins {
+		for k, v := range m.Definitions {
+			// assume name collisions represent IDENTICAL type. careful.
+			if _, exists := primary.Definitions[k]; exists {
+				warn := fmt.Sprintf("definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+				skipped = append(skipped, warn)
+				continue
+			}
+			primary.Definitions[k] = v
+		}
+		if m.Paths != nil {
+			for k, v := range m.Paths.Paths {
+				if _, exists := primary.Paths.Paths[k]; exists {
+					warn := fmt.Sprintf("paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+					skipped = append(skipped, warn)
+					continue
+				}
+
+				// Swagger requires that operationIds be
+				// unique within a spec. If we find a
+				// collision we append "Mixin0" to the
+				// operationId we are adding, where 0 is the
+				// mixin index. We assume that operationIds
+				// within all the provided specs are already
+				// unique.
+				piops := pathItemOps(v)
+				for _, piop := range piops {
+					if opIds[piop.ID] {
+						piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", i)
+					}
+					opIds[piop.ID] = true
+				}
+				primary.Paths.Paths[k] = v
+			}
+		}
+		for k, v := range m.Parameters {
+			// could try to rename on conflict but would
+			// have to fix $refs in the mixin. Complain
+			// for now
+			if _, exists := primary.Parameters[k]; exists {
+				warn := fmt.Sprintf("top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+				skipped = append(skipped, warn)
+				continue
+			}
+			primary.Parameters[k] = v
+		}
+		for k, v := range m.Responses {
+			// could try to rename on conflict but would
+			// have to fix $refs in the mixin. Complain
+			// for now
+			if _, exists := primary.Responses[k]; exists {
+				warn := fmt.Sprintf("top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+				skipped = append(skipped, warn)
+				continue
+			}
+			primary.Responses[k] = v
+		}
+	}
+	return skipped
+}
+
+// FixEmptyResponseDescriptions replaces empty ("") response
+// descriptions in the input with "(empty)" to ensure that the
+// resulting Swagger stays valid. The problem appears to arise
+// from reading in valid specs that have an explicit response
+// description of "" (valid, response.description is required), but
+// due to zero values being omitted upon re-serializing (omitempty) we
+// lose them unless we stick some chars in there.
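+//
+// A minimal caller-side sketch (variable names hypothetical):
+//
+//	skipped := Mixin(primary, mixin1, mixin2)
+//	for _, warning := range skipped {
+//		log.Println(warning)
+//	}
+//	FixEmptyResponseDescriptions(primary)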
+func FixEmptyResponseDescriptions(s *spec.Swagger) { + if s.Paths != nil { + for _, v := range s.Paths.Paths { + if v.Get != nil { + FixEmptyDescs(v.Get.Responses) + } + if v.Put != nil { + FixEmptyDescs(v.Put.Responses) + } + if v.Post != nil { + FixEmptyDescs(v.Post.Responses) + } + if v.Delete != nil { + FixEmptyDescs(v.Delete.Responses) + } + if v.Options != nil { + FixEmptyDescs(v.Options.Responses) + } + if v.Head != nil { + FixEmptyDescs(v.Head.Responses) + } + if v.Patch != nil { + FixEmptyDescs(v.Patch.Responses) + } + } + } + for k, v := range s.Responses { + FixEmptyDesc(&v) + s.Responses[k] = v + } +} + +// FixEmptyDescs adds "(empty)" as the description for any Response in +// the given Responses object that doesn't already have one. +func FixEmptyDescs(rs *spec.Responses) { + FixEmptyDesc(rs.Default) + for k, v := range rs.StatusCodeResponses { + FixEmptyDesc(&v) + rs.StatusCodeResponses[k] = v + } +} + +// FixEmptyDesc adds "(empty)" as the description to the given +// Response object if it doesn't already have one and isn't a +// ref. No-op on nil input. +func FixEmptyDesc(rs *spec.Response) { + if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil { + return + } + rs.Description = "(empty)" +} + +// getOpIds extracts all the paths..operationIds from the given +// spec and returns them as the keys in a map with 'true' values. +func getOpIds(s *spec.Swagger) map[string]bool { + rv := make(map[string]bool) + if s.Paths == nil { + return rv + } + for _, v := range s.Paths.Paths { + piops := pathItemOps(v) + for _, op := range piops { + rv[op.ID] = true + } + } + return rv +} + +func pathItemOps(p spec.PathItem) []*spec.Operation { + var rv []*spec.Operation + rv = appendOp(rv, p.Get) + rv = appendOp(rv, p.Put) + rv = appendOp(rv, p.Post) + rv = appendOp(rv, p.Delete) + rv = appendOp(rv, p.Head) + rv = appendOp(rv, p.Patch) + return rv +} + +func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation { + if op == nil { + return ops + } + return append(ops, op) +} diff --git a/vendor/github.com/go-openapi/analysis/mixin_test.go b/vendor/github.com/go-openapi/analysis/mixin_test.go new file mode 100644 index 000000000..1d8028217 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/mixin_test.go @@ -0,0 +1,64 @@ +package analysis + +import "testing" + +const ( + widgetFile = "fixtures/widget-crud.yml" + fooFile = "fixtures/foo-crud.yml" + barFile = "fixtures/bar-crud.yml" + noPathsFile = "fixtures/no-paths.yml" + emptyPathsFile = "fixtures/empty-paths.json" +) + +func TestMixin(t *testing.T) { + + primary, err := loadSpec(widgetFile) + if err != nil { + t.Fatalf("Could not load '%v': %v\n", widgetFile, err) + } + mixin1, err := loadSpec(fooFile) + if err != nil { + t.Fatalf("Could not load '%v': %v\n", fooFile, err) + } + mixin2, err := loadSpec(barFile) + if err != nil { + t.Fatalf("Could not load '%v': %v\n", barFile, err) + } + mixin3, err := loadSpec(noPathsFile) + if err != nil { + t.Fatalf("Could not load '%v': %v\n", noPathsFile, err) + } + + collisions := Mixin(primary, mixin1, mixin2, mixin3) + if len(collisions) != 16 { + t.Errorf("TestMixin: Expected 16 collisions, got %v\n%v", len(collisions), collisions) + } + + if len(primary.Paths.Paths) != 7 { + t.Errorf("TestMixin: Expected 7 paths in merged, got %v\n", len(primary.Paths.Paths)) + } + + if len(primary.Definitions) != 8 { + t.Errorf("TestMixin: Expected 8 definitions in merged, got %v\n", len(primary.Definitions)) + } + + if len(primary.Parameters) != 4 { + t.Errorf("TestMixin: 
Expected 4 top level parameters in merged, got %v\n", len(primary.Parameters)) + } + + if len(primary.Responses) != 2 { + t.Errorf("TestMixin: Expected 2 top level responses in merged, got %v\n", len(primary.Responses)) + } + + // test that adding paths to a primary with no paths works (was NPE) + emptyPaths, err := loadSpec(emptyPathsFile) + if err != nil { + t.Fatalf("Could not load '%v': %v\n", emptyPathsFile, err) + } + + collisions = Mixin(emptyPaths, primary) + if len(collisions) != 0 { + t.Errorf("TestMixin: Expected 0 collisions, got %v\n%v", len(collisions), collisions) + } + +} diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go new file mode 100644 index 000000000..b616288bb --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/schema.go @@ -0,0 +1,233 @@ +package analysis + +import ( + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +// SchemaOpts configures the schema analyzer +type SchemaOpts struct { + Schema *spec.Schema + Root interface{} + BasePath string + _ struct{} +} + +// Schema analysis, will classify the schema according to known +// patterns. +func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { + a := &AnalyzedSchema{ + schema: opts.Schema, + root: opts.Root, + basePath: opts.BasePath, + } + + a.initializeFlags() + a.inferKnownType() + a.inferEnum() + a.inferBaseType() + + if err := a.inferMap(); err != nil { + return nil, err + } + if err := a.inferArray(); err != nil { + return nil, err + } + + if err := a.inferTuple(); err != nil { + return nil, err + } + + if err := a.inferFromRef(); err != nil { + return nil, err + } + + a.inferSimpleSchema() + return a, nil +} + +// AnalyzedSchema indicates what the schema represents +type AnalyzedSchema struct { + schema *spec.Schema + root interface{} + basePath string + + hasProps bool + hasAllOf bool + hasItems bool + hasAdditionalProps bool + hasAdditionalItems bool + hasRef bool + + IsKnownType bool + IsSimpleSchema bool + IsArray bool + IsSimpleArray bool + IsMap bool + IsSimpleMap bool + IsExtendedObject bool + IsTuple bool + IsTupleWithExtra bool + IsBaseType bool + IsEnum bool +} + +// Inherits copies value fields from other onto this schema +func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) { + if other == nil { + return + } + a.hasProps = other.hasProps + a.hasAllOf = other.hasAllOf + a.hasItems = other.hasItems + a.hasAdditionalItems = other.hasAdditionalItems + a.hasAdditionalProps = other.hasAdditionalProps + a.hasRef = other.hasRef + + a.IsKnownType = other.IsKnownType + a.IsSimpleSchema = other.IsSimpleSchema + a.IsArray = other.IsArray + a.IsSimpleArray = other.IsSimpleArray + a.IsMap = other.IsMap + a.IsSimpleMap = other.IsSimpleMap + a.IsExtendedObject = other.IsExtendedObject + a.IsTuple = other.IsTuple + a.IsTupleWithExtra = other.IsTupleWithExtra + a.IsBaseType = other.IsBaseType + a.IsEnum = other.IsEnum +} + +func (a *AnalyzedSchema) inferFromRef() error { + if a.hasRef { + opts := &spec.ExpandOptions{RelativeBase: a.basePath} + sch, err := spec.ResolveRefWithBase(a.root, &a.schema.Ref, opts) + if err != nil { + return err + } + if sch != nil { + rsch, err := Schema(SchemaOpts{ + Schema: sch, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + return err + } + a.inherits(rsch) + } + } + return nil +} + +func (a *AnalyzedSchema) inferSimpleSchema() { + a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap +} + +func (a *AnalyzedSchema) inferKnownType() { + tpe := a.schema.Type + 
format := a.schema.Format
+    a.IsKnownType = tpe.Contains("boolean") ||
+        tpe.Contains("integer") ||
+        tpe.Contains("number") ||
+        tpe.Contains("string") ||
+        (format != "" && strfmt.Default.ContainsName(format)) ||
+        (a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems)
+}
+
+func (a *AnalyzedSchema) inferMap() error {
+    if a.isObjectType() {
+        hasExtra := a.hasProps || a.hasAllOf
+        a.IsMap = a.hasAdditionalProps && !hasExtra
+        a.IsExtendedObject = a.hasAdditionalProps && hasExtra
+        if a.IsMap {
+            if a.schema.AdditionalProperties.Schema != nil {
+                msch, err := Schema(SchemaOpts{
+                    Schema:   a.schema.AdditionalProperties.Schema,
+                    Root:     a.root,
+                    BasePath: a.basePath,
+                })
+                if err != nil {
+                    return err
+                }
+                a.IsSimpleMap = msch.IsSimpleSchema
+            } else if a.schema.AdditionalProperties.Allows {
+                a.IsSimpleMap = true
+            }
+        }
+    }
+    return nil
+}
+
+func (a *AnalyzedSchema) inferArray() error {
+    fromValid := a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Len() < 2)
+    a.IsArray = fromValid || (a.hasItems && a.schema.Items.Len() < 2)
+    if a.IsArray && a.hasItems {
+        if a.schema.Items.Schema != nil {
+            itsch, err := Schema(SchemaOpts{
+                Schema:   a.schema.Items.Schema,
+                Root:     a.root,
+                BasePath: a.basePath,
+            })
+            if err != nil {
+                return err
+            }
+            a.IsSimpleArray = itsch.IsSimpleSchema
+        }
+        if len(a.schema.Items.Schemas) > 0 {
+            itsch, err := Schema(SchemaOpts{
+                Schema:   &a.schema.Items.Schemas[0],
+                Root:     a.root,
+                BasePath: a.basePath,
+            })
+            if err != nil {
+                return err
+            }
+            a.IsSimpleArray = itsch.IsSimpleSchema
+        }
+    }
+    if a.IsArray && !a.hasItems {
+        a.IsSimpleArray = true
+    }
+    return nil
+}
+
+func (a *AnalyzedSchema) inferTuple() error {
+    tuple := a.hasItems && a.schema.Items.Len() > 1
+    a.IsTuple = tuple && !a.hasAdditionalItems
+    a.IsTupleWithExtra = tuple && a.hasAdditionalItems
+    return nil
+}
+
+func (a *AnalyzedSchema) inferBaseType() {
+    if a.isObjectType() {
+        a.IsBaseType = a.schema.Discriminator != ""
+    }
+}
+
+func (a *AnalyzedSchema) inferEnum() {
+    a.IsEnum = len(a.schema.Enum) > 0
+}
+
+func (a *AnalyzedSchema) initializeFlags() {
+    a.hasProps = len(a.schema.Properties) > 0
+    a.hasAllOf = len(a.schema.AllOf) > 0
+    a.hasRef = a.schema.Ref.String() != ""
+
+    a.hasItems = a.schema.Items != nil &&
+        (a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0)
+
+    a.hasAdditionalProps = a.schema.AdditionalProperties != nil &&
+        (a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows)
+
+    a.hasAdditionalItems = a.schema.AdditionalItems != nil &&
+        (a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows)
+
+}
+
+func (a *AnalyzedSchema) isObjectType() bool {
+    return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object"))
+}
+
+func (a *AnalyzedSchema) isArrayType() bool {
+    return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array"))
+}
diff --git a/vendor/github.com/go-openapi/analysis/schema_test.go b/vendor/github.com/go-openapi/analysis/schema_test.go
new file mode 100644
index 000000000..0c386b2a4
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/schema_test.go
@@ -0,0 +1,266 @@
+package analysis
+
+import (
+    "encoding/json"
+    "fmt"
+    "path"
+    "testing"
+
+    "net/http"
+    "net/http/httptest"
+
+    "github.com/go-openapi/spec"
+    "github.com/stretchr/testify/assert"
+)
+
+var knownSchemas = []*spec.Schema{
+    spec.BoolProperty(),   // 0
+    spec.StringProperty(), // 1
+    spec.Int8Property(),   // 2
+
spec.Int16Property(), // 3 + spec.Int32Property(), // 4 + spec.Int64Property(), // 5 + spec.Float32Property(), // 6 + spec.Float64Property(), // 7 + spec.DateProperty(), // 8 + spec.DateTimeProperty(), // 9 + (&spec.Schema{}), // 10 + (&spec.Schema{}).Typed("object", ""), // 11 + (&spec.Schema{}).Typed("", ""), // 12 + (&spec.Schema{}).Typed("", "uuid"), // 13 +} + +func newCObj() *spec.Schema { + return (&spec.Schema{}).Typed("object", "").SetProperty("id", *spec.Int64Property()) +} + +var complexObject = newCObj() + +var complexSchemas = []*spec.Schema{ + complexObject, + spec.ArrayProperty(complexObject), + spec.MapProperty(complexObject), +} + +func knownRefs(base string) []spec.Ref { + urls := []string{"bool", "string", "integer", "float", "date", "object", "format"} + + var result []spec.Ref + for _, u := range urls { + result = append(result, spec.MustCreateRef(fmt.Sprintf("%s/%s", base, path.Join("known", u)))) + } + return result +} + +func complexRefs(base string) []spec.Ref { + urls := []string{"object", "array", "map"} + + var result []spec.Ref + for _, u := range urls { + result = append(result, spec.MustCreateRef(fmt.Sprintf("%s/%s", base, path.Join("complex", u)))) + } + return result +} + +func refServer() *httptest.Server { + mux := http.NewServeMux() + mux.Handle("/known/bool", schemaHandler(knownSchemas[0])) + mux.Handle("/known/string", schemaHandler(knownSchemas[1])) + mux.Handle("/known/integer", schemaHandler(knownSchemas[5])) + mux.Handle("/known/float", schemaHandler(knownSchemas[6])) + mux.Handle("/known/date", schemaHandler(knownSchemas[8])) + mux.Handle("/known/object", schemaHandler(knownSchemas[11])) + mux.Handle("/known/format", schemaHandler(knownSchemas[13])) + + mux.Handle("/complex/object", schemaHandler(complexSchemas[0])) + mux.Handle("/complex/array", schemaHandler(complexSchemas[1])) + mux.Handle("/complex/map", schemaHandler(complexSchemas[2])) + + return httptest.NewServer(mux) +} + +func refSchema(ref spec.Ref) *spec.Schema { + return &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} +} + +func schemaHandler(schema *spec.Schema) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + writeJSON(w, schema) + }) +} + +func writeJSON(w http.ResponseWriter, data interface{}) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + enc := json.NewEncoder(w) + if err := enc.Encode(data); err != nil { + panic(err) + } +} + +func TestSchemaAnalysis_KnownTypes(t *testing.T) { + for i, v := range knownSchemas { + sch, err := Schema(SchemaOpts{Schema: v}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.True(t, sch.IsKnownType, "item at %d should be a known type", i) + } + } + for i, v := range complexSchemas { + sch, err := Schema(SchemaOpts{Schema: v}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.False(t, sch.IsKnownType, "item at %d should not be a known type", i) + } + } + + serv := refServer() + defer serv.Close() + + for i, ref := range knownRefs(serv.URL) { + sch, err := Schema(SchemaOpts{Schema: refSchema(ref)}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.True(t, sch.IsKnownType, "item at %d should be a known type", i) + } + } + for i, ref := range complexRefs(serv.URL) { + sch, err := Schema(SchemaOpts{Schema: refSchema(ref)}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.False(t, sch.IsKnownType, "item at %d should not be a known 
type", i) + } + } +} + +func TestSchemaAnalysis_Array(t *testing.T) { + for i, v := range append(knownSchemas, (&spec.Schema{}).Typed("array", "")) { + sch, err := Schema(SchemaOpts{Schema: spec.ArrayProperty(v)}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.True(t, sch.IsArray, "item at %d should be an array type", i) + assert.True(t, sch.IsSimpleArray, "item at %d should be a simple array type", i) + } + } + + for i, v := range complexSchemas { + sch, err := Schema(SchemaOpts{Schema: spec.ArrayProperty(v)}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.True(t, sch.IsArray, "item at %d should be an array type", i) + assert.False(t, sch.IsSimpleArray, "item at %d should not be a simple array type", i) + } + } + + serv := refServer() + defer serv.Close() + + for i, ref := range knownRefs(serv.URL) { + sch, err := Schema(SchemaOpts{Schema: spec.ArrayProperty(refSchema(ref))}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.True(t, sch.IsArray, "item at %d should be an array type", i) + assert.True(t, sch.IsSimpleArray, "item at %d should be a simple array type", i) + } + } + for i, ref := range complexRefs(serv.URL) { + sch, err := Schema(SchemaOpts{Schema: spec.ArrayProperty(refSchema(ref))}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.False(t, sch.IsKnownType, "item at %d should not be a known type", i) + assert.True(t, sch.IsArray, "item at %d should be an array type", i) + assert.False(t, sch.IsSimpleArray, "item at %d should not be a simple array type", i) + } + } + +} + +func TestSchemaAnalysis_Map(t *testing.T) { + for i, v := range append(knownSchemas, spec.MapProperty(nil)) { + sch, err := Schema(SchemaOpts{Schema: spec.MapProperty(v)}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.True(t, sch.IsMap, "item at %d should be a map type", i) + assert.True(t, sch.IsSimpleMap, "item at %d should be a simple map type", i) + } + } + + for i, v := range complexSchemas { + sch, err := Schema(SchemaOpts{Schema: spec.MapProperty(v)}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.True(t, sch.IsMap, "item at %d should be a map type", i) + assert.False(t, sch.IsSimpleMap, "item at %d should not be a simple map type", i) + } + } +} + +func TestSchemaAnalysis_ExtendedObject(t *testing.T) { + for i, v := range knownSchemas { + wex := spec.MapProperty(v).SetProperty("name", *spec.StringProperty()) + sch, err := Schema(SchemaOpts{Schema: wex}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.True(t, sch.IsExtendedObject, "item at %d should be an extended map object type", i) + assert.False(t, sch.IsMap, "item at %d should not be a map type", i) + assert.False(t, sch.IsSimpleMap, "item at %d should not be a simple map type", i) + } + } +} + +func TestSchemaAnalysis_Tuple(t *testing.T) { + at := spec.ArrayProperty(nil) + at.Items = &spec.SchemaOrArray{} + at.Items.Schemas = append(at.Items.Schemas, *spec.StringProperty(), *spec.Int64Property()) + + sch, err := Schema(SchemaOpts{Schema: at}) + if assert.NoError(t, err) { + assert.True(t, sch.IsTuple) + assert.False(t, sch.IsTupleWithExtra) + assert.False(t, sch.IsKnownType) + assert.False(t, sch.IsSimpleSchema) + } +} + +func TestSchemaAnalysis_TupleWithExtra(t *testing.T) { + at := spec.ArrayProperty(nil) + at.Items = &spec.SchemaOrArray{} + at.Items.Schemas = append(at.Items.Schemas, 
*spec.StringProperty(), *spec.Int64Property()) + at.AdditionalItems = &spec.SchemaOrBool{Allows: true} + at.AdditionalItems.Schema = spec.Int32Property() + + sch, err := Schema(SchemaOpts{Schema: at}) + if assert.NoError(t, err) { + assert.False(t, sch.IsTuple) + assert.True(t, sch.IsTupleWithExtra) + assert.False(t, sch.IsKnownType) + assert.False(t, sch.IsSimpleSchema) + } +} + +func TestSchemaAnalysis_BaseType(t *testing.T) { + cl := (&spec.Schema{}).Typed("object", "").SetProperty("type", *spec.StringProperty()).WithDiscriminator("type") + + sch, err := Schema(SchemaOpts{Schema: cl}) + if assert.NoError(t, err) { + assert.True(t, sch.IsBaseType) + assert.False(t, sch.IsKnownType) + assert.False(t, sch.IsSimpleSchema) + } +} + +func TestSchemaAnalysis_SimpleSchema(t *testing.T) { + for i, v := range append(knownSchemas, spec.ArrayProperty(nil), spec.MapProperty(nil)) { + sch, err := Schema(SchemaOpts{Schema: v}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.True(t, sch.IsSimpleSchema, "item at %d should be a simple schema", i) + } + + asch, err := Schema(SchemaOpts{Schema: spec.ArrayProperty(v)}) + if assert.NoError(t, err, "failed to analyze array schema at %d: %v", i, err) { + assert.True(t, asch.IsSimpleSchema, "array item at %d should be a simple schema", i) + } + + msch, err := Schema(SchemaOpts{Schema: spec.MapProperty(v)}) + if assert.NoError(t, err, "failed to analyze map schema at %d: %v", i, err) { + assert.True(t, msch.IsSimpleSchema, "map item at %d should be a simple schema", i) + } + } + + for i, v := range complexSchemas { + sch, err := Schema(SchemaOpts{Schema: v}) + if assert.NoError(t, err, "failed to analyze schema at %d: %v", i, err) { + assert.False(t, sch.IsSimpleSchema, "item at %d should not be a simple schema", i) + } + } + +} diff --git a/vendor/github.com/go-openapi/errors/.github/CONTRIBUTING.md b/vendor/github.com/go-openapi/errors/.github/CONTRIBUTING.md new file mode 100644 index 000000000..7dea4240d --- /dev/null +++ b/vendor/github.com/go-openapi/errors/.github/CONTRIBUTING.md @@ -0,0 +1,117 @@ +## Contribution Guidelines + +### Pull requests are always welcome + +We are always thrilled to receive pull requests, and do our best to +process them as fast as possible. Not sure if that typo is worth a pull +request? Do it! We will appreciate it. + +If your pull request is not accepted on the first try, don't be +discouraged! If there's a problem with the implementation, hopefully you +received feedback on what to improve. + +We're trying very hard to keep go-swagger lean and focused. We don't want it +to do everything for everybody. This means that we might decide against +incorporating a new feature. However, there might be a way to implement +that feature *on top of* go-swagger. + + +### Conventions + +Fork the repo and make changes on your fork in a feature branch: + +- If it's a bugfix branch, name it XXX-something where XXX is the number of the + issue +- If it's a feature branch, create an enhancement issue to announce your + intentions, and name it XXX-something where XXX is the number of the issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. Run the full test suite on +your branch before submitting a pull request. + +Update the documentation when creating or modifying features. Test +your documentation changes for clarity, concision, and correctness, as +well as a clean documentation build. 
See ``docs/README.md`` for more
+information on building the docs and how docs get released.
+
+Write clean code. Universally formatted code promotes ease of writing, reading,
+and maintenance. Always run `gofmt -s -w file.go` on each changed file before
+committing your changes. Most editors have plugins that do this automatically.
+
+Pull request descriptions should be as clear as possible and include a
+reference to all the issues that they address.
+
+Pull requests must not contain commits from other users or branches.
+
+Commit messages must start with a capitalized and short summary (max. 50
+chars) written in the imperative, followed by an optional, more detailed
+explanatory text which is separated from the summary by an empty line.
+
+Code review comments may be added to your pull request. Discuss, then make the
+suggested modifications and push additional commits to your feature branch. Be
+sure to post a comment after pushing. The new commits will show up in the pull
+request automatically, but the reviewers will not be notified unless you
+comment.
+
+Before the pull request is merged, make sure that you squash your commits into
+logical units of work using `git rebase -i` and `git push -f`. After every
+commit the test suite should be passing. Include documentation changes in the
+same commit so that a revert would remove all traces of the feature or fix.
+
+Commits that fix or close an issue should include a reference like `Closes #XXX`
+or `Fixes #XXX`, which will automatically close the issue when merged.
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the
+patch, which certifies that you wrote it or otherwise have the right to
+pass it on as an open-source patch. The rules are pretty simple: if you
+can certify the below (from
+[developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+using your real name (sorry, no pseudonyms or anonymous contributions).
+
+You can add the sign off when creating the git commit via `git commit -s`.
diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore new file mode 100644 index 000000000..dd91ed6a0 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/errors/.travis.yml b/vendor/github.com/go-openapi/errors/.travis.yml new file mode 100644 index 000000000..8d22a34a9 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/.travis.yml @@ -0,0 +1,12 @@ +language: go +go: +- 1.7 +install: +- go get -u github.com/stretchr/testify/assert +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... +after_success: +- bash <(curl -s https://codecov.io/bash) +notifications: + slack: + secure: gZGp9NaHxi7zawlXJXKY92BGeDR1x0tbIcTyU5nMKLq0fhIaiEBJEeALwZ4VgqsSv3DytSSF5mLH8fevAM3ixE6hxjKQ+lQuf7V/w3btCN1CSWgoua5LOh1kTnqZQtJuRvO4pzoJcT3bJWBsVZ07VGNVzzJEy/zAKCHFqBUCXShw7QemlLBcYWFNqveTlvDIfCzvouoLnPoXwxEpkjxe9uz/ZKZgAnup/fXjC8RFctmgCnkCyvJTk0Y/fZCsufixJrJhshBWTnlrFCzRmgNkz2d+i1Ls3+MJ5EJJ2Tx/A5S63dL49J1f9Kr0AKHADmulSy8JNzIckKwbyFMYUecrsW+Lsu9DhnVMy1jj5pKsJDLRi2iIU3fXTMWbcyQbXjbbnBO2mPdP3Tzme75y4D9fc8hUPeyqVv2BU26NEbQ7EF2pKJ93OXvci7HlwRBgdJa8j6mP2LEDClcPQW00g7N/OZe0cTOMa8L5AwiBlbArwqt9wv6YLJoTG0wpDhzWsFvbCg5bJxe28Yn3fIDD0Lk1I7iSnBbp/5gzF19jmxqvcT8tHRkDL4xfjbENFTZjA5uB4Z4pj4WSyWQILLV/Jwhe3fi9uQwdviFHfj5pnVrmNUiGSOQL672K5wl2c3E9mGwejvsu2dfEz28n7Y/FUnOpY3/cBS0n27JJaerS0zMKNLE= diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/LICENSE b/vendor/github.com/go-openapi/errors/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md new file mode 100644 index 000000000..48c49fb2d --- /dev/null +++ b/vendor/github.com/go-openapi/errors/README.md @@ -0,0 +1,5 @@ +# OpenAPI errors [![Build Status](https://travis-ci.org/go-openapi/errors.svg?branch=master)](https://travis-ci.org/go-openapi/errors) [![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/errors?status.svg)](http://godoc.org/github.com/go-openapi/errors) + +Shared errors used throughout the various libraries for the go-openapi toolkit \ No newline at end of file diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go new file mode 100644 index 000000000..e83c6ddef --- /dev/null +++ b/vendor/github.com/go-openapi/errors/api.go @@ -0,0 +1,161 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package errors
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "strings"
+)
+
+// Error represents an error interface that all swagger framework errors implement
+type Error interface {
+    error
+    Code() int32
+}
+
+type apiError struct {
+    code    int32
+    message string
+}
+
+func (a *apiError) Error() string {
+    return a.message
+}
+
+func (a *apiError) Code() int32 {
+    return a.code
+}
+
+// New creates a new API error with a code and a message
+func New(code int32, message string, args ...interface{}) Error {
+    if len(args) > 0 {
+        return &apiError{code, fmt.Sprintf(message, args...)}
+    }
+    return &apiError{code, message}
+}
+
+// NotFound creates a new not found error
+func NotFound(message string, args ...interface{}) Error {
+    if message == "" {
+        message = "Not found"
+    }
+    return New(http.StatusNotFound, fmt.Sprintf(message, args...))
+}
+
+// NotImplemented creates a new not implemented error
+func NotImplemented(message string) Error {
+    return New(http.StatusNotImplemented, message)
+}
+
+// MethodNotAllowedError represents an error for when the path matches but the method doesn't
+type MethodNotAllowedError struct {
+    code    int32
+    Allowed []string
+    message string
+}
+
+func (m *MethodNotAllowedError) Error() string {
+    return m.message
+}
+
+// Code the error code
+func (m *MethodNotAllowedError) Code() int32 {
+    return m.code
+}
+
+func errorAsJSON(err Error) []byte {
+    b, _ := json.Marshal(struct {
+        Code    int32  `json:"code"`
+        Message string `json:"message"`
+    }{err.Code(), err.Error()})
+    return b
+}
+
+func flattenComposite(errs *CompositeError) *CompositeError {
+    var res []error
+    for _, er := range errs.Errors {
+        switch e := er.(type) {
+        case *CompositeError:
+            if len(e.Errors) > 0 {
+                flat := flattenComposite(e)
+                if len(flat.Errors) > 0 {
+                    res = append(res, flat.Errors...)
+                }
+            }
+        default:
+            if e != nil {
+                res = append(res, e)
+            }
+        }
+    }
+    return CompositeValidationError(res...)
+} + +// MethodNotAllowed creates a new method not allowed error +func MethodNotAllowed(requested string, allow []string) Error { + msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ",")) + return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg} +} + +const head = "HEAD" + +// ServeError the error handler interface implementation +func ServeError(rw http.ResponseWriter, r *http.Request, err error) { + rw.Header().Set("Content-Type", "application/json") + switch e := err.(type) { + case *CompositeError: + er := flattenComposite(e) + // strips composite errors to first element only + if len(er.Errors) > 0 { + ServeError(rw, r, er.Errors[0]) + } else { + // guard against empty CompositeError (invalid construct) + ServeError(rw, r, nil) + } + case *MethodNotAllowedError: + rw.Header().Add("Allow", strings.Join(err.(*MethodNotAllowedError).Allowed, ",")) + rw.WriteHeader(asHTTPCode(int(e.Code()))) + if r == nil || r.Method != head { + rw.Write(errorAsJSON(e)) + } + case Error: + if e == nil { + rw.WriteHeader(http.StatusInternalServerError) + rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) + return + } + rw.WriteHeader(asHTTPCode(int(e.Code()))) + if r == nil || r.Method != head { + rw.Write(errorAsJSON(e)) + } + case nil: + rw.WriteHeader(http.StatusInternalServerError) + rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) + default: + rw.WriteHeader(http.StatusInternalServerError) + if r == nil || r.Method != head { + rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error()))) + } + } +} + +func asHTTPCode(input int) int { + if input >= 600 { + return 422 + } + return input +} diff --git a/vendor/github.com/go-openapi/errors/api_test.go b/vendor/github.com/go-openapi/errors/api_test.go new file mode 100644 index 000000000..cb7c65928 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/api_test.go @@ -0,0 +1,184 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
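For orientation, a minimal sketch of wiring ServeError into a plain net/http handler. It is not part of the vendored sources; the route and listen address are hypothetical. ServeError maps the error's code to an HTTP status (asHTTPCode clamps anything >= 600 down to 422), writes a `{"code":...,"message":...}` JSON body, and omits the body for HEAD requests.

```go
package main

import (
    "net/http"

    "github.com/go-openapi/errors"
)

func main() {
    http.HandleFunc("/widgets/", func(rw http.ResponseWriter, r *http.Request) {
        // NotFound builds an errors.Error with code 404; ServeError
        // writes the matching status plus a JSON error document.
        errors.ServeError(rw, r, errors.NotFound("no widget at %q", r.URL.Path))
    })
    _ = http.ListenAndServe(":8080", nil)
}
```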
+ +package errors + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestServeError(t *testing.T) { + // method not allowed wins + // err abides by the Error interface + err := MethodNotAllowed("GET", []string{"POST", "PUT"}) + recorder := httptest.NewRecorder() + ServeError(recorder, nil, err) + assert.Equal(t, http.StatusMethodNotAllowed, recorder.Code) + assert.Equal(t, "POST,PUT", recorder.Header().Get("Allow")) + // assert.Equal(t, "application/json", recorder.Header().Get("content-type")) + assert.Equal(t, `{"code":405,"message":"method GET is not allowed, but [POST,PUT] are"}`, recorder.Body.String()) + + // renders status code from error when present + err = NotFound("") + recorder = httptest.NewRecorder() + ServeError(recorder, nil, err) + assert.Equal(t, http.StatusNotFound, recorder.Code) + // assert.Equal(t, "application/json", recorder.Header().Get("content-type")) + assert.Equal(t, `{"code":404,"message":"Not found"}`, recorder.Body.String()) + + // renders mapped status code from error when present + err = InvalidTypeName("someType") + recorder = httptest.NewRecorder() + ServeError(recorder, nil, err) + assert.Equal(t, http.StatusUnprocessableEntity, recorder.Code) + // assert.Equal(t, "application/json", recorder.Header().Get("content-type")) + assert.Equal(t, `{"code":601,"message":"someType is an invalid type name"}`, recorder.Body.String()) + + // defaults to internal server error + simpleErr := fmt.Errorf("some error") + recorder = httptest.NewRecorder() + ServeError(recorder, nil, simpleErr) + assert.Equal(t, http.StatusInternalServerError, recorder.Code) + // assert.Equal(t, "application/json", recorder.Header().Get("content-type")) + assert.Equal(t, `{"code":500,"message":"some error"}`, recorder.Body.String()) + + // composite errors + + // unrecognized: return internal error with first error only - the second error is ignored + compositeErr := &CompositeError{ + Errors: []error{ + fmt.Errorf("firstError"), + fmt.Errorf("anotherError"), + }, + } + recorder = httptest.NewRecorder() + ServeError(recorder, nil, compositeErr) + assert.Equal(t, http.StatusInternalServerError, recorder.Code) + assert.Equal(t, `{"code":500,"message":"firstError"}`, recorder.Body.String()) + + // recognized: return internal error with first error only - the second error is ignored + compositeErr = &CompositeError{ + Errors: []error{ + New(600, "myApiError"), + New(601, "myOtherApiError"), + }, + } + recorder = httptest.NewRecorder() + ServeError(recorder, nil, compositeErr) + assert.Equal(t, CompositeErrorCode, recorder.Code) + assert.Equal(t, `{"code":600,"message":"myApiError"}`, recorder.Body.String()) + + // recognized API Error, flattened + compositeErr = &CompositeError{ + Errors: []error{ + &CompositeError{ + Errors: []error{ + New(600, "myApiError"), + New(601, "myOtherApiError"), + }, + }, + }, + } + recorder = httptest.NewRecorder() + ServeError(recorder, nil, compositeErr) + assert.Equal(t, CompositeErrorCode, recorder.Code) + assert.Equal(t, `{"code":600,"message":"myApiError"}`, recorder.Body.String()) + + // check guard against empty CompositeError (e.g. 
nil Error interface) + compositeErr = &CompositeError{ + Errors: []error{ + &CompositeError{ + Errors: []error{}, + }, + }, + } + recorder = httptest.NewRecorder() + ServeError(recorder, nil, compositeErr) + assert.Equal(t, http.StatusInternalServerError, recorder.Code) + assert.Equal(t, `{"code":500,"message":"Unknown error"}`, recorder.Body.String()) + + // check guard against nil type + recorder = httptest.NewRecorder() + ServeError(recorder, nil, nil) + assert.Equal(t, http.StatusInternalServerError, recorder.Code) + assert.Equal(t, `{"code":500,"message":"Unknown error"}`, recorder.Body.String()) +} + +func TestAPIErrors(t *testing.T) { + err := New(402, "this failed %s", "yada") + assert.Error(t, err) + assert.EqualValues(t, 402, err.Code()) + assert.EqualValues(t, "this failed yada", err.Error()) + + err = NotFound("this failed %d", 1) + assert.Error(t, err) + assert.EqualValues(t, http.StatusNotFound, err.Code()) + assert.EqualValues(t, "this failed 1", err.Error()) + + err = NotFound("") + assert.Error(t, err) + assert.EqualValues(t, http.StatusNotFound, err.Code()) + assert.EqualValues(t, "Not found", err.Error()) + + err = NotImplemented("not implemented") + assert.Error(t, err) + assert.EqualValues(t, http.StatusNotImplemented, err.Code()) + assert.EqualValues(t, "not implemented", err.Error()) + + err = MethodNotAllowed("GET", []string{"POST", "PUT"}) + assert.Error(t, err) + assert.EqualValues(t, http.StatusMethodNotAllowed, err.Code()) + assert.EqualValues(t, "method GET is not allowed, but [POST,PUT] are", err.Error()) + + err = InvalidContentType("application/saml", []string{"application/json", "application/x-yaml"}) + assert.Error(t, err) + assert.EqualValues(t, http.StatusUnsupportedMediaType, err.Code()) + assert.EqualValues(t, "unsupported media type \"application/saml\", only [application/json application/x-yaml] are allowed", err.Error()) + + err = InvalidResponseFormat("application/saml", []string{"application/json", "application/x-yaml"}) + assert.Error(t, err) + assert.EqualValues(t, http.StatusNotAcceptable, err.Code()) + assert.EqualValues(t, "unsupported media type requested, only [application/json application/x-yaml] are available", err.Error()) +} + +func TestValidateName(t *testing.T) { + v := &Validation{Name: "myValidation", message: "myMessage"} + + // unchanged + vv := v.ValidateName("") + assert.EqualValues(t, "myValidation", vv.Name) + assert.EqualValues(t, "myMessage", vv.message) + + // unchanged + vv = v.ValidateName("myNewName") + assert.EqualValues(t, "myValidation", vv.Name) + assert.EqualValues(t, "myMessage", vv.message) + + v.Name = "" + + // unchanged + vv = v.ValidateName("") + assert.EqualValues(t, "", vv.Name) + assert.EqualValues(t, "myMessage", vv.message) + + // forced + vv = v.ValidateName("myNewName") + assert.EqualValues(t, "myNewName", vv.Name) + assert.EqualValues(t, "myNewNamemyMessage", vv.message) +} diff --git a/vendor/github.com/go-openapi/errors/auth.go b/vendor/github.com/go-openapi/errors/auth.go new file mode 100644 index 000000000..70eb960b1 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/auth.go @@ -0,0 +1,20 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +// Unauthenticated returns an unauthenticated error +func Unauthenticated(scheme string) Error { + return New(401, "unauthenticated for %s", scheme) +} diff --git a/vendor/github.com/go-openapi/errors/auth_test.go b/vendor/github.com/go-openapi/errors/auth_test.go new file mode 100644 index 000000000..eee7a5c7e --- /dev/null +++ b/vendor/github.com/go-openapi/errors/auth_test.go @@ -0,0 +1,27 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUnauthenticated(t *testing.T) { + err := Unauthenticated("basic") + assert.EqualValues(t, 401, err.Code()) + assert.Equal(t, "unauthenticated for basic", err.Error()) +} diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go new file mode 100644 index 000000000..963d42740 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/doc.go @@ -0,0 +1,28 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* + +Package errors provides an Error interface and several concrete types +implementing this interface to manage API errors and JSON-schema validation +errors. + +A middleware handler ServeError() is provided to serve the errors types +it defines. + +It is used throughout the various go-openapi toolkit libraries +(https://github.com/go-openapi). + +*/ +package errors diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go new file mode 100644 index 000000000..a80ddc978 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/headers.go @@ -0,0 +1,85 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "fmt" + "net/http" +) + +// Validation represents a failure of a precondition +type Validation struct { + code int32 + Name string + In string + Value interface{} + message string + Values []interface{} +} + +func (e *Validation) Error() string { + return e.message +} + +// Code the error code +func (e *Validation) Code() int32 { + return e.code +} + +// ValidateName produces an error message name for an aliased property +func (e *Validation) ValidateName(name string) *Validation { + if e.Name == "" && name != "" { + e.Name = name + e.message = name + e.message + } + return e +} + +const ( + contentTypeFail = `unsupported media type %q, only %v are allowed` + responseFormatFail = `unsupported media type requested, only %v are available` +) + +// InvalidContentType error for an invalid content type +func InvalidContentType(value string, allowed []string) *Validation { + var values []interface{} + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusUnsupportedMediaType, + Name: "Content-Type", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(contentTypeFail, value, allowed), + } +} + +// InvalidResponseFormat error for an unacceptable response format request +func InvalidResponseFormat(value string, allowed []string) *Validation { + var values []interface{} + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusNotAcceptable, + Name: "Accept", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(responseFormatFail, allowed), + } +} diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go new file mode 100644 index 000000000..6390d4636 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/middleware.go @@ -0,0 +1,51 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
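The InvalidContentType and InvalidResponseFormat constructors in headers.go above return *Validation values rather than bare errors, so callers keep the structured fields (Name, In, Value, Values) alongside the rendered message. A minimal sketch, not part of the vendored sources (the allowJSON helper is hypothetical):

```go
package main

import (
    "fmt"

    "github.com/go-openapi/errors"
)

// allowJSON is a hypothetical guard built on InvalidContentType.
func allowJSON(contentType string) error {
    allowed := []string{"application/json"}
    for _, a := range allowed {
        if a == contentType {
            return nil
        }
    }
    return errors.InvalidContentType(contentType, allowed)
}

func main() {
    if err := allowJSON("text/plain"); err != nil {
        v := err.(*errors.Validation)
        fmt.Println(v.Code(), v.Error())
        // 415 unsupported media type "text/plain", only [application/json] are allowed
    }
}
```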
+
+package errors
+
+import (
+    "bytes"
+    "fmt"
+    "strings"
+)
+
+// APIVerificationFailed is an error that contains all the missing info for a mismatched section
+// between the api registrations and the api spec
+type APIVerificationFailed struct {
+    Section              string
+    MissingSpecification []string
+    MissingRegistration  []string
+}
+
+// Error renders the missing registrations and specifications as a single message
+func (v *APIVerificationFailed) Error() string {
+    buf := bytes.NewBuffer(nil)
+
+    hasRegMissing := len(v.MissingRegistration) > 0
+    hasSpecMissing := len(v.MissingSpecification) > 0
+
+    if hasRegMissing {
+        buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section))
+    }
+
+    if hasRegMissing && hasSpecMissing {
+        buf.WriteString("\n")
+    }
+
+    if hasSpecMissing {
+        buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section))
+    }
+
+    return buf.String()
+}
diff --git a/vendor/github.com/go-openapi/errors/middleware_test.go b/vendor/github.com/go-openapi/errors/middleware_test.go
new file mode 100644
index 000000000..feff2074d
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/middleware_test.go
@@ -0,0 +1,33 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+)
+
+func TestAPIVerificationFailed(t *testing.T) {
+    err := &APIVerificationFailed{
+        Section:              "consumer",
+        MissingSpecification: []string{"application/json", "application/x-yaml"},
+        MissingRegistration:  []string{"text/html", "application/xml"},
+    }
+
+    expected := `missing [text/html, application/xml] consumer registrations
+missing from spec file [application/json, application/x-yaml] consumer`
+    assert.Equal(t, expected, err.Error())
+}
diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go
new file mode 100644
index 000000000..1bae87302
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/parsing.go
@@ -0,0 +1,59 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import "fmt"
+
+// ParseError represents a parsing error
+type ParseError struct {
+	code    int32
+	Name    string
+	In      string
+	Value   string
+	Reason  error
+	message string
+}
+
+func (e *ParseError) Error() string {
+	return e.message
+}
+
+// Code returns the http status code for this error
+func (e *ParseError) Code() int32 {
+	return e.code
+}
+
+const (
+	parseErrorTemplContent     = `parsing %s %s from %q failed, because %s`
+	parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s`
+)
+
+// NewParseError creates a new parse error
+func NewParseError(name, in, value string, reason error) *ParseError {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason)
+	} else {
+		msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason)
+	}
+	return &ParseError{
+		code:    400,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		Reason:  reason,
+		message: msg,
+	}
+}
diff --git a/vendor/github.com/go-openapi/errors/parsing_test.go b/vendor/github.com/go-openapi/errors/parsing_test.go
new file mode 100644
index 000000000..a51f773c9
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/parsing_test.go
@@ -0,0 +1,32 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestParseError(t *testing.T) {
+	err := NewParseError("Content-Type", "header", "application(", errors.New("unable to parse"))
+	assert.EqualValues(t, 400, err.Code())
+	assert.Equal(t, "parsing Content-Type header from \"application(\" failed, because unable to parse", err.Error())
+
+	err = NewParseError("Content-Type", "", "application(", errors.New("unable to parse"))
+	assert.EqualValues(t, 400, err.Code())
+	assert.Equal(t, "parsing Content-Type from \"application(\" failed, because unable to parse", err.Error())
+}
diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go
new file mode 100644
index 000000000..14fb2c5f1
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/schema.go
@@ -0,0 +1,562 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package errors + +import ( + "fmt" + "strings" +) + +const ( + invalidType = "%s is an invalid type name" + typeFail = "%s in %s must be of type %s" + typeFailWithData = "%s in %s must be of type %s: %q" + typeFailWithError = "%s in %s must be of type %s, because: %s" + requiredFail = "%s in %s is required" + tooLongMessage = "%s in %s should be at most %d chars long" + tooShortMessage = "%s in %s should be at least %d chars long" + patternFail = "%s in %s should match '%s'" + enumFail = "%s in %s should be one of %v" + multipleOfFail = "%s in %s should be a multiple of %v" + maxIncFail = "%s in %s should be less than or equal to %v" + maxExcFail = "%s in %s should be less than %v" + minIncFail = "%s in %s should be greater than or equal to %v" + minExcFail = "%s in %s should be greater than %v" + uniqueFail = "%s in %s shouldn't contain duplicates" + maxItemsFail = "%s in %s should have at most %d items" + minItemsFail = "%s in %s should have at least %d items" + typeFailNoIn = "%s must be of type %s" + typeFailWithDataNoIn = "%s must be of type %s: %q" + typeFailWithErrorNoIn = "%s must be of type %s, because: %s" + requiredFailNoIn = "%s is required" + tooLongMessageNoIn = "%s should be at most %d chars long" + tooShortMessageNoIn = "%s should be at least %d chars long" + patternFailNoIn = "%s should match '%s'" + enumFailNoIn = "%s should be one of %v" + multipleOfFailNoIn = "%s should be a multiple of %v" + maxIncFailNoIn = "%s should be less than or equal to %v" + maxExcFailNoIn = "%s should be less than %v" + minIncFailNoIn = "%s should be greater than or equal to %v" + minExcFailNoIn = "%s should be greater than %v" + uniqueFailNoIn = "%s shouldn't contain duplicates" + maxItemsFailNoIn = "%s should have at most %d items" + minItemsFailNoIn = "%s should have at least %d items" + noAdditionalItems = "%s in %s can't have additional items" + noAdditionalItemsNoIn = "%s can't have additional items" + tooFewProperties = "%s in %s should have at least %d properties" + tooFewPropertiesNoIn = "%s should have at least %d properties" + tooManyProperties = "%s in %s should have at most %d properties" + tooManyPropertiesNoIn = "%s should have at most %d properties" + unallowedProperty = "%s.%s in %s is a forbidden property" + unallowedPropertyNoIn = "%s.%s is a forbidden property" + failedAllPatternProps = "%s.%s in %s failed all pattern properties" + failedAllPatternPropsNoIn = "%s.%s failed all pattern properties" + multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v" +) + +// All code responses can be used to differentiate errors for different handling +// by the consuming program +const ( + // CompositeErrorCode remains 422 for backwards-compatibility + // and to separate it from validation errors with cause + CompositeErrorCode = 422 + // InvalidTypeCode is used for any subclass of invalid types + InvalidTypeCode = 600 + iota + RequiredFailCode + TooLongFailCode + TooShortFailCode + PatternFailCode + EnumFailCode + MultipleOfFailCode + MaxFailCode + MinFailCode + UniqueFailCode + MaxItemsFailCode + MinItemsFailCode + NoAdditionalItemsCode + TooFewPropertiesCode + TooManyPropertiesCode + UnallowedPropertyCode + FailedAllPatternPropsCode + MultipleOfMustBePositiveCode +) + +// CompositeError is an error that groups several errors together +type CompositeError struct { + Errors []error + code int32 + message string +} + +// Code for this error +func (c *CompositeError) Code() int32 { + return c.code +} + +func (c *CompositeError) Error() string { + if 
len(c.Errors) > 0 {
+		msgs := []string{c.message + ":"}
+		for _, e := range c.Errors {
+			msgs = append(msgs, e.Error())
+		}
+		return strings.Join(msgs, "\n")
+	}
+	return c.message
+}
+
+// CompositeValidationError an error to wrap a bunch of other errors
+func CompositeValidationError(errors ...error) *CompositeError {
+	return &CompositeError{
+		code:    CompositeErrorCode,
+		Errors:  append([]error{}, errors...),
+		message: "validation failure list",
+	}
+}
+
+// FailedAllPatternProperties an error for when a property value failed all pattern properties
+func FailedAllPatternProperties(name, in, key string) *Validation {
+	msg := fmt.Sprintf(failedAllPatternProps, name, key, in)
+	if in == "" {
+		msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key)
+	}
+	return &Validation{
+		code:    FailedAllPatternPropsCode,
+		Name:    name,
+		In:      in,
+		Value:   key,
+		message: msg,
+	}
+}
+
+// PropertyNotAllowed an error for when a property is not allowed
+func PropertyNotAllowed(name, in, key string) *Validation {
+	msg := fmt.Sprintf(unallowedProperty, name, key, in)
+	if in == "" {
+		msg = fmt.Sprintf(unallowedPropertyNoIn, name, key)
+	}
+	return &Validation{
+		code:    UnallowedPropertyCode,
+		Name:    name,
+		In:      in,
+		Value:   key,
+		message: msg,
+	}
+}
+
+// TooFewProperties an error for an object with too few properties
+func TooFewProperties(name, in string, n int64) *Validation {
+	msg := fmt.Sprintf(tooFewProperties, name, in, n)
+	if in == "" {
+		msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n)
+	}
+	return &Validation{
+		code:    TooFewPropertiesCode,
+		Name:    name,
+		In:      in,
+		Value:   n,
+		message: msg,
+	}
+}
+
+// TooManyProperties an error for an object with too many properties
+func TooManyProperties(name, in string, n int64) *Validation {
+	msg := fmt.Sprintf(tooManyProperties, name, in, n)
+	if in == "" {
+		msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n)
+	}
+	return &Validation{
+		code:    TooManyPropertiesCode,
+		Name:    name,
+		In:      in,
+		Value:   n,
+		message: msg,
+	}
+}
+
+// AdditionalItemsNotAllowed an error for invalid additional items
+func AdditionalItemsNotAllowed(name, in string) *Validation {
+	msg := fmt.Sprintf(noAdditionalItems, name, in)
+	if in == "" {
+		msg = fmt.Sprintf(noAdditionalItemsNoIn, name)
+	}
+	return &Validation{
+		code:    NoAdditionalItemsCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// InvalidCollectionFormat another flavor of invalid type error
+func InvalidCollectionFormat(name, in, format string) *Validation {
+	return &Validation{
+		code:    InvalidTypeCode,
+		Name:    name,
+		In:      in,
+		Value:   format,
+		message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name),
+	}
+}
+
+// InvalidTypeName an error for when the type is invalid
+func InvalidTypeName(typeName string) *Validation {
+	return &Validation{
+		code:    InvalidTypeCode,
+		Value:   typeName,
+		message: fmt.Sprintf(invalidType, typeName),
+	}
+}
+
+// InvalidType creates an error for when the type is invalid
+func InvalidType(name, in, typeName string, value interface{}) *Validation {
+	var message string
+
+	if in != "" {
+		switch value.(type) {
+		case string:
+			message = fmt.Sprintf(typeFailWithData, name, in, typeName, value)
+		case error:
+			message = fmt.Sprintf(typeFailWithError, name, in, typeName, value)
+		default:
+			message = fmt.Sprintf(typeFail, name, in, typeName)
+		}
+	} else {
+		switch value.(type) {
+		case string:
+			message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value)
+		case error:
+			message = fmt.Sprintf(typeFailWithErrorNoIn, name,
typeName, value)
+		default:
+			message = fmt.Sprintf(typeFailNoIn, name, typeName)
+		}
+	}
+
+	return &Validation{
+		code:    InvalidTypeCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+
+}
+
+// DuplicateItems error for when an array contains duplicates
+func DuplicateItems(name, in string) *Validation {
+	msg := fmt.Sprintf(uniqueFail, name, in)
+	if in == "" {
+		msg = fmt.Sprintf(uniqueFailNoIn, name)
+	}
+	return &Validation{
+		code:    UniqueFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// TooManyItems error for when an array contains too many items
+func TooManyItems(name, in string, max int64) *Validation {
+	msg := fmt.Sprintf(maxItemsFail, name, in, max)
+	if in == "" {
+		msg = fmt.Sprintf(maxItemsFailNoIn, name, max)
+	}
+
+	return &Validation{
+		code:    MaxItemsFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// TooFewItems error for when an array contains too few items
+func TooFewItems(name, in string, min int64) *Validation {
+	msg := fmt.Sprintf(minItemsFail, name, in, min)
+	if in == "" {
+		msg = fmt.Sprintf(minItemsFailNoIn, name, min)
+	}
+	return &Validation{
+		code:    MinItemsFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// ExceedsMaximumInt error for when maximum validation fails
+func ExceedsMaximumInt(name, in string, max int64, exclusive bool) *Validation {
+	var message string
+	if in == "" {
+		m := maxIncFailNoIn
+		if exclusive {
+			m = maxExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, max)
+	} else {
+		m := maxIncFail
+		if exclusive {
+			m = maxExcFail
+		}
+		message = fmt.Sprintf(m, name, in, max)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   max,
+		message: message,
+	}
+}
+
+// ExceedsMaximumUint error for when maximum validation fails
+func ExceedsMaximumUint(name, in string, max uint64, exclusive bool) *Validation {
+	var message string
+	if in == "" {
+		m := maxIncFailNoIn
+		if exclusive {
+			m = maxExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, max)
+	} else {
+		m := maxIncFail
+		if exclusive {
+			m = maxExcFail
+		}
+		message = fmt.Sprintf(m, name, in, max)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   max,
+		message: message,
+	}
+}
+
+// ExceedsMaximum error for when maximum validation fails
+func ExceedsMaximum(name, in string, max float64, exclusive bool) *Validation {
+	var message string
+	if in == "" {
+		m := maxIncFailNoIn
+		if exclusive {
+			m = maxExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, max)
+	} else {
+		m := maxIncFail
+		if exclusive {
+			m = maxExcFail
+		}
+		message = fmt.Sprintf(m, name, in, max)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   max,
+		message: message,
+	}
+}
+
+// ExceedsMinimumInt error for when minimum validation fails
+func ExceedsMinimumInt(name, in string, min int64, exclusive bool) *Validation {
+	var message string
+	if in == "" {
+		m := minIncFailNoIn
+		if exclusive {
+			m = minExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, min)
+	} else {
+		m := minIncFail
+		if exclusive {
+			m = minExcFail
+		}
+		message = fmt.Sprintf(m, name, in, min)
+	}
+	return &Validation{
+		code:    MinFailCode,
+		Name:    name,
+		In:      in,
+		Value:   min,
+		message: message,
+	}
+}
+
+// ExceedsMinimumUint error for when minimum validation fails
+func ExceedsMinimumUint(name, in string, min uint64, exclusive bool) *Validation {
+	var message string
+	if in == "" {
+		m := minIncFailNoIn
+		if exclusive {
+			m = minExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, min)
+	} else {
+		m := minIncFail
+		if
exclusive {
+			m = minExcFail
+		}
+		message = fmt.Sprintf(m, name, in, min)
+	}
+	return &Validation{
+		code:    MinFailCode,
+		Name:    name,
+		In:      in,
+		Value:   min,
+		message: message,
+	}
+}
+
+// ExceedsMinimum error for when minimum validation fails
+func ExceedsMinimum(name, in string, min float64, exclusive bool) *Validation {
+	var message string
+	if in == "" {
+		m := minIncFailNoIn
+		if exclusive {
+			m = minExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, min)
+	} else {
+		m := minIncFail
+		if exclusive {
+			m = minExcFail
+		}
+		message = fmt.Sprintf(m, name, in, min)
+	}
+	return &Validation{
+		code:    MinFailCode,
+		Name:    name,
+		In:      in,
+		Value:   min,
+		message: message,
+	}
+}
+
+// NotMultipleOf error for when multiple of validation fails
+func NotMultipleOf(name, in string, multiple interface{}) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple)
+	} else {
+		msg = fmt.Sprintf(multipleOfFail, name, in, multiple)
+	}
+	return &Validation{
+		code:    MultipleOfFailCode,
+		Name:    name,
+		In:      in,
+		Value:   multiple,
+		message: msg,
+	}
+}
+
+// EnumFail error for when an enum validation fails
+func EnumFail(name, in string, value interface{}, values []interface{}) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(enumFailNoIn, name, values)
+	} else {
+		msg = fmt.Sprintf(enumFail, name, in, values)
+	}
+
+	return &Validation{
+		code:    EnumFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		Values:  values,
+		message: msg,
+	}
+}
+
+// Required error for when a value is missing
+func Required(name, in string) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(requiredFailNoIn, name)
+	} else {
+		msg = fmt.Sprintf(requiredFail, name, in)
+	}
+	return &Validation{
+		code:    RequiredFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// TooLong error for when a string is too long
+func TooLong(name, in string, max int64) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(tooLongMessageNoIn, name, max)
+	} else {
+		msg = fmt.Sprintf(tooLongMessage, name, in, max)
+	}
+	return &Validation{
+		code:    TooLongFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// TooShort error for when a string is too short
+func TooShort(name, in string, min int64) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(tooShortMessageNoIn, name, min)
+	} else {
+		msg = fmt.Sprintf(tooShortMessage, name, in, min)
+	}
+
+	return &Validation{
+		code:    TooShortFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// FailedPattern error for when a string fails a regex pattern match.
+// The pattern returned is the ECMA syntax version of the pattern, not the Go version.
+func FailedPattern(name, in, pattern string) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(patternFailNoIn, name, pattern) + } else { + msg = fmt.Sprintf(patternFail, name, in, pattern) + } + + return &Validation{ + code: PatternFailCode, + Name: name, + In: in, + message: msg, + } +} + +// MultipleOfMustBePositive error for when a +// multipleOf factor is negative +func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { + return &Validation{ + code: MultipleOfMustBePositiveCode, + Name: name, + In: in, + Value: factor, + message: fmt.Sprintf(multipleOfMustBePositive, name, factor), + } +} diff --git a/vendor/github.com/go-openapi/errors/schema_test.go b/vendor/github.com/go-openapi/errors/schema_test.go new file mode 100644 index 000000000..53093f0c0 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/schema_test.go @@ -0,0 +1,364 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSchemaErrors(t *testing.T) { + err := InvalidType("confirmed", "query", "boolean", nil) + assert.Error(t, err) + assert.EqualValues(t, InvalidTypeCode, err.Code()) + assert.Equal(t, "confirmed in query must be of type boolean", err.Error()) + + err = InvalidType("confirmed", "", "boolean", nil) + assert.Error(t, err) + assert.EqualValues(t, InvalidTypeCode, err.Code()) + assert.Equal(t, "confirmed must be of type boolean", err.Error()) + + err = InvalidType("confirmed", "query", "boolean", "hello") + assert.Error(t, err) + assert.EqualValues(t, InvalidTypeCode, err.Code()) + assert.Equal(t, "confirmed in query must be of type boolean: \"hello\"", err.Error()) + + err = InvalidType("confirmed", "query", "boolean", errors.New("hello")) + assert.Error(t, err) + assert.EqualValues(t, InvalidTypeCode, err.Code()) + assert.Equal(t, "confirmed in query must be of type boolean, because: hello", err.Error()) + + err = InvalidType("confirmed", "", "boolean", "hello") + assert.Error(t, err) + assert.EqualValues(t, InvalidTypeCode, err.Code()) + assert.Equal(t, "confirmed must be of type boolean: \"hello\"", err.Error()) + + err = InvalidType("confirmed", "", "boolean", errors.New("hello")) + assert.Error(t, err) + assert.EqualValues(t, InvalidTypeCode, err.Code()) + assert.Equal(t, "confirmed must be of type boolean, because: hello", err.Error()) + + err = DuplicateItems("uniques", "query") + assert.Error(t, err) + assert.EqualValues(t, UniqueFailCode, err.Code()) + assert.Equal(t, "uniques in query shouldn't contain duplicates", err.Error()) + + err = DuplicateItems("uniques", "") + assert.Error(t, err) + assert.EqualValues(t, UniqueFailCode, err.Code()) + assert.Equal(t, "uniques shouldn't contain duplicates", err.Error()) + + err = TooManyItems("something", "query", 5) + assert.Error(t, err) + assert.EqualValues(t, MaxItemsFailCode, err.Code()) + assert.Equal(t, "something in query should have at most 5 
items", err.Error()) + + err = TooManyItems("something", "", 5) + assert.Error(t, err) + assert.EqualValues(t, MaxItemsFailCode, err.Code()) + assert.Equal(t, "something should have at most 5 items", err.Error()) + + err = TooFewItems("something", "", 5) + assert.Error(t, err) + assert.EqualValues(t, MinItemsFailCode, err.Code()) + assert.Equal(t, "something should have at least 5 items", err.Error()) + + err = ExceedsMaximumInt("something", "query", 5, false) + assert.Error(t, err) + assert.EqualValues(t, MaxFailCode, err.Code()) + assert.Equal(t, "something in query should be less than or equal to 5", err.Error()) + + err = ExceedsMaximumInt("something", "", 5, false) + assert.Error(t, err) + assert.EqualValues(t, MaxFailCode, err.Code()) + assert.Equal(t, "something should be less than or equal to 5", err.Error()) + + err = ExceedsMaximumInt("something", "query", 5, true) + assert.Error(t, err) + assert.EqualValues(t, MaxFailCode, err.Code()) + assert.Equal(t, "something in query should be less than 5", err.Error()) + + err = ExceedsMaximumInt("something", "", 5, true) + assert.Error(t, err) + assert.EqualValues(t, MaxFailCode, err.Code()) + assert.Equal(t, "something should be less than 5", err.Error()) + + err = ExceedsMaximumUint("something", "query", 5, false) + assert.Error(t, err) + assert.EqualValues(t, MaxFailCode, err.Code()) + assert.Equal(t, "something in query should be less than or equal to 5", err.Error()) + + err = ExceedsMaximumUint("something", "", 5, false) + assert.Error(t, err) + assert.EqualValues(t, MaxFailCode, err.Code()) + assert.Equal(t, "something should be less than or equal to 5", err.Error()) + + err = ExceedsMaximumUint("something", "query", 5, true) + assert.Error(t, err) + assert.EqualValues(t, MaxFailCode, err.Code()) + assert.Equal(t, "something in query should be less than 5", err.Error()) + + err = ExceedsMaximumUint("something", "", 5, true) + assert.Error(t, err) + assert.EqualValues(t, MaxFailCode, err.Code()) + assert.Equal(t, "something should be less than 5", err.Error()) + + err = ExceedsMaximum("something", "query", 5, false) + assert.Error(t, err) + assert.EqualValues(t, MaxFailCode, err.Code()) + assert.Equal(t, "something in query should be less than or equal to 5", err.Error()) + + err = ExceedsMaximum("something", "", 5, false) + assert.Error(t, err) + assert.EqualValues(t, MaxFailCode, err.Code()) + assert.Equal(t, "something should be less than or equal to 5", err.Error()) + + err = ExceedsMaximum("something", "query", 5, true) + assert.Error(t, err) + assert.EqualValues(t, MaxFailCode, err.Code()) + assert.Equal(t, "something in query should be less than 5", err.Error()) + + err = ExceedsMaximum("something", "", 5, true) + assert.Error(t, err) + assert.EqualValues(t, MaxFailCode, err.Code()) + assert.Equal(t, "something should be less than 5", err.Error()) + + err = ExceedsMinimumInt("something", "query", 5, false) + assert.Error(t, err) + assert.EqualValues(t, MinFailCode, err.Code()) + assert.Equal(t, "something in query should be greater than or equal to 5", err.Error()) + + err = ExceedsMinimumInt("something", "", 5, false) + assert.Error(t, err) + assert.EqualValues(t, MinFailCode, err.Code()) + assert.Equal(t, "something should be greater than or equal to 5", err.Error()) + + err = ExceedsMinimumInt("something", "query", 5, true) + assert.Error(t, err) + assert.EqualValues(t, MinFailCode, err.Code()) + assert.Equal(t, "something in query should be greater than 5", err.Error()) + + err = ExceedsMinimumInt("something", "", 5, 
true) + assert.Error(t, err) + assert.EqualValues(t, MinFailCode, err.Code()) + assert.Equal(t, "something should be greater than 5", err.Error()) + + err = ExceedsMinimumUint("something", "query", 5, false) + assert.Error(t, err) + assert.EqualValues(t, MinFailCode, err.Code()) + assert.Equal(t, "something in query should be greater than or equal to 5", err.Error()) + + err = ExceedsMinimumUint("something", "", 5, false) + assert.Error(t, err) + assert.EqualValues(t, MinFailCode, err.Code()) + assert.Equal(t, "something should be greater than or equal to 5", err.Error()) + + err = ExceedsMinimumUint("something", "query", 5, true) + assert.Error(t, err) + assert.EqualValues(t, MinFailCode, err.Code()) + assert.Equal(t, "something in query should be greater than 5", err.Error()) + + err = ExceedsMinimumUint("something", "", 5, true) + assert.Error(t, err) + assert.EqualValues(t, MinFailCode, err.Code()) + assert.Equal(t, "something should be greater than 5", err.Error()) + + err = ExceedsMinimum("something", "query", 5, false) + assert.Error(t, err) + assert.EqualValues(t, MinFailCode, err.Code()) + assert.Equal(t, "something in query should be greater than or equal to 5", err.Error()) + + err = ExceedsMinimum("something", "", 5, false) + assert.Error(t, err) + assert.EqualValues(t, MinFailCode, err.Code()) + assert.Equal(t, "something should be greater than or equal to 5", err.Error()) + + err = ExceedsMinimum("something", "query", 5, true) + assert.Error(t, err) + assert.EqualValues(t, MinFailCode, err.Code()) + assert.Equal(t, "something in query should be greater than 5", err.Error()) + + err = ExceedsMinimum("something", "", 5, true) + assert.Error(t, err) + assert.EqualValues(t, MinFailCode, err.Code()) + assert.Equal(t, "something should be greater than 5", err.Error()) + + err = NotMultipleOf("something", "query", 5) + assert.Error(t, err) + assert.EqualValues(t, MultipleOfFailCode, err.Code()) + assert.Equal(t, "something in query should be a multiple of 5", err.Error()) + + err = NotMultipleOf("something", "query", float64(5)) + assert.Error(t, err) + assert.EqualValues(t, MultipleOfFailCode, err.Code()) + assert.Equal(t, "something in query should be a multiple of 5", err.Error()) + + err = NotMultipleOf("something", "query", uint64(5)) + assert.Error(t, err) + assert.EqualValues(t, MultipleOfFailCode, err.Code()) + assert.Equal(t, "something in query should be a multiple of 5", err.Error()) + + err = NotMultipleOf("something", "", 5) + assert.Error(t, err) + assert.EqualValues(t, MultipleOfFailCode, err.Code()) + assert.Equal(t, "something should be a multiple of 5", err.Error()) + + err = EnumFail("something", "query", "yada", []interface{}{"hello", "world"}) + assert.Error(t, err) + assert.EqualValues(t, EnumFailCode, err.Code()) + assert.Equal(t, "something in query should be one of [hello world]", err.Error()) + + err = EnumFail("something", "", "yada", []interface{}{"hello", "world"}) + assert.Error(t, err) + assert.EqualValues(t, EnumFailCode, err.Code()) + assert.Equal(t, "something should be one of [hello world]", err.Error()) + + err = Required("something", "query") + assert.Error(t, err) + assert.EqualValues(t, RequiredFailCode, err.Code()) + assert.Equal(t, "something in query is required", err.Error()) + + err = Required("something", "") + assert.Error(t, err) + assert.EqualValues(t, RequiredFailCode, err.Code()) + assert.Equal(t, "something is required", err.Error()) + + err = TooLong("something", "query", 5) + assert.Error(t, err) + assert.EqualValues(t, 
TooLongFailCode, err.Code()) + assert.Equal(t, "something in query should be at most 5 chars long", err.Error()) + + err = TooLong("something", "", 5) + assert.Error(t, err) + assert.EqualValues(t, TooLongFailCode, err.Code()) + assert.Equal(t, "something should be at most 5 chars long", err.Error()) + + err = TooShort("something", "query", 5) + assert.Error(t, err) + assert.EqualValues(t, TooShortFailCode, err.Code()) + assert.Equal(t, "something in query should be at least 5 chars long", err.Error()) + + err = TooShort("something", "", 5) + assert.Error(t, err) + assert.EqualValues(t, TooShortFailCode, err.Code()) + assert.Equal(t, "something should be at least 5 chars long", err.Error()) + + err = FailedPattern("something", "query", "\\d+") + assert.Error(t, err) + assert.EqualValues(t, PatternFailCode, err.Code()) + assert.Equal(t, "something in query should match '\\d+'", err.Error()) + + err = FailedPattern("something", "", "\\d+") + assert.Error(t, err) + assert.EqualValues(t, PatternFailCode, err.Code()) + assert.Equal(t, "something should match '\\d+'", err.Error()) + + err = InvalidTypeName("something") + assert.Error(t, err) + assert.EqualValues(t, InvalidTypeCode, err.Code()) + assert.Equal(t, "something is an invalid type name", err.Error()) + + err = AdditionalItemsNotAllowed("something", "query") + assert.Error(t, err) + assert.EqualValues(t, NoAdditionalItemsCode, err.Code()) + assert.Equal(t, "something in query can't have additional items", err.Error()) + + err = AdditionalItemsNotAllowed("something", "") + assert.Error(t, err) + assert.EqualValues(t, NoAdditionalItemsCode, err.Code()) + assert.Equal(t, "something can't have additional items", err.Error()) + + err = InvalidCollectionFormat("something", "query", "yada") + assert.Error(t, err) + assert.EqualValues(t, InvalidTypeCode, err.Code()) + assert.Equal(t, "the collection format \"yada\" is not supported for the query param \"something\"", err.Error()) + + err2 := CompositeValidationError() + assert.Error(t, err2) + assert.EqualValues(t, CompositeErrorCode, err2.Code()) + assert.Equal(t, "validation failure list", err2.Error()) + + err2 = CompositeValidationError(fmt.Errorf("First error"), fmt.Errorf("Second error")) + assert.Error(t, err2) + assert.EqualValues(t, CompositeErrorCode, err2.Code()) + assert.Equal(t, "validation failure list:\nFirst error\nSecond error", err2.Error()) + + //func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { + err = MultipleOfMustBePositive("path", "body", float64(-10)) + assert.Error(t, err) + assert.EqualValues(t, MultipleOfMustBePositiveCode, err.Code()) + assert.Equal(t, `factor MultipleOf declared for path must be positive: -10`, err.Error()) + + err = MultipleOfMustBePositive("path", "body", int64(-10)) + assert.Error(t, err) + assert.EqualValues(t, MultipleOfMustBePositiveCode, err.Code()) + assert.Equal(t, `factor MultipleOf declared for path must be positive: -10`, err.Error()) + + // func PropertyNotAllowed(name, in, key string) *Validation { + err = PropertyNotAllowed("path", "body", "key") + assert.Error(t, err) + assert.EqualValues(t, UnallowedPropertyCode, err.Code()) + //unallowedProperty = "%s.%s in %s is a forbidden property" + assert.Equal(t, "path.key in body is a forbidden property", err.Error()) + + err = PropertyNotAllowed("path", "", "key") + assert.Error(t, err) + assert.EqualValues(t, UnallowedPropertyCode, err.Code()) + //unallowedPropertyNoIn = "%s.%s is a forbidden property" + assert.Equal(t, "path.key is a forbidden property", 
err.Error()) + + //func TooManyProperties(name, in string, n int64) *Validation { + err = TooManyProperties("path", "body", 10) + assert.Error(t, err) + assert.EqualValues(t, TooManyPropertiesCode, err.Code()) + //tooManyProperties = "%s in %s should have at most %d properties" + assert.Equal(t, "path in body should have at most 10 properties", err.Error()) + + err = TooManyProperties("path", "", 10) + assert.Error(t, err) + assert.EqualValues(t, TooManyPropertiesCode, err.Code()) + //tooManyPropertiesNoIn = "%s should have at most %d properties" + assert.Equal(t, "path should have at most 10 properties", err.Error()) + + err = TooFewProperties("path", "body", 10) + // func TooFewProperties(name, in string, n int64) *Validation { + assert.Error(t, err) + assert.EqualValues(t, TooFewPropertiesCode, err.Code()) + //tooFewProperties = "%s in %s should have at least %d properties" + assert.Equal(t, "path in body should have at least 10 properties", err.Error()) + + err = TooFewProperties("path", "", 10) + // func TooFewProperties(name, in string, n int64) *Validation { + assert.Error(t, err) + assert.EqualValues(t, TooFewPropertiesCode, err.Code()) + //tooFewPropertiesNoIn = "%s should have at least %d properties" + assert.Equal(t, "path should have at least 10 properties", err.Error()) + + //func FailedAllPatternProperties(name, in, key string) *Validation { + err = FailedAllPatternProperties("path", "body", "key") + assert.Error(t, err) + assert.EqualValues(t, FailedAllPatternPropsCode, err.Code()) + //failedAllPatternProps = "%s.%s in %s failed all pattern properties" + assert.Equal(t, "path.key in body failed all pattern properties", err.Error()) + + err = FailedAllPatternProperties("path", "", "key") + assert.Error(t, err) + assert.EqualValues(t, FailedAllPatternPropsCode, err.Code()) + //failedAllPatternPropsNoIn = "%s.%s failed all pattern properties" + assert.Equal(t, "path.key failed all pattern properties", err.Error()) +} diff --git a/vendor/github.com/go-openapi/loads/.drone.sec b/vendor/github.com/go-openapi/loads/.drone.sec new file mode 100644 index 000000000..6d3e84399 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.drone.sec @@ -0,0 +1 @@ +eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.xUjixvmMMeampw0Doyr_XLvcV5ICmDgDFmlcWqgmO84O3Hwn6dqbMkwOjpKMOyEJW_98b5Om5ED59IFt2S0T_OarlrsJL8jOK5fqxSMNXy2w8LfI-e5l1URverW41ofAVK8m9wK05q2BSJM_M6PyyODaQeDBiCVK1HreMZBlXpuUDVtBMPILQoga0eSZOsTR3DYEpZIS0A0Rsa5yIhMYR5d5-JMYqbqOCB7tNJ-BM83OzYgL7Hrz0J15kqaJmhQ-GJoMJDzOemSO9KxLCOfSPp11R_G3Mfd48xYnuiRuPOTakbOCLxuYviH6uoGVIOhnMyY9qKiDKbOn4BQUi1-igA.6qjQzq9nzAxRRKV_.z79R5cMFAEuEaAh6U9ykiL8oIqzMbs_I2C-hSFRh3HYRJ4fTB-9LrcbF0uASIOq7bBn4OQzW-0QFwYOs1uaawmrByGngV5d0afiZf_LBKcmTF2vtxRi_A_nxD-EHoPmh3lKBU5WNDe_8kLjEeS89HeyyFPuv5iQbqhzdqPFohHKVigwVqVYYLjB8GWQ4t7tC4c8l5rHanaXf71W0e3op2m8bebpZL0JPGhnULVA1oU27TYeLsO112JkIYtBwZxzvAs--bBFoKeGJWVMFzrKN68UACGZ9RFw0uGJbBmVC4-jRuIc6XpqeEqw3KG-rjFzkeEor3575qW-8kiXYqpub9SFUc3SSZkxJ8hB3SrnMBOuDUSenrXNpAbltmV3KAALzN3_bMBQuihwSRIn0Hg7-Dpni8BieMe44RMDvRu6p_71aeU_KW4V7Umy_h8gpIvQFuKGdTQH2ahsyCXL0ojqjMbVMdoWpDQTQ2_Fy8Qt_p2kJ8BgDo-1Akd4a6BNU2NGqsdnrJmtVKcTqLBadf9ylCwxHdGVrtNYORALSms2T6Q1s-poQnMjIwN8lnUD8ABUBpt4uVtrYkiWPVwrwywLQeiHhR-pboe_53kWDAx4Hy4rpbKsaxanYhy_bEbAYKb3aIUA.75GD4kRBCQdcGFYP1QYdCg \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/.drone.yml b/vendor/github.com/go-openapi/loads/.drone.yml new file mode 100644 index 000000000..982291035 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.drone.yml @@ -0,0 +1,39 @@ +clone: + path: 
github.com/go-openapi/loads + +matrix: + GO_VERSION: + - "1.6" + +build: + integration: + image: golang:$$GO_VERSION + pull: true + environment: + GOCOVMODE: "count" + commands: + - go get -u github.com/axw/gocov/gocov + - go get -u gopkg.in/matm/v1/gocov-html + - go get -u github.com/cee-dub/go-junit-report + - go get -u github.com/stretchr/testify/assert + - go get -u gopkg.in/yaml.v2 + - go get -u github.com/go-openapi/swag + - go get -u github.com/go-openapi/analysis + - go get -u github.com/go-openapi/spec + - ./hack/build-drone.sh + +notify: + slack: + channel: bots + webhook_url: $$SLACK_URL + username: drone + +publish: + coverage: + server: https://coverage.vmware.run + token: $$GITHUB_TOKEN + # threshold: 70 + # must_increase: true + when: + matrix: + GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig new file mode 100644 index 000000000..3152da69a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.github/CONTRIBUTING.md b/vendor/github.com/go-openapi/loads/.github/CONTRIBUTING.md new file mode 100644 index 000000000..7dea4240d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.github/CONTRIBUTING.md @@ -0,0 +1,117 @@ +## Contribution Guidelines + +### Pull requests are always welcome + +We are always thrilled to receive pull requests, and do our best to +process them as fast as possible. Not sure if that typo is worth a pull +request? Do it! We will appreciate it. + +If your pull request is not accepted on the first try, don't be +discouraged! If there's a problem with the implementation, hopefully you +received feedback on what to improve. + +We're trying very hard to keep go-swagger lean and focused. We don't want it +to do everything for everybody. This means that we might decide against +incorporating a new feature. However, there might be a way to implement +that feature *on top of* go-swagger. + + +### Conventions + +Fork the repo and make changes on your fork in a feature branch: + +- If it's a bugfix branch, name it XXX-something where XXX is the number of the + issue +- If it's a feature branch, create an enhancement issue to announce your + intentions, and name it XXX-something where XXX is the number of the issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. Run the full test suite on +your branch before submitting a pull request. + +Update the documentation when creating or modifying features. Test +your documentation changes for clarity, concision, and correctness, as +well as a clean documentation build. See ``docs/README.md`` for more +information on building the docs and how docs get released. + +Write clean code. Universally formatted code promotes ease of writing, reading, +and maintenance. 
Always run `gofmt -s -w file.go` on each changed file before
+committing your changes. Most editors have plugins that do this automatically.
+
+Pull request descriptions should be as clear as possible and include a
+reference to all the issues that they address.
+
+Pull requests must not contain commits from other users or branches.
+
+Commit messages must start with a capitalized and short summary (max. 50
+chars) written in the imperative, followed by an optional, more detailed
+explanatory text which is separated from the summary by an empty line.
+
+Code review comments may be added to your pull request. Discuss, then make the
+suggested modifications and push additional commits to your feature branch. Be
+sure to post a comment after pushing. The new commits will show up in the pull
+request automatically, but the reviewers will not be notified unless you
+comment.
+
+Before the pull request is merged, make sure that you squash your commits into
+logical units of work using `git rebase -i` and `git push -f`. After every
+commit the test suite should be passing. Include documentation changes in the
+same commit so that a revert would remove all traces of the feature or fix.
+
+Commits that fix or close an issue should include a reference like `Closes #XXX`
+or `Fixes #XXX`, which will automatically close the issue when merged.
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the
+patch, which certifies that you wrote it or otherwise have the right to
+pass it on as an open-source patch. The rules are pretty simple: if you
+can certify the below (from
+[developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+using your real name (sorry, no pseudonyms or anonymous contributions).
+
+You can add the sign-off when creating the git commit via `git commit -s`.
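The guidelines above ask for unit tests written with Go's built-in `testing` package. A minimal sketch in the style used throughout this vendor tree (testify assertions against a constructed error; the test name is illustrative, and it exercises the `errors` package added earlier in this diff):

```go
package errors

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestRequiredSketch builds an error value and asserts on both its
// numeric code and its rendered message, mirroring schema_test.go.
func TestRequiredSketch(t *testing.T) {
	err := Required("name", "query")
	assert.EqualValues(t, RequiredFailCode, err.Code())
	assert.Equal(t, "name in query is required", err.Error())
}
```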
diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore new file mode 100644 index 000000000..e4f15f17b --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.gitignore @@ -0,0 +1,4 @@ +secrets.yml +coverage.out +profile.cov +profile.out diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml new file mode 100644 index 000000000..b0d357e65 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.travis.yml @@ -0,0 +1,16 @@ +language: go +go: +- 1.8 +install: +- go get -u github.com/stretchr/testify +- go get -u github.com/go-openapi/analysis +- go get -u github.com/go-openapi/spec +- go get -u github.com/go-openapi/swag +- go get -u gopkg.in/yaml.v2 +script: +- ./hack/coverage +after_success: +- bash <(curl -s https://codecov.io/bash) +notifications: + slack: + secure: OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md new file mode 100644 index 000000000..6dbb8342e --- /dev/null +++ b/vendor/github.com/go-openapi/loads/README.md @@ -0,0 +1,5 @@ +# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) + +Loading of OAI specification documents from local or remote locations. 
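A short usage sketch for this package, assuming the loader entry points in this vendored revision (`loads.Spec`, `Document.Expanded`, `Document.Spec`); the file path is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/loads"
)

func main() {
	// Load an OAI (swagger 2.0) document from a local path or remote URL.
	doc, err := loads.Spec("./swagger.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("swagger version:", doc.Version())

	// Resolve $ref pointers into a self-contained document.
	expanded, err := doc.Expanded()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("paths defined:", len(expanded.Spec().Paths.Paths))
}
```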
diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithArrayRef.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithArrayRef.json new file mode 100644 index 000000000..25e8903a7 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithArrayRef.json @@ -0,0 +1,17 @@ +{ + "required": [ + "id" + ], + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "children": { + "type": "array", + "items": { + "$ref": "Person" + } + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithComposition.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithComposition.json new file mode 100644 index 000000000..21e01dc2a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithComposition.json @@ -0,0 +1,68 @@ +{ + "definitions": { + "Cat": { + "description": "A representation of a cat", + "allOf": [ + { "$ref": "#/models/Pet" }, + { + "properties": { + "huntingSkill": { + "type": "string", + "description": "The measured skill for hunting", + "default": "lazy", + "enum": ["clueless", "lazy", "adventerous", "aggressive"] + } + }, + "required": [ "huntingSkill" ] + } + ] + }, + "Dog": { + "description": "A representation of a dog", + "allOf": [ + { "$ref": "#/models/Pet" }, + { + "properties": { + "packSize": { + "type": "integer", + "format": "int32", + "description": "the size of the pack the dog is from", + "default": 0, + "minimum": 0 + } + }, + "required": [ "name", "packSize" ] + } + ] + }, + "Fish": { + "description": "A representation of a fish", + "allOf": [ + { "$ref": "#/models/Pet" }, + { + "properties": { + "fins": { + "type": "integer", + "format": "int32", + "description": "count of fins", + "minimum": 0 + } + }, + "required": [ "fins" ] + } + ] + }, + "Pet": { + "discriminator": "petType", + "properties": { + "name": { + "type": "string" + }, + "petType": { + "type": "string" + } + }, + "required": [ "name", "petType" ] + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithDateTimeMap.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithDateTimeMap.json new file mode 100644 index 000000000..a5ebdf9b2 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithDateTimeMap.json @@ -0,0 +1,7 @@ +{ + "description": "true", + "additionalProperties": { + "type": "string", + "format": "date-time" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithExamples.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithExamples.json new file mode 100644 index 000000000..f62be6624 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithExamples.json @@ -0,0 +1,28 @@ +{ + "definitions": { + "Pet": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ] + }, + "Dog": { + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithInt32Map.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithInt32Map.json new file mode 100644 index 000000000..b4597e846 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithInt32Map.json @@ -0,0 +1,7 @@ +{ + "description": "This is a Map[String, Integer]", + 
"additionalProperties": { + "type": "integer", + "format": "int32" + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithInt64Map.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithInt64Map.json new file mode 100644 index 000000000..5160f21c4 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithInt64Map.json @@ -0,0 +1,7 @@ +{ + "description": "true", + "additionalProperties": { + "type": "integer", + "format": "int64" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithMultipleProperties.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithMultipleProperties.json new file mode 100644 index 000000000..dba5e64ab --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithMultipleProperties.json @@ -0,0 +1,67 @@ +{ + "description": "true", + "properties": { + "booleanValue": { + "type": "boolean" + }, + "byteValue": { + "type": "string", + "format": "byte" + }, + "dateTimeValue": { + "type": "string", + "format": "date-time" + }, + "int32Value": { + "type": "integer", + "format": "int32" + }, + "int64Value": { + "type": "integer", + "format": "int64" + }, + "stringValue": { + "type": "string" + }, + "booleanArrayValue": { + "type": "array", + "items": { + "type": "boolean" + } + }, + "byteArrayValue": { + "type": "array", + "items": { + "type": "string", + "format": "byte" + } + }, + "dateTimeArrayValue": { + "type": "array", + "items": { + "type": "string", + "format": "date-time" + } + }, + "int32ArrayValue": { + "type": "array", + "items": { + "type": "integer", + "format": "int32" + } + }, + "int64ArrayValue": { + "type": "array", + "items": { + "type": "integer", + "format": "int64" + } + }, + "stringArrayValue": { + "type": "array", + "items": { + "type": "string" + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithObjectMap.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithObjectMap.json new file mode 100644 index 000000000..3e9fb7700 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithObjectMap.json @@ -0,0 +1,15 @@ +{ + "description": "This is a Map[String, { id: Long, name: String}]", + "additionalProperties": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + } + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithPrimitiveArray.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithPrimitiveArray.json new file mode 100644 index 000000000..bc1a8e174 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithPrimitiveArray.json @@ -0,0 +1,18 @@ +{ + "required": [ + "id" + ], + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "childrensAges": { + "type": "array", + "items": { + "type": "integer", + "format": "int32" + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithStringProperty.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithStringProperty.json new file mode 100644 index 000000000..53b4fe1d7 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithStringProperty.json @@ -0,0 +1,8 @@ +{ + "description": "true", + "properties": { + "name": { + "type": "string" + } + } +} \ No 
newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithXmlAttributes.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithXmlAttributes.json new file mode 100644 index 000000000..1aed1a9f7 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/modelWithXmlAttributes.json @@ -0,0 +1,26 @@ +{ + "description": "this model serves xml and json structures", + "xml": { + "name": "XMLModel" + }, + "properties": { + "id": { + "type": "integer", + "format": "int64", + "xml": { + "attribute": true, + "namespace": "ns1", + "prefix": "urn1" + } + }, + "items": { + "type": "array", + "items": { + "type": "string" + }, + "xml": { + "wrapped": true + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/models.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/models.json new file mode 100644 index 000000000..7eff6dbe0 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/models.json @@ -0,0 +1,14 @@ +{ + "definitions": { + "Pet": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/multipleModels.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/multipleModels.json new file mode 100644 index 000000000..f62be6624 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/multipleModels.json @@ -0,0 +1,28 @@ +{ + "definitions": { + "Pet": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ] + }, + "Dog": { + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithBooleanArray.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithBooleanArray.json new file mode 100644 index 000000000..e7ae99b07 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithBooleanArray.json @@ -0,0 +1,6 @@ +{ + "type": "array", + "items": { + "type": "boolean" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithByteArray.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithByteArray.json new file mode 100644 index 000000000..a6cb13c18 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithByteArray.json @@ -0,0 +1,7 @@ +{ + "type": "array", + "items": { + "type": "string", + "format": "byte" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithComplexArray.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithComplexArray.json new file mode 100644 index 000000000..c2402e0c6 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithComplexArray.json @@ -0,0 +1,6 @@ +{ + "type": "array", + "items": { + "$ref": "ComplexType" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithDateTimeArray.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithDateTimeArray.json new file 
mode 100644 index 000000000..8b1077316 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithDateTimeArray.json @@ -0,0 +1,7 @@ +{ + "type": "array", + "items": { + "type": "string", + "format": "date-time" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithInt32Array.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithInt32Array.json new file mode 100644 index 000000000..5aae819ca --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithInt32Array.json @@ -0,0 +1,7 @@ +{ + "type": "array", + "items": { + "type": "integer", + "format": "int32" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithInt64Array.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithInt64Array.json new file mode 100644 index 000000000..5b1f55159 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithInt64Array.json @@ -0,0 +1,7 @@ +{ + "type": "array", + "items": { + "type": "integer", + "format": "int64" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithRef.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithRef.json new file mode 100644 index 000000000..d8ffdd589 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithRef.json @@ -0,0 +1,5 @@ +{ + "$ref": "Foo", + "description": "a boolean", + "readOnly": true +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithStringArray.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithStringArray.json new file mode 100644 index 000000000..7936520d7 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/propertyWithStringArray.json @@ -0,0 +1,6 @@ +{ + "type": "array", + "items": { + "type": "string" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleBooleanProperty.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleBooleanProperty.json new file mode 100644 index 000000000..04153ed86 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleBooleanProperty.json @@ -0,0 +1,5 @@ +{ + "type": "boolean", + "description": "a boolean", + "readOnly": true +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleByteProperty.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleByteProperty.json new file mode 100644 index 000000000..74fc007f7 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleByteProperty.json @@ -0,0 +1,4 @@ +{ + "type": "string", + "format": "byte" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleDateTimeProperty.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleDateTimeProperty.json new file mode 100644 index 000000000..a154c6b38 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleDateTimeProperty.json @@ -0,0 +1,4 @@ +{ + "type": "string", + "format": "date-time" +} \ 
No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleInt32Property.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleInt32Property.json new file mode 100644 index 000000000..b436c52c7 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleInt32Property.json @@ -0,0 +1,4 @@ +{ + "type": "integer", + "format": "int32" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleInt64Property.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleInt64Property.json new file mode 100644 index 000000000..1f3419620 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleInt64Property.json @@ -0,0 +1,4 @@ +{ + "type": "integer", + "format": "int64" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleStringProperty.json b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleStringProperty.json new file mode 100644 index 000000000..28ac6e864 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/models/properties/simpleStringProperty.json @@ -0,0 +1,3 @@ +{ + "type": "string" +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/cascadingSchemes.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/cascadingSchemes.json new file mode 100644 index 000000000..68c970292 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/cascadingSchemes.json @@ -0,0 +1,98 @@ +{ + "swagger": "2.0", + "info": { + "version": "1.0.9-abcd", + "title": "Swagger Sample API", + "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", + "termsOfService": "http://helloreverb.com/terms/", + "contact": { + "name": "wordnik api team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "host": "my.api.com", + "basePath": "/v1", + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json", + "application/xml" + ], + "paths": { + "/pets/{petId}": { + "get": { + "description": "Returns a pet based on ID", + "summary": "Find pet by ID", + "operationId": "getPetsById", + "produces": [ + "application/json", + "text/html" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet that needs to be fetched", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv" + } + ], + "responses": { + "200": { + "description": "pet response", + "schema": { + "$ref": "Pet" + } + }, + "default": { + "description": "error payload", + "schema": { + "$ref": "ErrorModel" + } + } + }, + "schemes": [ "https" ] + } + } + }, + "definitions": { + "Pet": { + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ErrorModel": { + "required": [ "code", "message" ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/commonParameters.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/commonParameters.json new file 
mode 100644 index 000000000..5a9a12841 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/commonParameters.json @@ -0,0 +1,100 @@ +{ + "swagger": "2.0", + "info": { + "version": "1.0.9-abcd", + "title": "Swagger Sample API", + "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", + "termsOfService": "http://helloreverb.com/terms/", + "contact": { + "name": "wordnik api team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "host": "my.api.com", + "basePath": "/v1", + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json", + "application/xml" + ], + "paths": { + "/pets/{id}": { + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of pet to use", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv" + } + ], + "get": { + "description": "Returns pets based on ID", + "summary": "Find pets by ID", + "operationId": "getPetsById", + "produces": [ + "application/json", + "text/html" + ], + "responses": { + "200": { + "description": "pet response", + "schema": { + "type": "array", + "items": { + "$ref": "Pet" + } + } + }, + "default": { + "description": "error payload", + "schema": { + "$ref": "ErrorModel" + } + } + } + } + } + }, + "definitions": { + "Pet": { + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ErrorModel": { + "required": [ "code", "message" ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/multipleMimeTypes.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/multipleMimeTypes.json new file mode 100644 index 000000000..fa014914d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/multipleMimeTypes.json @@ -0,0 +1,109 @@ +{ + "swagger": "2.0", + "info": { + "version": "1.0.9-abcd", + "title": "Swagger Sample API", + "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", + "termsOfService": "http://helloreverb.com/terms/", + "contact": { + "name": "wordnik api team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "host": "my.api.com", + "basePath": "/v1", + "schemes": [ + "http", + "https" + ], + "consumes": [ + "text/plain; charset=utf-8", + "application/json", + "application/vnd.github+json", + "application/vnd.github.v3+json", + "application/vnd.github.v3.raw+json", + "application/vnd.github.v3.text+json", + "application/vnd.github.v3.html+json", + "application/vnd.github.v3.full+json", + "application/vnd.github.v3.diff", + "application/vnd.github.v3.patch" + ], + "produces": [ + "application/json", + "application/xml" + ], + "paths": { + "/pets/{id}": { + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of pet to use", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv" + } + ], + "get": { + "description": "Returns pets based on ID", + "summary": "Find pets by ID", + "operationId": 
"getPetsById", + "produces": [ + "application/json", + "text/html" + ], + "responses": { + "200": { + "description": "pet response", + "schema": { + "type": "array", + "items": { + "$ref": "Pet" + } + } + }, + "default": { + "description": "error payload", + "schema": { + "$ref": "ErrorModel" + } + } + } + } + } + }, + "definitions": { + "Pet": { + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ErrorModel": { + "required": [ "code", "message" ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/operations/operationWithTags.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/operations/operationWithTags.json new file mode 100644 index 000000000..832560566 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/operations/operationWithTags.json @@ -0,0 +1,30 @@ +{ + "description": "Returns a pet based on ID", + "summary": "Find pet by ID", + "operationId": "getPetsById", + "tags": [ "foo", "bar"], + "produces": [ + "application/json", + "text/html" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet that needs to be fetched", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "200": { + "description": "a pet to be returned", + "schema": {"$ref": "Pet"} + }, + "default": { + "description": "Unexpected error", + "schema": {"$ref": "ErrorModel"} + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/operations/stringPathAndBoolQueryParamResource.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/operations/stringPathAndBoolQueryParamResource.json new file mode 100644 index 000000000..75f0efa28 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/operations/stringPathAndBoolQueryParamResource.json @@ -0,0 +1,36 @@ +{ + "description": "Returns a pet based on ID", + "summary": "Find pet by ID", + "operationId": "getPetsById", + "produces": [ + "application/json", + "text/html" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet that needs to be fetched", + "required": true, + "type": "integer", + "format": "int64" + }, + { + "name": "includeDetails", + "in": "query", + "description": "include details in response", + "required": true, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "a pet to be returned", + "schema": {"$ref": "Pet"} + }, + "default": { + "description": "Unexpected error", + "schema": {"$ref": "ErrorModel"} + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/operations/stringPathParamResource.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/operations/stringPathParamResource.json new file mode 100644 index 000000000..14eba194a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/operations/stringPathParamResource.json @@ -0,0 +1,33 @@ +{ + "description": "Returns a pet based on ID", + "summary": "Find pet by ID", + "operationId": "getPetsById", + "produces": [ + "application/json", + "text/html" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet that needs to be fetched", + "required": true, + "type": "integer", + "format": "int64" + } + ], + 
"responses": { + "200": { + "description": "fun", + "schema": {"$ref": "Pet"} + }, + "400": { + "description": "Invalid ID supplied <= this is purely for documentation", + "schema": {"$ref": "ErrorModel"} + }, + "default": { + "description": "Unexpected error", + "schema": {"$ref": "ErrorModel"} + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyComplexArrayParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyComplexArrayParameter.json new file mode 100644 index 000000000..5a4ef2582 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyComplexArrayParameter.json @@ -0,0 +1,13 @@ +{ + "name": "user", + "in": "body", + "description": "user to add to the system", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "string" + }, + "format": "csv" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyComplexParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyComplexParameter.json new file mode 100644 index 000000000..4a5327a81 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyComplexParameter.json @@ -0,0 +1,9 @@ +{ + "name": "user", + "in": "body", + "description": "user to add to the system", + "required": true, + "schema": { + "$ref": "User" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyInt64Parameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyInt64Parameter.json new file mode 100644 index 000000000..5bd668b81 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyInt64Parameter.json @@ -0,0 +1,10 @@ +{ + "name": "id", + "in": "body", + "description": "id to add", + "required": true, + "schema": { + "type": "integer", + "format": "int64" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyStringArrayParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyStringArrayParameter.json new file mode 100644 index 000000000..535a3056d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyStringArrayParameter.json @@ -0,0 +1,12 @@ +{ + "name": "user", + "in": "body", + "description": "user to add to the system", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "string" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyStringParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyStringParameter.json new file mode 100644 index 000000000..499435a5b --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/bodyStringParameter.json @@ -0,0 +1,9 @@ +{ + "name": "user", + "in": "body", + "description": "user to add to the system", + "required": true, + "schema": { + "type": "string" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/formDataComplexParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/formDataComplexParameter.json new file mode 100644 index 000000000..aa09e7160 --- /dev/null +++ 
b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/formDataComplexParameter.json @@ -0,0 +1,7 @@ +{ + "name": "firstName", + "in": "formData", + "description": "users first name", + "required": true, + "$ref": "Nothing" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/formDataInt64Parameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/formDataInt64Parameter.json new file mode 100644 index 000000000..c00176cb4 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/formDataInt64Parameter.json @@ -0,0 +1,8 @@ +{ + "name": "id", + "in": "formData", + "description": "username to fetch", + "required": true, + "type": "integer", + "format": "int64" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/formDataStringArrayParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/formDataStringArrayParameter.json new file mode 100644 index 000000000..9d06ef465 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/formDataStringArrayParameter.json @@ -0,0 +1,10 @@ +{ + "name": "user", + "in": "formData", + "description": "user to add to the system", + "required": true, + "type": "array", + "items": { + "type": "string" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/formDataStringParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/formDataStringParameter.json new file mode 100644 index 000000000..98d66e63d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/formDataStringParameter.json @@ -0,0 +1,7 @@ +{ + "name": "firstName", + "in": "formData", + "description": "users first name", + "required": true, + "type": "string" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/headerInt64ArrayParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/headerInt64ArrayParameter.json new file mode 100644 index 000000000..81c8f2f13 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/headerInt64ArrayParameter.json @@ -0,0 +1,12 @@ +{ + "name": "token", + "in": "header", + "description": "token to be passed as a header", + "required": true, + "type": "array", + "items": { + "type": "integer", + "format": "int64" + }, + "collectionFormat": "csv" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/headerStringArrayParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/headerStringArrayParameter.json new file mode 100644 index 000000000..d6b134f93 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/headerStringArrayParameter.json @@ -0,0 +1,11 @@ +{ + "name": "token", + "in": "header", + "description": "token to be passed as a header", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/headerStringParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/headerStringParameter.json new file mode 100644 index 000000000..d10051ed2 --- /dev/null +++ 
b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/headerStringParameter.json @@ -0,0 +1,7 @@ +{ + "name": "token", + "in": "header", + "description": "token to be passed as a header", + "required": true, + "type": "string" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/pathInt64Parameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/pathInt64Parameter.json new file mode 100644 index 000000000..47bf2e0b9 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/pathInt64Parameter.json @@ -0,0 +1,8 @@ +{ + "name": "id", + "in": "path", + "description": "username to fetch", + "required": true, + "type": "integer", + "format": "int64" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/pathStringArrayParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/pathStringArrayParameter.json new file mode 100644 index 000000000..6da46aa5d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/pathStringArrayParameter.json @@ -0,0 +1,11 @@ +{ + "name": "usernames", + "in": "path", + "description": "usernames to pass", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/pathStringParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/pathStringParameter.json new file mode 100644 index 000000000..0ac9c6f07 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/pathStringParameter.json @@ -0,0 +1,7 @@ +{ + "name": "username", + "in": "path", + "description": "username to fetch", + "required": true, + "type": "string" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/queryInt64ArrayParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/queryInt64ArrayParameter.json new file mode 100644 index 000000000..59b557a6f --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/queryInt64ArrayParameter.json @@ -0,0 +1,12 @@ +{ + "name": "id", + "in": "query", + "description": "ID of the object to fetch", + "required": true, + "type": "array", + "items": { + "type": "integer", + "format": "int64" + }, + "collectionFormat": "csv" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/queryStringParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/queryStringParameter.json new file mode 100644 index 000000000..fde2d3bad --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/queryStringParameter.json @@ -0,0 +1,8 @@ +{ + "name": "id", + "in": "query", + "description": "ID of the object to fetch", + "required": true, + "type": "integer", + "format": "int64" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/queryWithComplexParameter.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/queryWithComplexParameter.json new file mode 100644 index 000000000..61f9e9323 --- /dev/null +++ 
b/vendor/github.com/go-openapi/loads/fixtures/json/resources/parameters/queryWithComplexParameter.json @@ -0,0 +1,9 @@ +{ + "name": "id", + "in": "query", + "description": "a complex object which should not validate", + "required": true, + "schema": { + "$ref": "Pet" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/resourceWithExamplePayload.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/resourceWithExamplePayload.json new file mode 100644 index 000000000..90767b6a5 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/resourceWithExamplePayload.json @@ -0,0 +1,114 @@ +{ + "swagger": "2.0", + "info": { + "version": "1.0.9-abcd", + "title": "Swagger Sample API", + "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", + "termsOfService": "http://helloreverb.com/terms/", + "contact": { + "name": "wordnik api team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "host": "my.api.com", + "basePath": "/v1", + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json", + "application/xml" + ], + "paths": { + "/pets/{petId}": { + "get": { + "description": "Returns a pet based on ID", + "summary": "Find pet by ID", + "operationId": "getPetsById", + "produces": [ + "application/json", + "text/html" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet that needs to be fetched", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv" + } + ], + "responses": { + "200": { + "description": "pet response", + "schema": { + "$ref": "Pet" + }, + "examples": { + "application/json": { + "id": 9, + "category": { + "name": "domestic" + }, + "name": "monster", + "tags": [ + { + "name": "for sale" + } + ], + "status": "alive" + } + } + }, + "default": { + "description": "error payload", + "schema": { + "$ref": "ErrorModel" + } + } + } + } + } + }, + "definitions": { + "Pet": { + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "example": "cat" + }, + "tag": { + "type": "string", + "example": "for sale" + } + } + }, + "ErrorModel": { + "required": [ "code", "message" ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/resourceWithLinkedDefinitions.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/resourceWithLinkedDefinitions.json new file mode 100644 index 000000000..52f9ebd9c --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/resourceWithLinkedDefinitions.json @@ -0,0 +1,62 @@ +{ + "swagger": "2.0", + "info": { + "version": "1.0.9-abcd", + "title": "Swagger Sample API", + "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", + "termsOfService": "http://helloreverb.com/terms/", + "contact": { + "name": "wordnik api team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "host": "my.api.com", + "basePath": "/v1", + "schemes": [ + "http", + "https" + ], + "consumes": [ + 
"application/json" + ], + "produces": [ + "application/json", + "application/xml" + ], + "paths": { + "/pets/{petId}": { + "$ref": "https://raw.githubusercontent.com/reverb/swagger-spec/master/fixtures/v2.0/json/resources/resourceWithLinkedDefinitions_part1.json" + } + }, + "definitions": { + "Pet": { + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ErrorModel": { + "required": [ "code", "message" ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/resourceWithLinkedDefinitions_part1.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/resourceWithLinkedDefinitions_part1.json new file mode 100644 index 000000000..5b7f5b9ad --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/resourceWithLinkedDefinitions_part1.json @@ -0,0 +1,38 @@ +{ + "get": { + "description": "Returns a pet based on ID", + "summary": "Find pet by ID", + "operationId": "getPetsById", + "produces": [ + "application/json", + "text/html" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet that needs to be fetched", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv" + } + ], + "responses": { + "200": { + "description": "pet response", + "schema": { + "$ref": "Pet" + } + }, + "default": { + "description": "error payload", + "schema": { + "$ref": "ErrorModel" + } + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/resourceWithRelativeHost.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/resourceWithRelativeHost.json new file mode 100644 index 000000000..2e16c3a39 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/resourceWithRelativeHost.json @@ -0,0 +1,99 @@ +{ + "swagger": "2.0", + "info": { + "version": "1.0.9-abcd", + "title": "Swagger Sample API", + "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", + "termsOfService": "http://helloreverb.com/terms/", + "contact": { + "name": "wordnik api team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "basePath": "/v1", + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json", + "application/xml" + ], + "paths": { + "/pets/{id}": { + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of pet to use", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv" + } + ], + "get": { + "description": "Returns pets based on ID", + "summary": "Find pets by ID", + "operationId": "getPetsById", + "produces": [ + "application/json", + "text/html" + ], + "responses": { + "200": { + "description": "pet response", + "schema": { + "type": "array", + "items": { + "$ref": "Pet" + } + } + }, + "default": { + "description": "error payload", + "schema": { + "$ref": "ErrorModel" + } + } + } + } + } + }, + "definitions": { + "Pet": { + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ErrorModel": { + "required": [ "code", "message" ], + 
"properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/reusableParameters.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/reusableParameters.json new file mode 100644 index 000000000..3547c419f --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/reusableParameters.json @@ -0,0 +1,105 @@ +{ + "swagger": "2.0", + "info": { + "version": "1.0.9-abcd", + "title": "Swagger Sample API", + "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", + "termsOfService": "http://helloreverb.com/terms/", + "contact": { + "name": "wordnik api team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "host": "my.api.com", + "basePath": "/v1", + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json", + "application/xml" + ], + "paths": { + "/pets/{id}": { + "get": { + "description": "Returns pets based on ID", + "summary": "Find pets by ID", + "operationId": "getPetsById", + "parameters": [ + { "$ref": "#/parameters/skipParam" }, + { "$ref": "#/parameters/limitParam" } + ], + "responses": { + "200": { + "description": "pet response", + "schema": { + "type": "array", + "items": { + "$ref": "Pet" + } + } + }, + "default": { + "description": "error payload", + "schema": { + "$ref": "ErrorModel" + } + } + } + } + } + }, + "parameters": { + "skipParam": { + "name": "skip", + "in": "query", + "description": "number of items to skip", + "required": true, + "type": "integer", + "format": "int32" + }, + "limitParam": { + "name": "limit", + "in": "query", + "description": "max records to return", + "required": true, + "type": "integer", + "format": "int32" + } + }, + "definitions": { + "Pet": { + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ErrorModel": { + "required": [ "code", "message" ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/securityExample.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/securityExample.json new file mode 100644 index 000000000..e4c482c4d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/securityExample.json @@ -0,0 +1,181 @@ +{ + "swagger": "2.0", + "info": { + "version": "1.0.9-abcd", + "title": "Swagger Sample API", + "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", + "termsOfService": "http://helloreverb.com/terms/", + "contact": { + "name": "wordnik api team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "basePath": "/v1", + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json", + "application/xml" + ], + "security": [ + { + "githubAccessCode": [ "user", "gist" ] + }, + { + "internalApiKey": [] + } + ], + "paths": { + "/pets/{id}": { + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of 
pet to use", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv" + } + ], + "get": { + "description": "Returns pets based on ID", + "summary": "Find pets by ID", + "operationId": "getPetsById", + "security": [ + { + "githubAuth":[ + "user:read", + "user:write" + ] + }, + { + "internalApiKey": [] + } + ], + "produces": [ + "application/json", + "text/html" + ], + "responses": { + "200": { + "description": "pet response", + "schema": { + "type": "array", + "items": { + "$ref": "Pet" + } + } + }, + "default": { + "description": "error payload", + "schema": { + "$ref": "ErrorModel" + } + } + } + } + } + }, + "securityDefinitions": { + "githubAccessCode": { + "type": "oauth2", + "scopes": { + "user": "Grants read/write access to profile info only. Note that this scope includes user:email and user:follow.", + "user:email": "Grants read access to a user’s email addresses.", + "user:follow": "Grants access to follow or unfollow other users.", + "public_repo": "Grants read/write access to code, commit statuses, and deployment statuses for public repositories and organizations.", + "repo": "Grants read/write access to code, commit statuses, and deployment statuses for public and private repositories and organizations.", + "repo_deployment": "Grants access to deployment statuses for public and private repositories. This scope is only necessary to grant other users or services access to deployment statuses, without granting access to the code.", + "repo:status": "Grants read/write access to public and private repository commit statuses. This scope is only necessary to grant other users or services access to private repository commit statuses without granting access to the code.", + "delete_repo": "Grants access to delete adminable repositories.", + "notifications": "Grants read access to a user’s notifications. repo also provides this access.", + "gist": "Grants write access to gists.", + "read:repo_hook": "Grants read and ping access to hooks in public or private repositories.", + "write:repo_hook": "Grants read, write, and ping access to hooks in public or private repositories.", + "admin:repo_hook": "Grants read, write, ping, and delete access to hooks in public or private repositories.", + "read:org": "Read-only access to organization, teams, and membership.", + "write:org": "Publicize and unpublicize organization membership.", + "admin:org": "Fully manage organization, teams, and memberships.", + "read:public_key": "List and view details for public keys.", + "write:public_key": "Create, list, and view details for public keys.", + "admin:public_key": "Fully manage public keys." + }, + "flow": "accessCode", + "authorizationUrl": "https://github.com/login/oauth/authorize", + "tokenUrl": "https://github.com/login/oauth/access_token" + }, + "petstoreImplicit": { + "type": "oauth2", + "scopes": { + "user": "Grants read/write access to profile info only. Note that this scope includes user:email and user:follow.", + "user:email": "Grants read access to a user’s email addresses.", + "user:follow": "Grants access to follow or unfollow other users.", + "public_repo": "Grants read/write access to code, commit statuses, and deployment statuses for public repositories and organizations.", + "repo": "Grants read/write access to code, commit statuses, and deployment statuses for public and private repositories and organizations.", + "repo_deployment": "Grants access to deployment statuses for public and private repositories. 
This scope is only necessary to grant other users or services access to deployment statuses, without granting access to the code.", + "repo:status": "Grants read/write access to public and private repository commit statuses. This scope is only necessary to grant other users or services access to private repository commit statuses without granting access to the code.", + "delete_repo": "Grants access to delete adminable repositories.", + "notifications": "Grants read access to a user’s notifications. repo also provides this access.", + "gist": "Grants write access to gists.", + "read:repo_hook": "Grants read and ping access to hooks in public or private repositories.", + "write:repo_hook": "Grants read, write, and ping access to hooks in public or private repositories.", + "admin:repo_hook": "Grants read, write, ping, and delete access to hooks in public or private repositories.", + "read:org": "Read-only access to organization, teams, and membership.", + "write:org": "Publicize and unpublicize organization membership.", + "admin:org": "Fully manage organization, teams, and memberships.", + "read:public_key": "List and view details for public keys.", + "write:public_key": "Create, list, and view details for public keys.", + "admin:public_key": "Fully manage public keys." + }, + "flow": "implicit", + "authorizationUrl": "http://petstore.swagger.wordnik.com/oauth/dialog" + }, + "internalApiKey": { + "type": "apiKey", + "in": "header", + "name": "api_key" + } + }, + "definitions": { + "Pet": { + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ErrorModel": { + "required": [ + "code", + "message" + ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/stringPathParamResource.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/stringPathParamResource.json new file mode 100644 index 000000000..a438a08ea --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/stringPathParamResource.json @@ -0,0 +1,97 @@ +{ + "swagger": "2.0", + "info": { + "version": "1.0.9-abcd", + "title": "Swagger Sample API", + "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", + "termsOfService": "http://helloreverb.com/terms/", + "contact": { + "name": "wordnik api team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "host": "my.api.com", + "basePath": "/v1", + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json", + "application/xml" + ], + "paths": { + "/pets/{petId}": { + "get": { + "description": "Returns a pet based on ID", + "summary": "Find pet by ID", + "operationId": "getPetsById", + "produces": [ + "application/json", + "text/html" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet that needs to be fetched", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv" + } + ], + "responses": { + "200": { + "description": "pet response", + "schema": { + "$ref": "Pet" + } + }, + "default": { + "description": "error payload", + "schema": { + "$ref": "ErrorModel" + } + } + } + } + } + }, + 
"definitions": { + "Pet": { + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ErrorModel": { + "required": [ "code", "message" ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/taggedResource.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/taggedResource.json new file mode 100644 index 000000000..19394c70d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/taggedResource.json @@ -0,0 +1,112 @@ +{ + "swagger": "2.0", + "x-reverb": { + "addAnythingYouWant": true + }, + "info": { + "x-reverb-info": "this is an example", + "version": "1.0.9-abcd", + "title": "Swagger Sample API", + "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", + "termsOfService": "http://helloreverb.com/terms/", + "contact": { + "name": "wordnik api team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "host": "my.api.com", + "basePath": "/v1", + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json", + "application/xml" + ], + "tags": [ + { + "name": "pets" + } + ], + "paths": { + "x-reverb-path-info": "vendor info", + "/pets": { + "x-vendor-method": {}, + "get": { + "x-vendor-operation-property": {}, + "description": "Returns a pet based on ID", + "summary": "Find pet by ID", + "operationId": "getPetsById", + "produces": [ + "application/json", + "text/html" + ], + "parameters": [ + { + "x-vendor-parameter-property": {}, + "name": "petId", + "in": "path", + "description": "ID of pet that needs to be fetched", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv" + } + ], + "responses": { + "x-vendor-operation-response-property": {}, + "200": { + "description": "pet response", + "schema": { + "$ref": "Pet" + } + }, + "default": { + "description": "error payload", + "schema": { + "$ref": "ErrorModel" + } + } + } + } + } + }, + "definitions": { + "Pet": { + "x-vendor-model-property": {}, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ErrorModel": { + "required": [ "code", "message" ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/resources/vendorExtensionExamples.json b/vendor/github.com/go-openapi/loads/fixtures/json/resources/vendorExtensionExamples.json new file mode 100644 index 000000000..267ce5399 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/resources/vendorExtensionExamples.json @@ -0,0 +1,107 @@ +{ + "swagger": "2.0", + "x-reverb": { + "addAnythingYouWant": true + }, + "info": { + "x-reverb-info": "this is an example", + "version": "1.0.9-abcd", + "title": "Swagger Sample API", + "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", + "termsOfService": "http://helloreverb.com/terms/", + "contact": { + "name": "wordnik api team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 
International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "host": "my.api.com", + "basePath": "/v1", + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json", + "application/xml" + ], + "paths": { + "x-reverb-path-info": "vendor info", + "/pets": { + "x-vendor-method": {}, + "get": { + "x-vendor-operation-property": {}, + "description": "Returns a pet based on ID", + "summary": "Find pet by ID", + "operationId": "getPetsById", + "produces": [ + "application/json", + "text/html" + ], + "parameters": [ + { + "x-vendor-parameter-property": {}, + "name": "petId", + "in": "path", + "description": "ID of pet that needs to be fetched", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "csv" + } + ], + "responses": { + "x-vendor-operation-response-property": {}, + "200": { + "description": "pet response", + "schema": { + "$ref": "Pet" + } + }, + "default": { + "description": "error payload", + "schema": { + "$ref": "ErrorModel" + } + } + } + } + } + }, + "definitions": { + "Pet": { + "x-vendor-model-property": {}, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ErrorModel": { + "required": [ "code", "message" ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/responses/complexArrayResponse.json b/vendor/github.com/go-openapi/loads/fixtures/json/responses/complexArrayResponse.json new file mode 100644 index 000000000..5d2ce79ee --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/responses/complexArrayResponse.json @@ -0,0 +1,9 @@ +{ + "description": "A complex object array response", + "schema": { + "type": "array", + "items": { + "$ref": "VeryComplexType" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/responses/dateTimeResponse.json b/vendor/github.com/go-openapi/loads/fixtures/json/responses/dateTimeResponse.json new file mode 100644 index 000000000..77fe8d442 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/responses/dateTimeResponse.json @@ -0,0 +1,7 @@ +{ + "description": "A date-time response", + "schema": { + "type": "string", + "format": "date-time" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/responses/int32Response.json b/vendor/github.com/go-openapi/loads/fixtures/json/responses/int32Response.json new file mode 100644 index 000000000..b9470c8f2 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/responses/int32Response.json @@ -0,0 +1,7 @@ +{ + "description": "A simple string response", + "schema": { + "type": "integer", + "format": "int32" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/responses/int64Response.json b/vendor/github.com/go-openapi/loads/fixtures/json/responses/int64Response.json new file mode 100644 index 000000000..16a4d1c1c --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/responses/int64Response.json @@ -0,0 +1,7 @@ +{ + "description": "A simple string response", + "schema": { + "type": "integer", + "format": "int64" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/responses/multipleResponses.json 
b/vendor/github.com/go-openapi/loads/fixtures/json/responses/multipleResponses.json new file mode 100644 index 000000000..b7c51544e --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/responses/multipleResponses.json @@ -0,0 +1,18 @@ +{ + "200": { + "description": "simple string response", + "schema": { + "type": "string" + } + }, + "201": { + "description": "object created" + }, + "default": { + "description": "oops", + "schema": { + "type": "integer", + "format": "int32" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/responses/stringArrayResponse.json b/vendor/github.com/go-openapi/loads/fixtures/json/responses/stringArrayResponse.json new file mode 100644 index 000000000..4640ea04b --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/responses/stringArrayResponse.json @@ -0,0 +1,9 @@ +{ + "description": "A string array response", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/responses/stringResponse.json b/vendor/github.com/go-openapi/loads/fixtures/json/responses/stringResponse.json new file mode 100644 index 000000000..f79402737 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/responses/stringResponse.json @@ -0,0 +1,6 @@ +{ + "description": "A simple string response", + "schema": { + "type": "string" + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/responses/stringResponseWithHeader.json b/vendor/github.com/go-openapi/loads/fixtures/json/responses/stringResponseWithHeader.json new file mode 100644 index 000000000..57a5f80b4 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/responses/stringResponseWithHeader.json @@ -0,0 +1,10 @@ +{ + "description": "A simple string response", + "schema": { + "type": "string" + }, + "headers": { + "is-dog": {"type": "boolean"}, + "is-cat": {"type": "boolean"} + } +} diff --git a/vendor/github.com/go-openapi/loads/fixtures/json/responses/voidResponse.json b/vendor/github.com/go-openapi/loads/fixtures/json/responses/voidResponse.json new file mode 100644 index 000000000..a7edcadb3 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/json/responses/voidResponse.json @@ -0,0 +1,3 @@ +{ + "description": "object created" +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/.gitkeep b/vendor/github.com/go-openapi/loads/fixtures/yaml/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithArrayRef.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithArrayRef.yaml new file mode 100644 index 000000000..b32a35901 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithArrayRef.yaml @@ -0,0 +1,5 @@ +required: + - id +properties: + id: {type: integer, format: int64} + children: {type: array, items: {$ref: Person}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithComposition.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithComposition.yaml new file mode 100644 index 000000000..f0ea5e80f --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithComposition.yaml @@ -0,0 +1,5 @@ +definitions: + Cat: {description: 'A representation of a cat', allOf: [{$ref: '#/models/Pet'}, {properties: {huntingSkill: {type: string, description: 
'The measured skill for hunting', default: lazy, enum: [clueless, lazy, adventerous, aggressive]}}, required: [huntingSkill]}]} + Dog: {description: 'A representation of a dog', allOf: [{$ref: '#/models/Pet'}, {properties: {packSize: {type: integer, format: int32, description: 'the size of the pack the dog is from', default: 0}}, required: [name, packSize]}]} + Fish: {description: 'A representation of a fish', allOf: [{$ref: '#/models/Pet'}, {properties: {fins: {type: integer, format: int32, description: 'count of fins'}}, required: [fins]}]} + Pet: {discriminator: petType, properties: {name: {type: string}, petType: {type: string}}, required: [name, petType]} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithDateTimeMap.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithDateTimeMap.yaml new file mode 100644 index 000000000..653cbd182 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithDateTimeMap.yaml @@ -0,0 +1,4 @@ +description: 'true' +additionalProperties: + type: string + format: date-time diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithExamples.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithExamples.yaml new file mode 100644 index 000000000..0252ec176 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithExamples.yaml @@ -0,0 +1,3 @@ +definitions: + Pet: {properties: {name: {type: string}}, required: [name]} + Dog: {properties: {id: {type: integer, format: int64}, name: {type: string}}, required: [name]} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithInt32Map.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithInt32Map.yaml new file mode 100644 index 000000000..88ab02447 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithInt32Map.yaml @@ -0,0 +1,4 @@ +description: 'This is a Map[String, Integer]' +additionalProperties: + type: integer + format: int32 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithInt64Map.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithInt64Map.yaml new file mode 100644 index 000000000..16c790e42 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithInt64Map.yaml @@ -0,0 +1,4 @@ +description: 'true' +additionalProperties: + type: integer + format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithMultipleProperties.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithMultipleProperties.yaml new file mode 100644 index 000000000..f69d27ef2 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithMultipleProperties.yaml @@ -0,0 +1,14 @@ +description: 'true' +properties: + booleanValue: {type: boolean} + byteValue: {type: string, format: byte} + dateTimeValue: {type: string, format: date-time} + int32Value: {type: integer, format: int32} + int64Value: {type: integer, format: int64} + stringValue: {type: string} + booleanArrayValue: {type: array, items: {type: boolean}} + byteArrayValue: {type: array, items: {type: string, format: byte}} + dateTimeArrayValue: {type: array, items: {type: string, format: date-time}} + int32ArrayValue: {type: array, items: {type: integer, format: int32}} + int64ArrayValue: {type: array, items: {type: integer, format: int64}} + stringArrayValue: {type: array, items: {type: string}} diff --git 
a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithObjectMap.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithObjectMap.yaml new file mode 100644 index 000000000..e712e0f22 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithObjectMap.yaml @@ -0,0 +1,9 @@ +description: "This is a Map[String, { id: Long, name: String}]" +additionalProperties: + type: "object" + properties: + id: + type: "integer" + format: "int64" + name: + type: "string" diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithPrimitiveArray.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithPrimitiveArray.yaml new file mode 100644 index 000000000..0837bad52 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithPrimitiveArray.yaml @@ -0,0 +1,5 @@ +required: + - id +properties: + id: {type: integer, format: int64} + childrensAges: {type: array, items: {type: integer, format: int32}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithStringProperty.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithStringProperty.yaml new file mode 100644 index 000000000..a17d21e5a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithStringProperty.yaml @@ -0,0 +1,3 @@ +description: 'true' +properties: + name: {type: string} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithXmlAttributes.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithXmlAttributes.yaml new file mode 100644 index 000000000..2176983f8 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/modelWithXmlAttributes.yaml @@ -0,0 +1,6 @@ +description: 'this model serves xml and json structures' +xml: + name: XMLModel +properties: + id: {type: integer, format: int64, xml: {attribute: true, namespace: ns1, prefix: urn1}} + items: {type: array, items: {type: string}, xml: {wrapped: true}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/models.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/models.yaml new file mode 100644 index 000000000..71f8efeb4 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/models.yaml @@ -0,0 +1,2 @@ +definitions: + Pet: {properties: {name: {type: string}}, required: [name]} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/multipleModels.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/multipleModels.yaml new file mode 100644 index 000000000..0252ec176 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/multipleModels.yaml @@ -0,0 +1,3 @@ +definitions: + Pet: {properties: {name: {type: string}}, required: [name]} + Dog: {properties: {id: {type: integer, format: int64}, name: {type: string}}, required: [name]} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithBooleanArray.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithBooleanArray.yaml new file mode 100644 index 000000000..aa02d0e74 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithBooleanArray.yaml @@ -0,0 +1,3 @@ +type: array +items: + type: boolean diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithByteArray.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithByteArray.yaml new file mode 100644 
index 000000000..f31b35036 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithByteArray.yaml @@ -0,0 +1,4 @@ +type: array +items: + type: string + format: byte diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithComplexArray.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithComplexArray.yaml new file mode 100644 index 000000000..69e0b1765 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithComplexArray.yaml @@ -0,0 +1,3 @@ +type: array +items: + $ref: ComplexType diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithDateTimeArray.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithDateTimeArray.yaml new file mode 100644 index 000000000..6efef25d8 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithDateTimeArray.yaml @@ -0,0 +1,4 @@ +type: array +items: + type: string + format: date-time diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithInt32Array.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithInt32Array.yaml new file mode 100644 index 000000000..26fd271b0 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithInt32Array.yaml @@ -0,0 +1,4 @@ +type: array +items: + type: integer + format: int32 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithInt64Array.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithInt64Array.yaml new file mode 100644 index 000000000..e800895eb --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithInt64Array.yaml @@ -0,0 +1,4 @@ +type: array +items: + type: integer + format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithRef.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithRef.yaml new file mode 100644 index 000000000..d78956949 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithRef.yaml @@ -0,0 +1,3 @@ +$ref: Foo +description: 'a boolean' +readOnly: true diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithStringArray.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithStringArray.yaml new file mode 100644 index 000000000..3a9108235 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/propertyWithStringArray.yaml @@ -0,0 +1,3 @@ +type: array +items: + type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleBooleanProperty.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleBooleanProperty.yaml new file mode 100644 index 000000000..9d1e27382 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleBooleanProperty.yaml @@ -0,0 +1,3 @@ +type: boolean +description: 'a boolean' +readOnly: true diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleByteProperty.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleByteProperty.yaml new file mode 100644 index 000000000..6af2139f9 --- /dev/null +++ 
b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleByteProperty.yaml @@ -0,0 +1,2 @@ +type: string +format: byte diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleDateTimeProperty.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleDateTimeProperty.yaml new file mode 100644 index 000000000..407b1d7e8 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleDateTimeProperty.yaml @@ -0,0 +1,2 @@ +type: string +format: date-time diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleInt32Property.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleInt32Property.yaml new file mode 100644 index 000000000..8ed513dba --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleInt32Property.yaml @@ -0,0 +1,2 @@ +type: integer +format: int32 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleInt64Property.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleInt64Property.yaml new file mode 100644 index 000000000..69f359f3f --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleInt64Property.yaml @@ -0,0 +1,2 @@ +type: integer +format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleStringProperty.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleStringProperty.yaml new file mode 100644 index 000000000..5c21d88b9 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/models/properties/simpleStringProperty.yaml @@ -0,0 +1 @@ +type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/cascadingSchemes.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/cascadingSchemes.yaml new file mode 100644 index 000000000..d10802774 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/cascadingSchemes.yaml @@ -0,0 +1,23 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{petId}': {get: {description: 'Returns a pet based on ID', summary: 'Find pet by ID', operationId: getPetsById, produces: [application/json, text/html], parameters: [{name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: array, items: {type: string}, collectionFormat: csv}], responses: {'200': {description: 'pet response', schema: {$ref: Pet}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}, schemes: [https]}} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/commonParameters.yaml 
b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/commonParameters.yaml new file mode 100644 index 000000000..9851b7448 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/commonParameters.yaml @@ -0,0 +1,23 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{id}': {parameters: [{name: id, in: path, description: 'ID of pet to use', required: true, type: array, items: {type: string}, collectionFormat: csv}], get: {description: 'Returns pets based on ID', summary: 'Find pets by ID', operationId: getPetsById, produces: [application/json, text/html], responses: {'200': {description: 'pet response', schema: {type: array, items: {$ref: Pet}}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/multipleMimeTypes.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/multipleMimeTypes.yaml new file mode 100644 index 000000000..23bdc2d2a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/multipleMimeTypes.yaml @@ -0,0 +1,32 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - 'text/plain; charset=utf-8' + - application/json + - application/vnd.github+json + - application/vnd.github.v3+json + - application/vnd.github.v3.raw+json + - application/vnd.github.v3.text+json + - application/vnd.github.v3.html+json + - application/vnd.github.v3.full+json + - application/vnd.github.v3.diff + - application/vnd.github.v3.patch +produces: + - application/json + - application/xml +paths: + '/pets/{id}': {parameters: [{name: id, in: path, description: 'ID of pet to use', required: true, type: array, items: {type: string}, collectionFormat: csv}], get: {description: 'Returns pets based on ID', summary: 'Find pets by ID', operationId: getPetsById, produces: [application/json, text/html], responses: {'200': {description: 'pet response', schema: {type: array, items: {$ref: Pet}}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git 
a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/operations/operationWithTags.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/operations/operationWithTags.yaml new file mode 100644 index 000000000..a3c85b26a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/operations/operationWithTags.yaml @@ -0,0 +1,14 @@ +description: 'Returns a pet based on ID' +summary: 'Find pet by ID' +operationId: getPetsById +tags: + - foo + - bar +produces: + - application/json + - text/html +parameters: + - {name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: integer, format: int64} +responses: + '200': {description: 'a pet to be returned', schema: {$ref: Pet}} + default: {description: 'Unexpected error', schema: {$ref: ErrorModel}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/operations/stringPathAndBoolQueryParamResource.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/operations/stringPathAndBoolQueryParamResource.yaml new file mode 100644 index 000000000..a00b8c469 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/operations/stringPathAndBoolQueryParamResource.yaml @@ -0,0 +1,12 @@ +description: 'Returns a pet based on ID' +summary: 'Find pet by ID' +operationId: getPetsById +produces: + - application/json + - text/html +parameters: + - {name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: integer, format: int64} + - {name: includeDetails, in: query, description: 'include details in response', required: true, type: boolean} +responses: + '200': {description: 'a pet to be returned', schema: {$ref: Pet}} + default: {description: 'Unexpected error', schema: {$ref: ErrorModel}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/operations/stringPathParamResource.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/operations/stringPathParamResource.yaml new file mode 100644 index 000000000..8c46cc7ee --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/operations/stringPathParamResource.yaml @@ -0,0 +1,12 @@ +description: 'Returns a pet based on ID' +summary: 'Find pet by ID' +operationId: getPetsById +produces: + - application/json + - text/html +parameters: + - {name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: integer, format: int64} +responses: + '200': {description: fun, schema: {$ref: Pet}} + '400': {description: 'Invalid ID supplied <= this is purely for documentation', schema: {$ref: ErrorModel}} + default: {description: 'Unexpected error', schema: {$ref: ErrorModel}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyComplexArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyComplexArrayParameter.yaml new file mode 100644 index 000000000..511f8dc2b --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyComplexArrayParameter.yaml @@ -0,0 +1,8 @@ +name: user +in: body +description: 'user to add to the system' +required: true +schema: + type: array + items: {type: string} + format: csv diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyComplexParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyComplexParameter.yaml new file mode 100644 index 000000000..13f8fd171 --- /dev/null +++ 
b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyComplexParameter.yaml @@ -0,0 +1,6 @@ +name: user +in: body +description: 'user to add to the system' +required: true +schema: + $ref: User diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyInt64Parameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyInt64Parameter.yaml new file mode 100644 index 000000000..a270d94cb --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyInt64Parameter.yaml @@ -0,0 +1,7 @@ +name: id +in: body +description: 'id to add' +required: true +schema: + type: integer + format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyStringArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyStringArrayParameter.yaml new file mode 100644 index 000000000..7d88b10dc --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyStringArrayParameter.yaml @@ -0,0 +1,7 @@ +name: user +in: body +description: 'user to add to the system' +required: true +schema: + type: array + items: {type: string} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyStringParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyStringParameter.yaml new file mode 100644 index 000000000..7fe9dbd1e --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/bodyStringParameter.yaml @@ -0,0 +1,6 @@ +name: user +in: body +description: 'user to add to the system' +required: true +schema: + type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/formDataComplexParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/formDataComplexParameter.yaml new file mode 100644 index 000000000..b49dd9e3d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/formDataComplexParameter.yaml @@ -0,0 +1,5 @@ +name: firstName +in: formData +description: 'users first name' +required: true +$ref: Nothing diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/formDataInt64Parameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/formDataInt64Parameter.yaml new file mode 100644 index 000000000..0faaa7f62 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/formDataInt64Parameter.yaml @@ -0,0 +1,6 @@ +name: id +in: formData +description: 'username to fetch' +required: true +type: integer +format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/formDataStringArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/formDataStringArrayParameter.yaml new file mode 100644 index 000000000..f6534662a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/formDataStringArrayParameter.yaml @@ -0,0 +1,7 @@ +name: user +in: formData +description: 'user to add to the system' +required: true +type: array +items: + type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/formDataStringParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/formDataStringParameter.yaml new file mode 100644 index 000000000..43411cf04 --- /dev/null +++ 
b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/formDataStringParameter.yaml @@ -0,0 +1,5 @@ +name: firstName +in: formData +description: 'users first name' +required: true +type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/headerInt64ArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/headerInt64ArrayParameter.yaml new file mode 100644 index 000000000..87ec99cab --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/headerInt64ArrayParameter.yaml @@ -0,0 +1,9 @@ +name: token +in: header +description: 'token to be passed as a header' +required: true +type: array +items: + type: integer + format: int64 +collectionFormat: csv diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/headerStringArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/headerStringArrayParameter.yaml new file mode 100644 index 000000000..70e0a0a68 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/headerStringArrayParameter.yaml @@ -0,0 +1,8 @@ +name: token +in: header +description: 'token to be passed as a header' +required: true +type: array +items: + type: string +collectionFormat: csv diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/headerStringParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/headerStringParameter.yaml new file mode 100644 index 000000000..ac99c2f62 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/headerStringParameter.yaml @@ -0,0 +1,5 @@ +name: token +in: header +description: 'token to be passed as a header' +required: true +type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/pathInt64Parameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/pathInt64Parameter.yaml new file mode 100644 index 000000000..3bd860aac --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/pathInt64Parameter.yaml @@ -0,0 +1,6 @@ +name: id +in: path +description: 'username to fetch' +required: true +type: integer +format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/pathStringArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/pathStringArrayParameter.yaml new file mode 100644 index 000000000..ebb24a7e2 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/pathStringArrayParameter.yaml @@ -0,0 +1,8 @@ +name: usernames +in: path +description: 'usernames to pass' +required: true +type: array +items: + type: string +collectionFormat: csv diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/pathStringParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/pathStringParameter.yaml new file mode 100644 index 000000000..2cb4fb2ee --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/pathStringParameter.yaml @@ -0,0 +1,5 @@ +name: username +in: path +description: 'username to fetch' +required: true +type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/queryInt64ArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/queryInt64ArrayParameter.yaml new file mode 100644 index 
000000000..453e24f5d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/queryInt64ArrayParameter.yaml @@ -0,0 +1,9 @@ +name: id +in: query +description: 'ID of the object to fetch' +required: true +type: array +items: + type: integer + format: int64 +collectionFormat: csv diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/queryStringParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/queryStringParameter.yaml new file mode 100644 index 000000000..e1df68530 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/queryStringParameter.yaml @@ -0,0 +1,6 @@ +name: id +in: query +description: 'ID of the object to fetch' +required: true +type: integer +format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/queryWithComplexParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/queryWithComplexParameter.yaml new file mode 100644 index 000000000..80bc7951b --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/parameters/queryWithComplexParameter.yaml @@ -0,0 +1,6 @@ +name: id +in: query +description: 'a complex object which should not validate' +required: true +schema: + $ref: Pet diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/resourceWithExamplePayload.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/resourceWithExamplePayload.yaml new file mode 100644 index 000000000..9828d9605 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/resourceWithExamplePayload.yaml @@ -0,0 +1,23 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{petId}': {get: {description: 'Returns a pet based on ID', summary: 'Find pet by ID', operationId: getPetsById, produces: [application/json, text/html], parameters: [{name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: array, items: {type: string}, collectionFormat: csv}], responses: {'200': {description: 'pet response', schema: {$ref: Pet}, examples: {application/json: {id: 9, category: {name: domestic}, name: monster, tags: [{name: 'for sale'}], status: alive}}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {required: [name], properties: {name: {type: string, example: cat}, tag: {type: string, example: 'for sale'}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/resourceWithLinkedDefinitions.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/resourceWithLinkedDefinitions.yaml new file mode 100644 index 000000000..b9b00c47f --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/resourceWithLinkedDefinitions.yaml @@ -0,0 +1,23 @@ +swagger: '2.0' +info: + version: 
1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{petId}': {$ref: 'https://raw.githubusercontent.com/reverb/swagger-spec/master/fixtures/v2.0/json/resources/resourceWithLinkedDefinitions_part1.json'} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/resourceWithLinkedDefinitions_part1.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/resourceWithLinkedDefinitions_part1.yaml new file mode 100644 index 000000000..7d358fbf1 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/resourceWithLinkedDefinitions_part1.yaml @@ -0,0 +1,7 @@ +get: + description: 'Returns a pet based on ID' + summary: 'Find pet by ID' + operationId: getPetsById + produces: [application/json, text/html] + parameters: [{name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: array, items: {type: string}, collectionFormat: csv}] + responses: {'200': {description: 'pet response', schema: {$ref: Pet}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/resourceWithRelativeHost.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/resourceWithRelativeHost.yaml new file mode 100644 index 000000000..744aa72e9 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/resourceWithRelativeHost.yaml @@ -0,0 +1,22 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{id}': {parameters: [{name: id, in: path, description: 'ID of pet to use', required: true, type: array, items: {type: string}, collectionFormat: csv}], get: {description: 'Returns pets based on ID', summary: 'Find pets by ID', operationId: getPetsById, produces: [application/json, text/html], responses: {'200': {description: 'pet response', schema: {type: array, items: {$ref: Pet}}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/reusableParameters.yaml 
b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/reusableParameters.yaml new file mode 100644 index 000000000..1d258d082 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/reusableParameters.yaml @@ -0,0 +1,26 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{id}': {get: {description: 'Returns pets based on ID', summary: 'Find pets by ID', operationId: getPetsById, parameters: [{$ref: '#/parameters/skipParam'}, {$ref: '#/parameters/limitParam'}], responses: {'200': {description: 'pet response', schema: {type: array, items: {$ref: Pet}}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +parameters: + skipParam: {name: skip, in: query, description: 'number of items to skip', required: true, type: integer, format: int32} + limitParam: {name: limit, in: query, description: 'max records to return', required: true, type: integer, format: int32} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/securityExample.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/securityExample.yaml new file mode 100644 index 000000000..007d26db4 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/securityExample.yaml @@ -0,0 +1,29 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +security: + - {githubAccessCode: [user, gist]} + - {internalApiKey: []} +paths: + '/pets/{id}': {parameters: [{name: id, in: path, description: 'ID of pet to use', required: true, type: array, items: {type: string}, collectionFormat: csv}], get: {description: 'Returns pets based on ID', summary: 'Find pets by ID', operationId: getPetsById, security: [{githubAuth: ['user:read', 'user:write']}, {internalApiKey: []}], produces: [application/json, text/html], responses: {'200': {description: 'pet response', schema: {type: array, items: {$ref: Pet}}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +securityDefinitions: + githubAccessCode: {type: oauth2, scopes: {user: 'Grants read/write access to profile info only. 
Note that this scope includes user:email and user:follow.', 'user:email': 'Grants read access to a user’s email addresses.', 'user:follow': 'Grants access to follow or unfollow other users.', public_repo: 'Grants read/write access to code, commit statuses, and deployment statuses for public repositories and organizations.', repo: 'Grants read/write access to code, commit statuses, and deployment statuses for public and private repositories and organizations.', repo_deployment: 'Grants access to deployment statuses for public and private repositories. This scope is only necessary to grant other users or services access to deployment statuses, without granting access to the code.', 'repo:status': 'Grants read/write access to public and private repository commit statuses. This scope is only necessary to grant other users or services access to private repository commit statuses without granting access to the code.', delete_repo: 'Grants access to delete adminable repositories.', notifications: 'Grants read access to a user’s notifications. repo also provides this access.', gist: 'Grants write access to gists.', 'read:repo_hook': 'Grants read and ping access to hooks in public or private repositories.', 'write:repo_hook': 'Grants read, write, and ping access to hooks in public or private repositories.', 'admin:repo_hook': 'Grants read, write, ping, and delete access to hooks in public or private repositories.', 'read:org': 'Read-only access to organization, teams, and membership.', 'write:org': 'Publicize and unpublicize organization membership.', 'admin:org': 'Fully manage organization, teams, and memberships.', 'read:public_key': 'List and view details for public keys.', 'write:public_key': 'Create, list, and view details for public keys.', 'admin:public_key': 'Fully manage public keys.'}, flow: accessCode, authorizationUrl: 'https://github.com/login/oauth/authorize', tokenUrl: 'https://github.com/login/oauth/access_token'} + petstoreImplicit: {type: oauth2, scopes: {user: 'Grants read/write access to profile info only. Note that this scope includes user:email and user:follow.', 'user:email': 'Grants read access to a user’s email addresses.', 'user:follow': 'Grants access to follow or unfollow other users.', public_repo: 'Grants read/write access to code, commit statuses, and deployment statuses for public repositories and organizations.', repo: 'Grants read/write access to code, commit statuses, and deployment statuses for public and private repositories and organizations.', repo_deployment: 'Grants access to deployment statuses for public and private repositories. This scope is only necessary to grant other users or services access to deployment statuses, without granting access to the code.', 'repo:status': 'Grants read/write access to public and private repository commit statuses. This scope is only necessary to grant other users or services access to private repository commit statuses without granting access to the code.', delete_repo: 'Grants access to delete adminable repositories.', notifications: 'Grants read access to a user’s notifications. 
repo also provides this access.', gist: 'Grants write access to gists.', 'read:repo_hook': 'Grants read and ping access to hooks in public or private repositories.', 'write:repo_hook': 'Grants read, write, and ping access to hooks in public or private repositories.', 'admin:repo_hook': 'Grants read, write, ping, and delete access to hooks in public or private repositories.', 'read:org': 'Read-only access to organization, teams, and membership.', 'write:org': 'Publicize and unpublicize organization membership.', 'admin:org': 'Fully manage organization, teams, and memberships.', 'read:public_key': 'List and view details for public keys.', 'write:public_key': 'Create, list, and view details for public keys.', 'admin:public_key': 'Fully manage public keys.'}, flow: implicit, authorizationUrl: 'http://petstore.swagger.wordnik.com/oauth/dialog'} + internalApiKey: {type: apiKey, in: header, name: api_key} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/stringPathParamResource.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/stringPathParamResource.yaml new file mode 100644 index 000000000..a4a83fea0 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/stringPathParamResource.yaml @@ -0,0 +1,23 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{petId}': {get: {description: 'Returns a pet based on ID', summary: 'Find pet by ID', operationId: getPetsById, produces: [application/json, text/html], parameters: [{name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: array, items: {type: string}, collectionFormat: csv}], responses: {'200': {description: 'pet response', schema: {$ref: Pet}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/taggedResource.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/taggedResource.yaml new file mode 100644 index 000000000..1b8620e52 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/taggedResource.yaml @@ -0,0 +1,29 @@ +swagger: '2.0' +x-reverb: + addAnythingYouWant: true +info: + x-reverb-info: 'this is an example' + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', 
url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +tags: + - {name: pets} +paths: + x-reverb-path-info: 'vendor info' + /pets: {x-vendor-method: {}, get: {x-vendor-operation-property: {}, description: 'Returns a pet based on ID', summary: 'Find pet by ID', operationId: getPetsById, produces: [application/json, text/html], parameters: [{x-vendor-parameter-property: {}, name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: array, items: {type: string}, collectionFormat: csv}], responses: {'200': {description: 'pet response', schema: {$ref: Pet}}, x-vendor-operation-response-property: {}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {x-vendor-model-property: {}, required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/vendorExtensionExamples.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/vendorExtensionExamples.yaml new file mode 100644 index 000000000..59bba0f3d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/resources/vendorExtensionExamples.yaml @@ -0,0 +1,27 @@ +swagger: '2.0' +x-reverb: + addAnythingYouWant: true +info: + x-reverb-info: 'this is an example' + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + x-reverb-path-info: 'vendor info' + /pets: {x-vendor-method: {}, get: {x-vendor-operation-property: {}, description: 'Returns a pet based on ID', summary: 'Find pet by ID', operationId: getPetsById, produces: [application/json, text/html], parameters: [{x-vendor-parameter-property: {}, name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: array, items: {type: string}, collectionFormat: csv}], responses: {'200': {description: 'pet response', schema: {$ref: Pet}}, x-vendor-operation-response-property: {}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {x-vendor-model-property: {}, required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/complexArrayResponse.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/complexArrayResponse.yaml new file mode 100644 index 000000000..bc5ce9534 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/complexArrayResponse.yaml @@ -0,0 +1,4 @@ +description: 'A complex object array response' +schema: + type: array + items: {$ref: VeryComplexType} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/dateTimeResponse.yaml 
b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/dateTimeResponse.yaml new file mode 100644 index 000000000..9bc3e8043 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/dateTimeResponse.yaml @@ -0,0 +1,4 @@ +description: 'A date-time response' +schema: + type: string + format: date-time diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/int32Response.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/int32Response.yaml new file mode 100644 index 000000000..e4456c9fe --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/int32Response.yaml @@ -0,0 +1,4 @@ +description: 'A simple string response' +schema: + type: integer + format: int32 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/int64Response.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/int64Response.yaml new file mode 100644 index 000000000..ca58b2a5f --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/int64Response.yaml @@ -0,0 +1,4 @@ +description: 'A simple string response' +schema: + type: integer + format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/multipleResponses.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/multipleResponses.yaml new file mode 100644 index 000000000..4cd1312c5 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/multipleResponses.yaml @@ -0,0 +1,8 @@ +'200': + description: 'simple string response' + schema: {type: string} +'201': + description: 'object created' +default: + description: oops + schema: {type: integer, format: int32} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/stringArrayResponse.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/stringArrayResponse.yaml new file mode 100644 index 000000000..cab5fad8c --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/stringArrayResponse.yaml @@ -0,0 +1,4 @@ +description: 'A string array response' +schema: + type: array + items: {type: string} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/stringResponse.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/stringResponse.yaml new file mode 100644 index 000000000..61a25e510 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/stringResponse.yaml @@ -0,0 +1,3 @@ +description: 'A simple string response' +schema: + type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/stringResponseWithHeader.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/stringResponseWithHeader.yaml new file mode 100644 index 000000000..77c2c7561 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/stringResponseWithHeader.yaml @@ -0,0 +1,6 @@ +description: 'A simple string response' +schema: + type: string +headers: + is-dog: {type: boolean} + is-cat: {type: boolean} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/voidResponse.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/voidResponse.yaml new file mode 100644 index 000000000..f2aa5e124 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/responses/voidResponse.yaml @@ -0,0 +1 @@ +description: 'object created' diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/1/2/3/4/swagger.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/1/2/3/4/swagger.yaml new 
file mode 100644 index 000000000..dcaf3ff80 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/1/2/3/4/swagger.yaml @@ -0,0 +1,42 @@ +swagger: '2.0' +info: + title: Something + contact: + name: Somebody + url: https://url.com + email: email@url.com + description: Something + version: v1 +host: security.sonusnet.com +schemes: +- https +- http +basePath: /api +produces: +- application/json +- plain/text + +paths: + /whatnot: + get: + description: Get something + responses: + 200: + description: The something + schema: + $ref: '#/definitions/Something' + 500: + description: Oops + +definitions: + Something: + description: A collection of service events + type: object + properties: + page: + $ref: '../../../../shared/definitions/page.yaml#/definitions/Page' + something: + #type: array + #description: An array of something + #items: + $ref: '../../../../shared/something.yaml#/definitions/Something' diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/shared/definitions/page.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/shared/definitions/page.yaml new file mode 100644 index 000000000..29355d42d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/shared/definitions/page.yaml @@ -0,0 +1,18 @@ +definitions: + Page: + description: A description of a paged result + type: object + properties: + page: + type: integer + description: the page that was requested + pages: + type: integer + description: the total number of pages available + total_items: + type: integer + description: the total number of items available + format: int64 + page_items: + type: integer + description: the number of items per page requested diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/shared/something.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/shared/something.yaml new file mode 100644 index 000000000..f5f2e3919 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/shared/something.yaml @@ -0,0 +1,44 @@ +swagger: '2.0' +info: + title: Something definitions + contact: + name: Somebody + url: https://url.com + email: email@url.com + description: Something + version: v1 +host: security.sonusnet.com +schemes: +- https +- http +basePath: /api/something/definitions +produces: +- application/json +- plain/text + +paths: + /shared: + get: + operationId: Get + tags: + - Shared + responses: + 200: + description: OK + schema: + properties: + name: + type: string + +definitions: + Something: + description: Something + type: object + properties: + p1: + type: string + description: A string + p2: + type: integer + description: An integer + diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/spec.yml b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/spec.yml new file mode 100644 index 000000000..7e6b1331a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/spec.yml @@ -0,0 +1,38 @@ +swagger: "2.0" +info: + version: 0.1.1 + title: test 1 + description: recursively following JSON references + contact: + name: Fred + +schemes: + - http + +consumes: + - application/json +produces: + - application/json + +paths: + /getAll: + get: + operationId: getAll + parameters: + - name: a + in: body + description: max number of results + required: false + schema: + $ref: '#/definitions/a' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/b' + +definitions: + a: + type: string + b: + $ref: 
'./test3-ter-model-schema.json#/definitions/b' \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/test3-ter-model-schema.json b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/test3-ter-model-schema.json new file mode 100644 index 000000000..eb446c762 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/test3-ter-model-schema.json @@ -0,0 +1,14 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "./test3-model-schema.json", + "title": "test3-model-schema", + "description": "Test schema responses", + "definitions": { + "b": { + "type": "array", + "items": { + "type": "string" + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/.gitkeep b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithArrayRef.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithArrayRef.yaml new file mode 100644 index 000000000..b32a35901 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithArrayRef.yaml @@ -0,0 +1,5 @@ +required: + - id +properties: + id: {type: integer, format: int64} + children: {type: array, items: {$ref: Person}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithComposition.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithComposition.yaml new file mode 100644 index 000000000..f0ea5e80f --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithComposition.yaml @@ -0,0 +1,5 @@ +definitions: + Cat: {description: 'A representation of a cat', allOf: [{$ref: '#/models/Pet'}, {properties: {huntingSkill: {type: string, description: 'The measured skill for hunting', default: lazy, enum: [clueless, lazy, adventerous, aggressive]}}, required: [huntingSkill]}]} + Dog: {description: 'A representation of a dog', allOf: [{$ref: '#/models/Pet'}, {properties: {packSize: {type: integer, format: int32, description: 'the size of the pack the dog is from', default: 0}}, required: [name, packSize]}]} + Fish: {description: 'A representation of a fish', allOf: [{$ref: '#/models/Pet'}, {properties: {fins: {type: integer, format: int32, description: 'count of fins'}}, required: [fins]}]} + Pet: {discriminator: petType, properties: {name: {type: string}, petType: {type: string}}, required: [name, petType]} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithDateTimeMap.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithDateTimeMap.yaml new file mode 100644 index 000000000..653cbd182 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithDateTimeMap.yaml @@ -0,0 +1,4 @@ +description: 'true' +additionalProperties: + type: string + format: date-time diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithExamples.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithExamples.yaml new file mode 100644 index 000000000..0252ec176 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithExamples.yaml @@ -0,0 +1,3 @@ +definitions: + Pet: {properties: {name: {type: string}}, required: [name]} + Dog: {properties: {id: {type: integer, format: int64}, name: {type: string}}, required: [name]} diff --git 
a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithInt32Map.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithInt32Map.yaml new file mode 100644 index 000000000..88ab02447 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithInt32Map.yaml @@ -0,0 +1,4 @@ +description: 'This is a Map[String, Integer]' +additionalProperties: + type: integer + format: int32 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithInt64Map.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithInt64Map.yaml new file mode 100644 index 000000000..16c790e42 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithInt64Map.yaml @@ -0,0 +1,4 @@ +description: 'true' +additionalProperties: + type: integer + format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithMultipleProperties.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithMultipleProperties.yaml new file mode 100644 index 000000000..f69d27ef2 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithMultipleProperties.yaml @@ -0,0 +1,14 @@ +description: 'true' +properties: + booleanValue: {type: boolean} + byteValue: {type: string, format: byte} + dateTimeValue: {type: string, format: date-time} + int32Value: {type: integer, format: int32} + int64Value: {type: integer, format: int64} + stringValue: {type: string} + booleanArrayValue: {type: array, items: {type: boolean}} + byteArrayValue: {type: array, items: {type: string, format: byte}} + dateTimeArrayValue: {type: array, items: {type: string, format: date-time}} + int32ArrayValue: {type: array, items: {type: integer, format: int32}} + int64ArrayValue: {type: array, items: {type: integer, format: int64}} + stringArrayValue: {type: array, items: {type: string}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithObjectMap.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithObjectMap.yaml new file mode 100644 index 000000000..e712e0f22 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithObjectMap.yaml @@ -0,0 +1,9 @@ +description: "This is a Map[String, { id: Long, name: String}]" +additionalProperties: + type: "object" + properties: + id: + type: "integer" + format: "int64" + name: + type: "string" diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithPrimitiveArray.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithPrimitiveArray.yaml new file mode 100644 index 000000000..0837bad52 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithPrimitiveArray.yaml @@ -0,0 +1,5 @@ +required: + - id +properties: + id: {type: integer, format: int64} + childrensAges: {type: array, items: {type: integer, format: int32}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithStringProperty.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithStringProperty.yaml new file mode 100644 index 000000000..a17d21e5a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithStringProperty.yaml @@ -0,0 +1,3 @@ +description: 'true' +properties: + name: {type: string} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithXmlAttributes.yaml 
b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithXmlAttributes.yaml new file mode 100644 index 000000000..2176983f8 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/modelWithXmlAttributes.yaml @@ -0,0 +1,6 @@ +description: 'this model serves xml and json structures' +xml: + name: XMLModel +properties: + id: {type: integer, format: int64, xml: {attribute: true, namespace: ns1, prefix: urn1}} + items: {type: array, items: {type: string}, xml: {wrapped: true}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/models.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/models.yaml new file mode 100644 index 000000000..71f8efeb4 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/models.yaml @@ -0,0 +1,2 @@ +definitions: + Pet: {properties: {name: {type: string}}, required: [name]} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/multipleModels.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/multipleModels.yaml new file mode 100644 index 000000000..0252ec176 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/multipleModels.yaml @@ -0,0 +1,3 @@ +definitions: + Pet: {properties: {name: {type: string}}, required: [name]} + Dog: {properties: {id: {type: integer, format: int64}, name: {type: string}}, required: [name]} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithBooleanArray.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithBooleanArray.yaml new file mode 100644 index 000000000..aa02d0e74 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithBooleanArray.yaml @@ -0,0 +1,3 @@ +type: array +items: + type: boolean diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithByteArray.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithByteArray.yaml new file mode 100644 index 000000000..f31b35036 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithByteArray.yaml @@ -0,0 +1,4 @@ +type: array +items: + type: string + format: byte diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithComplexArray.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithComplexArray.yaml new file mode 100644 index 000000000..69e0b1765 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithComplexArray.yaml @@ -0,0 +1,3 @@ +type: array +items: + $ref: ComplexType diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithDateTimeArray.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithDateTimeArray.yaml new file mode 100644 index 000000000..6efef25d8 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithDateTimeArray.yaml @@ -0,0 +1,4 @@ +type: array +items: + type: string + format: date-time diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithInt32Array.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithInt32Array.yaml new file mode 100644 index 000000000..26fd271b0 --- /dev/null +++ 
b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithInt32Array.yaml @@ -0,0 +1,4 @@ +type: array +items: + type: integer + format: int32 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithInt64Array.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithInt64Array.yaml new file mode 100644 index 000000000..e800895eb --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithInt64Array.yaml @@ -0,0 +1,4 @@ +type: array +items: + type: integer + format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithRef.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithRef.yaml new file mode 100644 index 000000000..d78956949 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithRef.yaml @@ -0,0 +1,3 @@ +$ref: Foo +description: 'a boolean' +readOnly: true diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithStringArray.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithStringArray.yaml new file mode 100644 index 000000000..3a9108235 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/propertyWithStringArray.yaml @@ -0,0 +1,3 @@ +type: array +items: + type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleBooleanProperty.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleBooleanProperty.yaml new file mode 100644 index 000000000..9d1e27382 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleBooleanProperty.yaml @@ -0,0 +1,3 @@ +type: boolean +description: 'a boolean' +readOnly: true diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleByteProperty.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleByteProperty.yaml new file mode 100644 index 000000000..6af2139f9 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleByteProperty.yaml @@ -0,0 +1,2 @@ +type: string +format: byte diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleDateTimeProperty.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleDateTimeProperty.yaml new file mode 100644 index 000000000..407b1d7e8 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleDateTimeProperty.yaml @@ -0,0 +1,2 @@ +type: string +format: date-time diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleInt32Property.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleInt32Property.yaml new file mode 100644 index 000000000..8ed513dba --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleInt32Property.yaml @@ -0,0 +1,2 @@ +type: integer +format: int32 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleInt64Property.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleInt64Property.yaml new file mode 100644 index 000000000..69f359f3f --- /dev/null +++ 
b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleInt64Property.yaml @@ -0,0 +1,2 @@ +type: integer +format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleStringProperty.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleStringProperty.yaml new file mode 100644 index 000000000..5c21d88b9 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/models/properties/simpleStringProperty.yaml @@ -0,0 +1 @@ +type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/cascadingSchemes.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/cascadingSchemes.yaml new file mode 100644 index 000000000..d10802774 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/cascadingSchemes.yaml @@ -0,0 +1,23 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{petId}': {get: {description: 'Returns a pet based on ID', summary: 'Find pet by ID', operationId: getPetsById, produces: [application/json, text/html], parameters: [{name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: array, items: {type: string}, collectionFormat: csv}], responses: {'200': {description: 'pet response', schema: {$ref: Pet}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}, schemes: [https]}} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/commonParameters.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/commonParameters.yaml new file mode 100644 index 000000000..9851b7448 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/commonParameters.yaml @@ -0,0 +1,23 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{id}': {parameters: [{name: id, in: path, description: 'ID of pet to use', required: true, type: array, items: {type: string}, collectionFormat: csv}], get: {description: 'Returns pets based on ID', summary: 'Find pets by ID', operationId: getPetsById, produces: [application/json, text/html], responses: {'200': {description: 'pet response', schema: {type: array, items: {$ref: Pet}}}, default: 
{description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/multipleMimeTypes.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/multipleMimeTypes.yaml new file mode 100644 index 000000000..23bdc2d2a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/multipleMimeTypes.yaml @@ -0,0 +1,32 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - 'text/plain; charset=utf-8' + - application/json + - application/vnd.github+json + - application/vnd.github.v3+json + - application/vnd.github.v3.raw+json + - application/vnd.github.v3.text+json + - application/vnd.github.v3.html+json + - application/vnd.github.v3.full+json + - application/vnd.github.v3.diff + - application/vnd.github.v3.patch +produces: + - application/json + - application/xml +paths: + '/pets/{id}': {parameters: [{name: id, in: path, description: 'ID of pet to use', required: true, type: array, items: {type: string}, collectionFormat: csv}], get: {description: 'Returns pets based on ID', summary: 'Find pets by ID', operationId: getPetsById, produces: [application/json, text/html], responses: {'200': {description: 'pet response', schema: {type: array, items: {$ref: Pet}}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/operations/operationWithTags.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/operations/operationWithTags.yaml new file mode 100644 index 000000000..a3c85b26a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/operations/operationWithTags.yaml @@ -0,0 +1,14 @@ +description: 'Returns a pet based on ID' +summary: 'Find pet by ID' +operationId: getPetsById +tags: + - foo + - bar +produces: + - application/json + - text/html +parameters: + - {name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: integer, format: int64} +responses: + '200': {description: 'a pet to be returned', schema: {$ref: Pet}} + default: {description: 'Unexpected error', schema: {$ref: ErrorModel}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/operations/stringPathAndBoolQueryParamResource.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/operations/stringPathAndBoolQueryParamResource.yaml new file mode 100644 index 000000000..a00b8c469 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/operations/stringPathAndBoolQueryParamResource.yaml @@ -0,0 +1,12 @@ +description: 'Returns 
a pet based on ID' +summary: 'Find pet by ID' +operationId: getPetsById +produces: + - application/json + - text/html +parameters: + - {name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: integer, format: int64} + - {name: includeDetails, in: query, description: 'include details in response', required: true, type: boolean} +responses: + '200': {description: 'a pet to be returned', schema: {$ref: Pet}} + default: {description: 'Unexpected error', schema: {$ref: ErrorModel}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/operations/stringPathParamResource.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/operations/stringPathParamResource.yaml new file mode 100644 index 000000000..8c46cc7ee --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/operations/stringPathParamResource.yaml @@ -0,0 +1,12 @@ +description: 'Returns a pet based on ID' +summary: 'Find pet by ID' +operationId: getPetsById +produces: + - application/json + - text/html +parameters: + - {name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: integer, format: int64} +responses: + '200': {description: fun, schema: {$ref: Pet}} + '400': {description: 'Invalid ID supplied <= this is purely for documentation', schema: {$ref: ErrorModel}} + default: {description: 'Unexpected error', schema: {$ref: ErrorModel}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyComplexArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyComplexArrayParameter.yaml new file mode 100644 index 000000000..511f8dc2b --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyComplexArrayParameter.yaml @@ -0,0 +1,8 @@ +name: user +in: body +description: 'user to add to the system' +required: true +schema: + type: array + items: {type: string} + format: csv diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyComplexParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyComplexParameter.yaml new file mode 100644 index 000000000..13f8fd171 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyComplexParameter.yaml @@ -0,0 +1,6 @@ +name: user +in: body +description: 'user to add to the system' +required: true +schema: + $ref: User diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyInt64Parameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyInt64Parameter.yaml new file mode 100644 index 000000000..a270d94cb --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyInt64Parameter.yaml @@ -0,0 +1,7 @@ +name: id +in: body +description: 'id to add' +required: true +schema: + type: integer + format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyStringArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyStringArrayParameter.yaml new file mode 100644 index 000000000..7d88b10dc --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyStringArrayParameter.yaml @@ -0,0 +1,7 @@ +name: user +in: body +description: 'user to add to the system' +required: true +schema: + type: array + items: {type: 
string} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyStringParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyStringParameter.yaml new file mode 100644 index 000000000..7fe9dbd1e --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/bodyStringParameter.yaml @@ -0,0 +1,6 @@ +name: user +in: body +description: 'user to add to the system' +required: true +schema: + type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/formDataComplexParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/formDataComplexParameter.yaml new file mode 100644 index 000000000..b49dd9e3d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/formDataComplexParameter.yaml @@ -0,0 +1,5 @@ +name: firstName +in: formData +description: 'users first name' +required: true +$ref: Nothing diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/formDataInt64Parameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/formDataInt64Parameter.yaml new file mode 100644 index 000000000..0faaa7f62 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/formDataInt64Parameter.yaml @@ -0,0 +1,6 @@ +name: id +in: formData +description: 'username to fetch' +required: true +type: integer +format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/formDataStringArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/formDataStringArrayParameter.yaml new file mode 100644 index 000000000..f6534662a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/formDataStringArrayParameter.yaml @@ -0,0 +1,7 @@ +name: user +in: formData +description: 'user to add to the system' +required: true +type: array +items: + type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/formDataStringParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/formDataStringParameter.yaml new file mode 100644 index 000000000..43411cf04 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/formDataStringParameter.yaml @@ -0,0 +1,5 @@ +name: firstName +in: formData +description: 'users first name' +required: true +type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/headerInt64ArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/headerInt64ArrayParameter.yaml new file mode 100644 index 000000000..87ec99cab --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/headerInt64ArrayParameter.yaml @@ -0,0 +1,9 @@ +name: token +in: header +description: 'token to be passed as a header' +required: true +type: array +items: + type: integer + format: int64 +collectionFormat: csv diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/headerStringArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/headerStringArrayParameter.yaml new file mode 100644 index 000000000..70e0a0a68 --- /dev/null +++ 
b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/headerStringArrayParameter.yaml @@ -0,0 +1,8 @@ +name: token +in: header +description: 'token to be passed as a header' +required: true +type: array +items: + type: string +collectionFormat: csv diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/headerStringParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/headerStringParameter.yaml new file mode 100644 index 000000000..ac99c2f62 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/headerStringParameter.yaml @@ -0,0 +1,5 @@ +name: token +in: header +description: 'token to be passed as a header' +required: true +type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/pathInt64Parameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/pathInt64Parameter.yaml new file mode 100644 index 000000000..3bd860aac --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/pathInt64Parameter.yaml @@ -0,0 +1,6 @@ +name: id +in: path +description: 'username to fetch' +required: true +type: integer +format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/pathStringArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/pathStringArrayParameter.yaml new file mode 100644 index 000000000..ebb24a7e2 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/pathStringArrayParameter.yaml @@ -0,0 +1,8 @@ +name: usernames +in: path +description: 'usernames to pass' +required: true +type: array +items: + type: string +collectionFormat: csv diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/pathStringParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/pathStringParameter.yaml new file mode 100644 index 000000000..2cb4fb2ee --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/pathStringParameter.yaml @@ -0,0 +1,5 @@ +name: username +in: path +description: 'username to fetch' +required: true +type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/queryInt64ArrayParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/queryInt64ArrayParameter.yaml new file mode 100644 index 000000000..453e24f5d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/queryInt64ArrayParameter.yaml @@ -0,0 +1,9 @@ +name: id +in: query +description: 'ID of the object to fetch' +required: true +type: array +items: + type: integer + format: int64 +collectionFormat: csv diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/queryStringParameter.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/queryStringParameter.yaml new file mode 100644 index 000000000..e1df68530 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/queryStringParameter.yaml @@ -0,0 +1,6 @@ +name: id +in: query +description: 'ID of the object to fetch' +required: true +type: integer +format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/queryWithComplexParameter.yaml 
b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/queryWithComplexParameter.yaml new file mode 100644 index 000000000..80bc7951b --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/parameters/queryWithComplexParameter.yaml @@ -0,0 +1,6 @@ +name: id +in: query +description: 'a complex object which should not validate' +required: true +schema: + $ref: Pet diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/resourceWithExamplePayload.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/resourceWithExamplePayload.yaml new file mode 100644 index 000000000..9828d9605 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/resourceWithExamplePayload.yaml @@ -0,0 +1,23 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{petId}': {get: {description: 'Returns a pet based on ID', summary: 'Find pet by ID', operationId: getPetsById, produces: [application/json, text/html], parameters: [{name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: array, items: {type: string}, collectionFormat: csv}], responses: {'200': {description: 'pet response', schema: {$ref: Pet}, examples: {application/json: {id: 9, category: {name: domestic}, name: monster, tags: [{name: 'for sale'}], status: alive}}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {required: [name], properties: {name: {type: string, example: cat}, tag: {type: string, example: 'for sale'}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/resourceWithLinkedDefinitions.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/resourceWithLinkedDefinitions.yaml new file mode 100644 index 000000000..b9b00c47f --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/resourceWithLinkedDefinitions.yaml @@ -0,0 +1,23 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{petId}': {$ref: 'https://raw.githubusercontent.com/reverb/swagger-spec/master/fixtures/v2.0/json/resources/resourceWithLinkedDefinitions_part1.json'} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: 
int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/resourceWithLinkedDefinitions_part1.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/resourceWithLinkedDefinitions_part1.yaml new file mode 100644 index 000000000..7d358fbf1 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/resourceWithLinkedDefinitions_part1.yaml @@ -0,0 +1,7 @@ +get: + description: 'Returns a pet based on ID' + summary: 'Find pet by ID' + operationId: getPetsById + produces: [application/json, text/html] + parameters: [{name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: array, items: {type: string}, collectionFormat: csv}] + responses: {'200': {description: 'pet response', schema: {$ref: Pet}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/resourceWithRelativeHost.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/resourceWithRelativeHost.yaml new file mode 100644 index 000000000..744aa72e9 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/resourceWithRelativeHost.yaml @@ -0,0 +1,22 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{id}': {parameters: [{name: id, in: path, description: 'ID of pet to use', required: true, type: array, items: {type: string}, collectionFormat: csv}], get: {description: 'Returns pets based on ID', summary: 'Find pets by ID', operationId: getPetsById, produces: [application/json, text/html], responses: {'200': {description: 'pet response', schema: {type: array, items: {$ref: Pet}}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/reusableParameters.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/reusableParameters.yaml new file mode 100644 index 000000000..1d258d082 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/reusableParameters.yaml @@ -0,0 +1,26 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{id}': {get: {description: 'Returns pets based on ID', summary: 
'Find pets by ID', operationId: getPetsById, parameters: [{$ref: '#/parameters/skipParam'}, {$ref: '#/parameters/limitParam'}], responses: {'200': {description: 'pet response', schema: {type: array, items: {$ref: Pet}}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +parameters: + skipParam: {name: skip, in: query, description: 'number of items to skip', required: true, type: integer, format: int32} + limitParam: {name: limit, in: query, description: 'max records to return', required: true, type: integer, format: int32} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/securityExample.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/securityExample.yaml new file mode 100644 index 000000000..007d26db4 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/securityExample.yaml @@ -0,0 +1,29 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +security: + - {githubAccessCode: [user, gist]} + - {internalApiKey: []} +paths: + '/pets/{id}': {parameters: [{name: id, in: path, description: 'ID of pet to use', required: true, type: array, items: {type: string}, collectionFormat: csv}], get: {description: 'Returns pets based on ID', summary: 'Find pets by ID', operationId: getPetsById, security: [{githubAuth: ['user:read', 'user:write']}, {internalApiKey: []}], produces: [application/json, text/html], responses: {'200': {description: 'pet response', schema: {type: array, items: {$ref: Pet}}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +securityDefinitions: + githubAccessCode: {type: oauth2, scopes: {user: 'Grants read/write access to profile info only. Note that this scope includes user:email and user:follow.', 'user:email': 'Grants read access to a user’s email addresses.', 'user:follow': 'Grants access to follow or unfollow other users.', public_repo: 'Grants read/write access to code, commit statuses, and deployment statuses for public repositories and organizations.', repo: 'Grants read/write access to code, commit statuses, and deployment statuses for public and private repositories and organizations.', repo_deployment: 'Grants access to deployment statuses for public and private repositories. This scope is only necessary to grant other users or services access to deployment statuses, without granting access to the code.', 'repo:status': 'Grants read/write access to public and private repository commit statuses. This scope is only necessary to grant other users or services access to private repository commit statuses without granting access to the code.', delete_repo: 'Grants access to delete adminable repositories.', notifications: 'Grants read access to a user’s notifications. 
repo also provides this access.', gist: 'Grants write access to gists.', 'read:repo_hook': 'Grants read and ping access to hooks in public or private repositories.', 'write:repo_hook': 'Grants read, write, and ping access to hooks in public or private repositories.', 'admin:repo_hook': 'Grants read, write, ping, and delete access to hooks in public or private repositories.', 'read:org': 'Read-only access to organization, teams, and membership.', 'write:org': 'Publicize and unpublicize organization membership.', 'admin:org': 'Fully manage organization, teams, and memberships.', 'read:public_key': 'List and view details for public keys.', 'write:public_key': 'Create, list, and view details for public keys.', 'admin:public_key': 'Fully manage public keys.'}, flow: accessCode, authorizationUrl: 'https://github.com/login/oauth/authorize', tokenUrl: 'https://github.com/login/oauth/access_token'} + petstoreImplicit: {type: oauth2, scopes: {user: 'Grants read/write access to profile info only. Note that this scope includes user:email and user:follow.', 'user:email': 'Grants read access to a user’s email addresses.', 'user:follow': 'Grants access to follow or unfollow other users.', public_repo: 'Grants read/write access to code, commit statuses, and deployment statuses for public repositories and organizations.', repo: 'Grants read/write access to code, commit statuses, and deployment statuses for public and private repositories and organizations.', repo_deployment: 'Grants access to deployment statuses for public and private repositories. This scope is only necessary to grant other users or services access to deployment statuses, without granting access to the code.', 'repo:status': 'Grants read/write access to public and private repository commit statuses. This scope is only necessary to grant other users or services access to private repository commit statuses without granting access to the code.', delete_repo: 'Grants access to delete adminable repositories.', notifications: 'Grants read access to a user’s notifications. 
repo also provides this access.', gist: 'Grants write access to gists.', 'read:repo_hook': 'Grants read and ping access to hooks in public or private repositories.', 'write:repo_hook': 'Grants read, write, and ping access to hooks in public or private repositories.', 'admin:repo_hook': 'Grants read, write, ping, and delete access to hooks in public or private repositories.', 'read:org': 'Read-only access to organization, teams, and membership.', 'write:org': 'Publicize and unpublicize organization membership.', 'admin:org': 'Fully manage organization, teams, and memberships.', 'read:public_key': 'List and view details for public keys.', 'write:public_key': 'Create, list, and view details for public keys.', 'admin:public_key': 'Fully manage public keys.'}, flow: implicit, authorizationUrl: 'http://petstore.swagger.wordnik.com/oauth/dialog'} + internalApiKey: {type: apiKey, in: header, name: api_key} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/stringPathParamResource.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/stringPathParamResource.yaml new file mode 100644 index 000000000..a4a83fea0 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/stringPathParamResource.yaml @@ -0,0 +1,23 @@ +swagger: '2.0' +info: + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + '/pets/{petId}': {get: {description: 'Returns a pet based on ID', summary: 'Find pet by ID', operationId: getPetsById, produces: [application/json, text/html], parameters: [{name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: array, items: {type: string}, collectionFormat: csv}], responses: {'200': {description: 'pet response', schema: {$ref: Pet}}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/taggedResource.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/taggedResource.yaml new file mode 100644 index 000000000..1b8620e52 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/taggedResource.yaml @@ -0,0 +1,29 @@ +swagger: '2.0' +x-reverb: + addAnythingYouWant: true +info: + x-reverb-info: 'this is an example' + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative 
Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +tags: + - {name: pets} +paths: + x-reverb-path-info: 'vendor info' + /pets: {x-vendor-method: {}, get: {x-vendor-operation-property: {}, description: 'Returns a pet based on ID', summary: 'Find pet by ID', operationId: getPetsById, produces: [application/json, text/html], parameters: [{x-vendor-parameter-property: {}, name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: array, items: {type: string}, collectionFormat: csv}], responses: {'200': {description: 'pet response', schema: {$ref: Pet}}, x-vendor-operation-response-property: {}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {x-vendor-model-property: {}, required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/vendorExtensionExamples.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/vendorExtensionExamples.yaml new file mode 100644 index 000000000..59bba0f3d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/resources/vendorExtensionExamples.yaml @@ -0,0 +1,27 @@ +swagger: '2.0' +x-reverb: + addAnythingYouWant: true +info: + x-reverb-info: 'this is an example' + version: 1.0.9-abcd + title: 'Swagger Sample API' + description: 'A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification' + termsOfService: 'http://helloreverb.com/terms/' + contact: {name: 'wordnik api team', url: 'http://developer.wordnik.com'} + license: {name: 'Creative Commons 4.0 International', url: 'http://creativecommons.org/licenses/by/4.0/'} +host: my.api.com +basePath: /v1 +schemes: + - http + - https +consumes: + - application/json +produces: + - application/json + - application/xml +paths: + x-reverb-path-info: 'vendor info' + /pets: {x-vendor-method: {}, get: {x-vendor-operation-property: {}, description: 'Returns a pet based on ID', summary: 'Find pet by ID', operationId: getPetsById, produces: [application/json, text/html], parameters: [{x-vendor-parameter-property: {}, name: petId, in: path, description: 'ID of pet that needs to be fetched', required: true, type: array, items: {type: string}, collectionFormat: csv}], responses: {'200': {description: 'pet response', schema: {$ref: Pet}}, x-vendor-operation-response-property: {}, default: {description: 'error payload', schema: {$ref: ErrorModel}}}}} +definitions: + Pet: {x-vendor-model-property: {}, required: [name], properties: {name: {type: string}, tag: {type: string}}} + ErrorModel: {required: [code, message], properties: {code: {type: integer, format: int32}, message: {type: string}}} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/complexArrayResponse.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/complexArrayResponse.yaml new file mode 100644 index 000000000..bc5ce9534 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/complexArrayResponse.yaml @@ -0,0 +1,4 @@ +description: 'A complex object array response' +schema: + type: array + items: {$ref: VeryComplexType} diff --git 
a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/dateTimeResponse.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/dateTimeResponse.yaml new file mode 100644 index 000000000..9bc3e8043 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/dateTimeResponse.yaml @@ -0,0 +1,4 @@ +description: 'A date-time response' +schema: + type: string + format: date-time diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/int32Response.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/int32Response.yaml new file mode 100644 index 000000000..e4456c9fe --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/int32Response.yaml @@ -0,0 +1,4 @@ +description: 'A simple string response' +schema: + type: integer + format: int32 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/int64Response.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/int64Response.yaml new file mode 100644 index 000000000..ca58b2a5f --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/int64Response.yaml @@ -0,0 +1,4 @@ +description: 'A simple string response' +schema: + type: integer + format: int64 diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/multipleResponses.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/multipleResponses.yaml new file mode 100644 index 000000000..4cd1312c5 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/multipleResponses.yaml @@ -0,0 +1,8 @@ +'200': + description: 'simple string response' + schema: {type: string} +'201': + description: 'object created' +default: + description: oops + schema: {type: integer, format: int32} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/stringArrayResponse.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/stringArrayResponse.yaml new file mode 100644 index 000000000..cab5fad8c --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/stringArrayResponse.yaml @@ -0,0 +1,4 @@ +description: 'A string array response' +schema: + type: array + items: {type: string} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/stringResponse.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/stringResponse.yaml new file mode 100644 index 000000000..61a25e510 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/stringResponse.yaml @@ -0,0 +1,3 @@ +description: 'A simple string response' +schema: + type: string diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/stringResponseWithHeader.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/stringResponseWithHeader.yaml new file mode 100644 index 000000000..77c2c7561 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/stringResponseWithHeader.yaml @@ -0,0 +1,6 @@ +description: 'A simple string response' +schema: + type: string +headers: + is-dog: {type: boolean} + is-cat: {type: boolean} diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/voidResponse.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/voidResponse.yaml new file mode 100644 index 000000000..f2aa5e124 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/yaml/responses/voidResponse.yaml @@ -0,0 +1 @@ 
+description: 'object created' diff --git a/vendor/github.com/go-openapi/loads/fmts/fixture_test.go b/vendor/github.com/go-openapi/loads/fmts/fixture_test.go new file mode 100644 index 000000000..6213ca57c --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fmts/fixture_test.go @@ -0,0 +1,298 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fmts + +import ( + "encoding/json" + "io/ioutil" + "path/filepath" + "strings" + "testing" + + "github.com/go-openapi/spec" + "github.com/stretchr/testify/assert" +) + +var extensions = []string{"json"} + +func assertSpecJSON(t testing.TB, specJSON []byte) bool { + var expected map[string]interface{} + if !assert.NoError(t, json.Unmarshal(specJSON, &expected)) { + return false + } + + obj := spec.Swagger{} + if !assert.NoError(t, json.Unmarshal(specJSON, &obj)) { + return false + } + + cb, err := json.MarshalIndent(obj, "", " ") + if !assert.NoError(t, err) { + return false + } + var actual map[string]interface{} + if !assert.NoError(t, json.Unmarshal(cb, &actual)) { + return false + } + return assertSpecMaps(t, actual, expected) +} + +func assertSpecMaps(t testing.TB, actual, expected map[string]interface{}) bool { + res := true + if id, ok := expected["id"]; ok { + res = assert.Equal(t, id, actual["id"]) + } + res = res && assert.Equal(t, expected["consumes"], actual["consumes"]) + res = res && assert.Equal(t, expected["produces"], actual["produces"]) + res = res && assert.Equal(t, expected["schemes"], actual["schemes"]) + res = res && assert.Equal(t, expected["swagger"], actual["swagger"]) + res = res && assert.Equal(t, expected["info"], actual["info"]) + res = res && assert.Equal(t, expected["host"], actual["host"]) + res = res && assert.Equal(t, expected["basePath"], actual["basePath"]) + res = res && assert.Equal(t, expected["paths"], actual["paths"]) + res = res && assert.Equal(t, expected["definitions"], actual["definitions"]) + res = res && assert.Equal(t, expected["responses"], actual["responses"]) + res = res && assert.Equal(t, expected["securityDefinitions"], actual["securityDefinitions"]) + res = res && assert.Equal(t, expected["tags"], actual["tags"]) + res = res && assert.Equal(t, expected["externalDocs"], actual["externalDocs"]) + res = res && assert.Equal(t, expected["x-some-extension"], actual["x-some-extension"]) + res = res && assert.Equal(t, expected["x-schemes"], actual["x-schemes"]) + + return res +} + +func roundTripTest(t *testing.T, fixtureType, extension, fileName string, schema interface{}) bool { + if extension == "yaml" { + return roundTripTestYAML(t, fixtureType, fileName, schema) + } + return roundTripTestJSON(t, fixtureType, fileName, schema) +} + +func roundTripTestJSON(t *testing.T, fixtureType, fileName string, schema interface{}) bool { + specName := strings.TrimSuffix(fileName, filepath.Ext(fileName)) + t.Logf("verifying %s JSON fixture %q", fixtureType, specName) + + b, err := ioutil.ReadFile(fileName) + if !assert.NoError(t, err) { + return false + } 
+ + var expected map[string]interface{} + if !assert.NoError(t, json.Unmarshal(b, &expected)) { + return false + } + + if !assert.NoError(t, json.Unmarshal(b, schema)) { + return false + } + + cb, err := json.MarshalIndent(schema, "", " ") + if !assert.NoError(t, err) { + return false + } + + var actual map[string]interface{} + if !assert.NoError(t, json.Unmarshal(cb, &actual)) { + return false + } + + return assert.EqualValues(t, expected, actual) +} + +func roundTripTestYAML(t *testing.T, fixtureType, fileName string, schema interface{}) bool { + specName := strings.TrimSuffix(fileName, filepath.Ext(fileName)) + t.Logf("verifying %s YAML fixture %q", fixtureType, specName) + + b, err := YAMLDoc(fileName) + if !assert.NoError(t, err) { + return false + } + + var expected map[string]interface{} + if !assert.NoError(t, json.Unmarshal(b, &expected)) { + return false + } + + if !assert.NoError(t, json.Unmarshal(b, schema)) { + return false + } + + cb, err := json.MarshalIndent(schema, "", " ") + if !assert.NoError(t, err) { + return false + } + + var actual map[string]interface{} + if !assert.NoError(t, json.Unmarshal(cb, &actual)) { + return false + } + + return assert.EqualValues(t, expected, actual) +} + +func TestPropertyFixtures(t *testing.T) { + for _, extension := range extensions { + path := filepath.Join("..", "fixtures", extension, "models", "properties") + files, err := ioutil.ReadDir(path) + if err != nil { + t.Fatal(err) + } + + // for _, f := range files { + // roundTripTest(t, "property", extension, filepath.Join(path, f.Name()), &Schema{}) + // } + f := files[0] + roundTripTest(t, "property", extension, filepath.Join(path, f.Name()), &spec.Schema{}) + } +} + +func TestAdditionalPropertiesWithObject(t *testing.T) { + schema := new(spec.Schema) + b, err := YAMLDoc("../fixtures/yaml/models/modelWithObjectMap.yaml") + if assert.NoError(t, err) { + var expected map[string]interface{} + if assert.NoError(t, json.Unmarshal(b, &expected)) && assert.NoError(t, json.Unmarshal(b, schema)) { + cb, err := json.MarshalIndent(schema, "", " ") + if assert.NoError(t, err) { + var actual map[string]interface{} + if assert.NoError(t, json.Unmarshal(cb, &actual)) { + assert.Equal(t, expected, actual) + } + } + } + } + +} + +func TestModelFixtures(t *testing.T) { + path := filepath.Join("..", "fixtures", "json", "models") + files, err := ioutil.ReadDir(path) + if err != nil { + t.Fatal(err) + } + specs := []string{"modelWithObjectMap", "models", "modelWithComposition", "modelWithExamples", "multipleModels"} +FILES: + for _, f := range files { + if f.IsDir() { + continue + } + for _, sp := range specs { + if strings.HasPrefix(f.Name(), sp) { + roundTripTest(t, "model", "json", filepath.Join(path, f.Name()), &spec.Schema{}) + continue FILES + } + } + //fmt.Println("trying", f.Name()) + roundTripTest(t, "model", "json", filepath.Join(path, f.Name()), &spec.Schema{}) + } + path = filepath.Join("..", "fixtures", "yaml", "models") + files, err = ioutil.ReadDir(path) + if err != nil { + t.Fatal(err) + } +YAMLFILES: + for _, f := range files { + if f.IsDir() { + continue + } + for _, sp := range specs { + if strings.HasPrefix(f.Name(), sp) { + roundTripTest(t, "model", "yaml", filepath.Join(path, f.Name()), &spec.Schema{}) + continue YAMLFILES + } + } + // fmt.Println("trying", f.Name()) + roundTripTest(t, "model", "yaml", filepath.Join(path, f.Name()), &spec.Schema{}) + } +} + +func TestParameterFixtures(t *testing.T) { + path := filepath.Join("..", "fixtures", "json", "resources", "parameters") + files, 
err := ioutil.ReadDir(path) + if err != nil { + t.Fatal(err) + } + + for _, f := range files { + roundTripTest(t, "parameter", "json", filepath.Join(path, f.Name()), &spec.Parameter{}) + } +} + +func TestOperationFixtures(t *testing.T) { + path := filepath.Join("..", "fixtures", "json", "resources", "operations") + files, err := ioutil.ReadDir(path) + if err != nil { + t.Fatal(err) + } + + for _, f := range files { + roundTripTest(t, "operation", "json", filepath.Join(path, f.Name()), &spec.Operation{}) + } +} + +func TestResponseFixtures(t *testing.T) { + path := filepath.Join("..", "fixtures", "json", "responses") + files, err := ioutil.ReadDir(path) + if err != nil { + t.Fatal(err) + } + + for _, f := range files { + if !strings.HasPrefix(f.Name(), "multiple") { + roundTripTest(t, "response", "json", filepath.Join(path, f.Name()), &spec.Response{}) + } else { + roundTripTest(t, "responses", "json", filepath.Join(path, f.Name()), &spec.Responses{}) + } + } +} + +func TestResourcesFixtures(t *testing.T) { + path := filepath.Join("..", "fixtures", "json", "resources") + files, err := ioutil.ReadDir(path) + if err != nil { + t.Fatal(err) + } + pathItems := []string{"resourceWithLinkedDefinitions_part1"} + toSkip := []string{} +FILES: + for _, f := range files { + if f.IsDir() { + continue + } + for _, ts := range toSkip { + if strings.HasPrefix(f.Name(), ts) { + t.Log("verifying resource" + strings.TrimSuffix(f.Name(), filepath.Ext(f.Name()))) + b, err := ioutil.ReadFile(filepath.Join(path, f.Name())) + if assert.NoError(t, err) { + assertSpecJSON(t, b) + } + continue FILES + } + } + for _, pi := range pathItems { + if strings.HasPrefix(f.Name(), pi) { + roundTripTest(t, "path items", "json", filepath.Join(path, f.Name()), &spec.PathItem{}) + continue FILES + } + } + + t.Logf("verifying resource %q", strings.TrimSuffix(f.Name(), filepath.Ext(f.Name()))) + b2, err := ioutil.ReadFile(filepath.Join(path, f.Name())) + if assert.NoError(t, err) { + assertSpecJSON(t, b2) + } + + } +} diff --git a/vendor/github.com/go-openapi/loads/fmts/yaml.go b/vendor/github.com/go-openapi/loads/fmts/yaml.go new file mode 100644 index 000000000..1cef2ac22 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fmts/yaml.go @@ -0,0 +1,30 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
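+
+// Package fmts exposes thin aliases for the YAML helpers in
+// github.com/go-openapi/swag, so callers can load YAML documents and
+// convert them to JSON without importing swag directly.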
+ +package fmts + +import "github.com/go-openapi/swag" + +var ( + // YAMLMatcher matches yaml + YAMLMatcher = swag.YAMLMatcher + // YAMLToJSON converts YAML unmarshaled data into json compatible data + YAMLToJSON = swag.YAMLToJSON + // BytesToYAMLDoc converts raw bytes to a map[string]interface{} + BytesToYAMLDoc = swag.BytesToYAMLDoc + // YAMLDoc loads a yaml document from either http or a file and converts it to json + YAMLDoc = swag.YAMLDoc + // YAMLData loads a yaml document from either http or a file + YAMLData = swag.YAMLData +) diff --git a/vendor/github.com/go-openapi/loads/fmts/yaml_test.go b/vendor/github.com/go-openapi/loads/fmts/yaml_test.go new file mode 100644 index 000000000..e347805d3 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fmts/yaml_test.go @@ -0,0 +1,445 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fmts + +import ( + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" + + yaml "gopkg.in/yaml.v2" + + "github.com/go-openapi/swag" + "github.com/stretchr/testify/assert" +) + +type failJSONMarhal struct { +} + +func (f failJSONMarhal) MarshalJSON() ([]byte, error) { + return nil, errors.New("expected") +} + +func TestLoadHTTPBytes(t *testing.T) { + _, err := swag.LoadFromFileOrHTTP("httx://12394:abd") + assert.Error(t, err) + + serv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(http.StatusNotFound) + })) + defer serv.Close() + + _, err = swag.LoadFromFileOrHTTP(serv.URL) + assert.Error(t, err) + + ts2 := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(http.StatusOK) + rw.Write([]byte("the content")) + })) + defer ts2.Close() + + d, err := swag.LoadFromFileOrHTTP(ts2.URL) + assert.NoError(t, err) + assert.Equal(t, []byte("the content"), d) +} + +func TestYAMLToJSON(t *testing.T) { + + sd := `--- +1: the int key value +name: a string value +'y': some value +` + var data yaml.MapSlice + yaml.Unmarshal([]byte(sd), &data) + + d, err := YAMLToJSON(data) + if assert.NoError(t, err) { + assert.Equal(t, `{"1":"the int key value","name":"a string value","y":"some value"}`, string(d)) + } + + data = append(data, yaml.MapItem{Key: true, Value: "the bool value"}) + d, err = YAMLToJSON(data) + assert.Error(t, err) + assert.Nil(t, d) + + data = data[:len(data)-1] + + tag := yaml.MapSlice{{Key: "name", Value: "tag name"}} + data = append(data, yaml.MapItem{Key: "tag", Value: tag}) + + d, err = YAMLToJSON(data) + assert.NoError(t, err) + assert.Equal(t, `{"1":"the int key value","name":"a string value","y":"some value","tag":{"name":"tag name"}}`, string(d)) + + tag = yaml.MapSlice{{Key: true, Value: "bool tag name"}} + data = append(data[:len(data)-1], yaml.MapItem{Key: "tag", Value: tag}) + + d, err = YAMLToJSON(data) + assert.Error(t, err) + assert.Nil(t, d) + + var lst []interface{} + lst = append(lst, "hello") + + d, err = YAMLToJSON(lst) + assert.NoError(t, err) + 
assert.Equal(t, []byte(`["hello"]`), []byte(d)) + + lst = append(lst, data) + + d, err = YAMLToJSON(lst) + assert.Error(t, err) + assert.Nil(t, d) + + // _, err := yamlToJSON(failJSONMarhal{}) + // assert.Error(t, err) + + _, err = BytesToYAMLDoc([]byte("- name: hello\n")) + assert.Error(t, err) + + dd, err := BytesToYAMLDoc([]byte("description: 'object created'\n")) + assert.NoError(t, err) + + d, err = YAMLToJSON(dd) + assert.NoError(t, err) + assert.Equal(t, json.RawMessage(`{"description":"object created"}`), d) +} + +func TestLoadStrategy(t *testing.T) { + + loader := func(p string) ([]byte, error) { + return []byte(yamlPetStore), nil + } + remLoader := func(p string) ([]byte, error) { + return []byte("not it"), nil + } + + ld := swag.LoadStrategy("blah", loader, remLoader) + b, _ := ld("") + assert.Equal(t, []byte(yamlPetStore), b) + + serv := httptest.NewServer(http.HandlerFunc(yamlPestoreServer)) + defer serv.Close() + + s, err := YAMLDoc(serv.URL) + assert.NoError(t, err) + assert.NotNil(t, s) + + ts2 := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(http.StatusNotFound) + rw.Write([]byte("\n")) + })) + defer ts2.Close() + _, err = YAMLDoc(ts2.URL) + assert.Error(t, err) +} + +var yamlPestoreServer = func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(http.StatusOK) + rw.Write([]byte(yamlPetStore)) +} + +func TestWithYKey(t *testing.T) { + doc, err := BytesToYAMLDoc([]byte(withYKey)) + if assert.NoError(t, err) { + _, err := YAMLToJSON(doc) + if assert.Error(t, err) { + doc, err := BytesToYAMLDoc([]byte(withQuotedYKey)) + if assert.NoError(t, err) { + jsond, err := YAMLToJSON(doc) + if assert.NoError(t, err) { + var yt struct { + Definitions struct { + Viewbox struct { + Properties struct { + Y struct { + Type string `json:"type"` + } `json:"y"` + } `json:"properties"` + } `json:"viewbox"` + } `json:"definitions"` + } + if assert.NoError(t, json.Unmarshal(jsond, &yt)) { + assert.Equal(t, "integer", yt.Definitions.Viewbox.Properties.Y.Type) + } + } + } + } + + } +} + +const withQuotedYKey = `consumes: +- application/json +definitions: + viewBox: + type: object + properties: + x: + type: integer + format: int16 + # y -> types don't match: expect map key string or int get: bool + "y": + type: integer + format: int16 + width: + type: integer + format: int16 + height: + type: integer + format: int16 +info: + description: Test RESTful APIs + title: Test Server + version: 1.0.0 +basePath: /api +paths: + /test: + get: + operationId: findAll + parameters: + - name: since + in: query + type: integer + format: int64 + - name: limit + in: query + type: integer + format: int32 + default: 20 + responses: + 200: + description: Array[Trigger] + schema: + type: array + items: + $ref: "#/definitions/viewBox" +produces: +- application/json +schemes: +- https +swagger: "2.0" +` + +const withYKey = `consumes: +- application/json +definitions: + viewBox: + type: object + properties: + x: + type: integer + format: int16 + # y -> types don't match: expect map key string or int get: bool + y: + type: integer + format: int16 + width: + type: integer + format: int16 + height: + type: integer + format: int16 +info: + description: Test RESTful APIs + title: Test Server + version: 1.0.0 +basePath: /api +paths: + /test: + get: + operationId: findAll + parameters: + - name: since + in: query + type: integer + format: int64 + - name: limit + in: query + type: integer + format: int32 + default: 20 + responses: + 200: + description: Array[Trigger] + 
schema: + type: array + items: + $ref: "#/definitions/viewBox" +produces: +- application/json +schemes: +- https +swagger: "2.0" +` + +const yamlPetStore = `swagger: '2.0' +info: + version: '1.0.0' + title: Swagger Petstore + description: A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification + termsOfService: http://helloreverb.com/terms/ + contact: + name: Swagger API team + email: foo@example.com + url: http://swagger.io + license: + name: MIT + url: http://opensource.org/licenses/MIT +host: petstore.swagger.wordnik.com +basePath: /api +schemes: + - http +consumes: + - application/json +produces: + - application/json +paths: + /pets: + get: + description: Returns all pets from the system that the user has access to + operationId: findPets + produces: + - application/json + - application/xml + - text/xml + - text/html + parameters: + - name: tags + in: query + description: tags to filter by + required: false + type: array + items: + type: string + collectionFormat: csv + - name: limit + in: query + description: maximum number of results to return + required: false + type: integer + format: int32 + responses: + '200': + description: pet response + schema: + type: array + items: + $ref: '#/definitions/pet' + default: + description: unexpected error + schema: + $ref: '#/definitions/errorModel' + post: + description: Creates a new pet in the store. Duplicates are allowed + operationId: addPet + produces: + - application/json + parameters: + - name: pet + in: body + description: Pet to add to the store + required: true + schema: + $ref: '#/definitions/newPet' + responses: + '200': + description: pet response + schema: + $ref: '#/definitions/pet' + default: + description: unexpected error + schema: + $ref: '#/definitions/errorModel' + /pets/{id}: + get: + description: Returns a user based on a single ID, if the user does not have access to the pet + operationId: findPetById + produces: + - application/json + - application/xml + - text/xml + - text/html + parameters: + - name: id + in: path + description: ID of pet to fetch + required: true + type: integer + format: int64 + responses: + '200': + description: pet response + schema: + $ref: '#/definitions/pet' + default: + description: unexpected error + schema: + $ref: '#/definitions/errorModel' + delete: + description: deletes a single pet based on the ID supplied + operationId: deletePet + parameters: + - name: id + in: path + description: ID of pet to delete + required: true + type: integer + format: int64 + responses: + '204': + description: pet deleted + default: + description: unexpected error + schema: + $ref: '#/definitions/errorModel' +definitions: + pet: + required: + - id + - name + properties: + id: + type: integer + format: int64 + name: + type: string + tag: + type: string + newPet: + allOf: + - $ref: '#/definitions/pet' + - required: + - name + properties: + id: + type: integer + format: int64 + name: + type: string + errorModel: + required: + - code + - message + properties: + code: + type: integer + format: int32 + message: + type: string +` diff --git a/vendor/github.com/go-openapi/loads/hack/build-drone.sh b/vendor/github.com/go-openapi/loads/hack/build-drone.sh new file mode 100755 index 000000000..52b1180cc --- /dev/null +++ b/vendor/github.com/go-openapi/loads/hack/build-drone.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -e -o pipefail + + mkdir -p /drone/{testresults,coverage,dist} + go test -race -timeout 20m -v ./... 
| go-junit-report -dir /drone/testresults + +# Run test coverage on each subdirectories and merge the coverage profile. +echo "mode: ${GOCOVMODE-count}" > profile.cov + +# Standard go tooling behavior is to ignore dirs with leading underscores +# skip generator for race detection and coverage +for dir in $(go list ./...) +do + pth="$GOPATH/src/$dir" + go test -covermode=${GOCOVMODE-count} -coverprofile=${pth}/profile.out $dir + if [ -f $pth/profile.out ] + then + cat $pth/profile.out | tail -n +2 >> profile.cov + # rm $pth/profile.out + fi +done + +go tool cover -func profile.cov +gocov convert profile.cov | gocov report +gocov convert profile.cov | gocov-html > /drone/coverage/coverage-${CI_BUILD_NUM-"0"}.html \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/hack/coverage b/vendor/github.com/go-openapi/loads/hack/coverage new file mode 100755 index 000000000..b8e6dbd0c --- /dev/null +++ b/vendor/github.com/go-openapi/loads/hack/coverage @@ -0,0 +1,20 @@ +#!/bin/bash +set -e -o pipefail + +# Run test coverage on each subdirectories and merge the coverage profile. +echo "mode: ${GOCOVMODE-atomic}" > coverage.txt + +# Standard go tooling behavior is to ignore dirs with leading underscores +# skip generator for race detection and coverage +for dir in $(go list ./...) +do + pth="$GOPATH/src/$dir" + go test -race -timeout 20m -covermode=${GOCOVMODE-atomic} -coverprofile=${pth}/profile.out $dir + if [ -f $pth/profile.out ] + then + cat $pth/profile.out | tail -n +2 >> coverage.txt + rm $pth/profile.out + fi +done + +go tool cover -func coverage.txt diff --git a/vendor/github.com/go-openapi/loads/json_test.go b/vendor/github.com/go-openapi/loads/json_test.go new file mode 100644 index 000000000..8b60eb19f --- /dev/null +++ b/vendor/github.com/go-openapi/loads/json_test.go @@ -0,0 +1,318 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
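+
+// The test below drives JSONSpec against httptest servers: one serving a
+// valid pet store document and one answering 404, which must surface as an
+// error from the loader.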
+ +package loads + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLoadJSON(t *testing.T) { + serv := httptest.NewServer(http.HandlerFunc(jsonPestoreServer)) + defer serv.Close() + + s, err := JSONSpec(serv.URL) + assert.NoError(t, err) + assert.NotNil(t, s) + + ts2 := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(http.StatusNotFound) + rw.Write([]byte("{}")) + })) + defer ts2.Close() + _, err = JSONSpec(ts2.URL) + assert.Error(t, err) +} + +var jsonPestoreServer = func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(http.StatusOK) + rw.Write([]byte(petstoreJSON)) +} + +const petstoreJSON = `{ + "swagger": "2.0", + "info": { + "version": "1.0.0", + "title": "Swagger Petstore", + "contact": { + "name": "Wordnik API Team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "host": "petstore.swagger.wordnik.com", + "basePath": "/api", + "schemes": [ + "http" + ], + "paths": { + "/pets": { + "get": { + "security": [ + { + "oauth2": ["read"] + } + ], + "tags": [ "Pet Operations" ], + "operationId": "getAllPets", + "parameters": [ + { + "name": "status", + "in": "query", + "description": "The status to filter by", + "type": "string" + } + ], + "summary": "Finds all pets in the system", + "responses": { + "200": { + "description": "Pet response", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Pet" + } + } + }, + "default": { + "description": "Unexpected error", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + }, + "post": { + "security": [ + { + "oauth2": ["write"] + } + ], + "tags": [ "Pet Operations" ], + "operationId": "createPet", + "summary": "Creates a new pet", + "parameters": [ + { + "name": "pet", + "in": "body", + "description": "The Pet to create", + "required": true, + "schema": { + "$ref": "#/definitions/newPet" + } + } + ], + "responses": { + "200": { + "description": "Created Pet response", + "schema": { + "$ref": "#/definitions/Pet" + } + }, + "default": { + "description": "Unexpected error", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + } + }, + "/pets/{id}": { + "delete": { + "security": [ + { + "oauth2": ["write"] + } + ], + "description": "Deletes the Pet by id", + "operationId": "deletePet", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of pet to delete", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "204": { + "description": "pet deleted" + }, + "default": { + "description": "unexpected error", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + }, + "get": { + "security": [ + { + "oauth2": ["read"] + } + ], + "tags": [ "Pet Operations" ], + "operationId": "getPetById", + "summary": "Finds the pet by id", + "responses": { + "200": { + "description": "Pet response", + "schema": { + "$ref": "#/definitions/Pet" + } + }, + "default": { + "description": "Unexpected error", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + }, + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of pet", + "required": true, + "type": "integer", + "format": "int64" + } + ] + } + }, + "definitions": { + "Category": { + "id": "Category", + "properties": { + "id": { + "format": "int64", + "type": "integer" + }, + "name": { + "type": "string" + } + } + }, + "Pet": { + "id": "Pet", + 
"properties": { + "category": { + "$ref": "#/definitions/Category" + }, + "id": { + "description": "unique identifier for the pet", + "format": "int64", + "maximum": 100.0, + "minimum": 0.0, + "type": "integer" + }, + "name": { + "type": "string" + }, + "photoUrls": { + "items": { + "type": "string" + }, + "type": "array" + }, + "status": { + "description": "pet status in the store", + "enum": [ + "available", + "pending", + "sold" + ], + "type": "string" + }, + "tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "type": "array" + } + }, + "required": [ + "id", + "name" + ] + }, + "newPet": { + "allOf": [ + { + "$ref": "#/definitions/Pet" + } + ], + "required": [ + "name" + ] + }, + "Tag": { + "id": "Tag", + "properties": { + "id": { + "format": "int64", + "type": "integer" + }, + "name": { + "type": "string" + } + } + }, + "Error": { + "required": [ + "code", + "message" + ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + }, + "produces": [ + "application/json", + "application/xml", + "text/plain", + "text/html" + ], + "securityDefinitions": { + "oauth2": { + "type": "oauth2", + "scopes": { + "read": "Read access.", + "write": "Write access" + }, + "flow": "accessCode", + "authorizationUrl": "http://petstore.swagger.wordnik.com/oauth/authorize", + "tokenUrl": "http://petstore.swagger.wordnik.com/oauth/token" + } + } +}` diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go new file mode 100644 index 000000000..649ca06e6 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/spec.go @@ -0,0 +1,279 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package loads + +import ( + "bytes" + "encoding/json" + "fmt" + "net/url" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// JSONDoc loads a json document from either a file or a remote url +func JSONDoc(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// DocLoader represents a doc loader type +type DocLoader func(string) (json.RawMessage, error) + +// DocMatcher represents a predicate to check if a loader matches +type DocMatcher func(string) bool + +var ( + loaders *loader + defaultLoader *loader +) + +func init() { + defaultLoader = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc} + loaders = defaultLoader + spec.PathLoader = loaders.Fn + AddLoader(swag.YAMLMatcher, swag.YAMLDoc) +} + +// AddLoader for a document +func AddLoader(predicate DocMatcher, load DocLoader) { + prev := loaders + loaders = &loader{ + Match: predicate, + Fn: load, + Next: prev, + } + spec.PathLoader = loaders.Fn +} + +type loader struct { + Fn DocLoader + Match DocMatcher + Next *loader +} + +// JSONSpec loads a spec from a json document +func JSONSpec(path string) (*Document, error) { + data, err := JSONDoc(path) + if err != nil { + return nil, err + } + // convert to json + return Analyzed(json.RawMessage(data), "") +} + +// Document represents a swagger spec document +type Document struct { + // specAnalyzer + Analyzer *analysis.Spec + spec *spec.Swagger + specFilePath string + origSpec *spec.Swagger + schema *spec.Schema + raw json.RawMessage +} + +// Embedded returns a Document based on embedded specs. No analysis is required +func Embedded(orig, flat json.RawMessage) (*Document, error) { + var origSpec, flatSpec spec.Swagger + if err := json.Unmarshal(orig, &origSpec); err != nil { + return nil, err + } + if err := json.Unmarshal(flat, &flatSpec); err != nil { + return nil, err + } + return &Document{ + raw: orig, + origSpec: &origSpec, + spec: &flatSpec, + }, nil +} + +// Spec loads a new spec document +func Spec(path string) (*Document, error) { + specURL, err := url.Parse(path) + if err != nil { + return nil, err + } + var lastErr error + for l := loaders.Next; l != nil; l = l.Next { + if loaders.Match(specURL.Path) { + b, err2 := loaders.Fn(path) + if err2 != nil { + lastErr = err2 + continue + } + doc, err := Analyzed(b, "") + if err != nil { + return nil, err + } + if doc != nil { + doc.specFilePath = path + } + return doc, nil + } + } + if lastErr != nil { + return nil, lastErr + } + b, err := defaultLoader.Fn(path) + if err != nil { + return nil, err + } + + document, err := Analyzed(b, "") + if document != nil { + document.specFilePath = path + } + + return document, err +} + +// Analyzed creates a new analyzed spec document +func Analyzed(data json.RawMessage, version string) (*Document, error) { + if version == "" { + version = "2.0" + } + if version != "2.0" { + return nil, fmt.Errorf("spec version %q is not supported", version) + } + + raw := data + trimmed := bytes.TrimSpace(data) + if len(trimmed) > 0 { + if trimmed[0] != '{' && trimmed[0] != '[' { + yml, err := swag.BytesToYAMLDoc(trimmed) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + d, err := swag.YAMLToJSON(yml) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + raw = d + } + } + + swspec := new(spec.Swagger) + if err := json.Unmarshal(raw, swspec); err != nil { + return nil, err + } + + origsqspec := 
new(spec.Swagger) + if err := json.Unmarshal(raw, origsqspec); err != nil { + return nil, err + } + + d := &Document{ + Analyzer: analysis.New(swspec), + schema: spec.MustLoadSwagger20Schema(), + spec: swspec, + raw: raw, + origSpec: origsqspec, + } + return d, nil +} + +// Expanded expands the ref fields in the spec document and returns a new spec document +func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { + swspec := new(spec.Swagger) + if err := json.Unmarshal(d.raw, swspec); err != nil { + return nil, err + } + + var expandOptions *spec.ExpandOptions + if len(options) > 0 { + expandOptions = options[0] + } else { + expandOptions = &spec.ExpandOptions{ + RelativeBase: d.specFilePath, + } + } + + if err := spec.ExpandSpec(swspec, expandOptions); err != nil { + return nil, err + } + + dd := &Document{ + Analyzer: analysis.New(swspec), + spec: swspec, + specFilePath: d.specFilePath, + schema: spec.MustLoadSwagger20Schema(), + raw: d.raw, + origSpec: d.origSpec, + } + return dd, nil +} + +// BasePath the base path for this spec +func (d *Document) BasePath() string { + return d.spec.BasePath +} + +// Version returns the version of this spec +func (d *Document) Version() string { + return d.spec.Swagger +} + +// Schema returns the swagger 2.0 schema +func (d *Document) Schema() *spec.Schema { + return d.schema +} + +// Spec returns the swagger spec object model +func (d *Document) Spec() *spec.Swagger { + return d.spec +} + +// Host returns the host for the API +func (d *Document) Host() string { + return d.spec.Host +} + +// Raw returns the raw swagger spec as json bytes +func (d *Document) Raw() json.RawMessage { + return d.raw +} + +func (d *Document) OrigSpec() *spec.Swagger { + return d.origSpec +} + +// ResetDefinitions gives a shallow copy with the models reset +func (d *Document) ResetDefinitions() *Document { + defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) + for k, v := range d.origSpec.Definitions { + defs[k] = v + } + + d.spec.Definitions = defs + return d +} + +// Pristine creates a new pristine document instance based on the input data +func (d *Document) Pristine() *Document { + dd, _ := Analyzed(d.Raw(), d.Version()) + return dd +} + +// SpecFilePath returns the file path of the spec if one is defined +func (d *Document) SpecFilePath() string { + return d.specFilePath +} diff --git a/vendor/github.com/go-openapi/loads/spec_test.go b/vendor/github.com/go-openapi/loads/spec_test.go new file mode 100644 index 000000000..a07694d33 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/spec_test.go @@ -0,0 +1,717 @@ +package loads + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUnknownSpecVersion(t *testing.T) { + _, err := Analyzed([]byte{}, "0.9") + assert.Error(t, err) +} + +func TestDefaultsTo20(t *testing.T) { + d, err := Analyzed(PetStoreJSONMessage, "") + + assert.NoError(t, err) + assert.NotNil(t, d) + assert.Equal(t, "2.0", d.Version()) + // assert.Equal(t, "2.0", d.data["swagger"].(string)) + assert.Equal(t, "/api", d.BasePath()) +} + +func TestLoadsYAMLContent(t *testing.T) { + d, err := Analyzed(json.RawMessage([]byte(YAMLSpec)), "") + if assert.NoError(t, err) { + if assert.NotNil(t, d) { + sw := d.Spec() + assert.Equal(t, "1.0.0", sw.Info.Version) + } + } +} + +// for issue 11 +func TestRegressionExpand(t *testing.T) { + swaggerFile := "fixtures/yaml/swagger/1/2/3/4/swagger.yaml" + document, err := Spec(swaggerFile) + assert.NoError(t, err) + assert.NotNil(t, document) 
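+	// Expanded resolves every $ref in the document relative to the spec file
+	// path and returns a new Document; the original stays untouched.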
+ d, err := document.Expanded() + assert.NoError(t, err) + assert.NotNil(t, d) + b, _ := d.Spec().MarshalJSON() + assert.JSONEq(t, expectedExpanded, string(b)) +} + +func TestCascadingRefExpand(t *testing.T) { + swaggerFile := "fixtures/yaml/swagger/spec.yml" + document, err := Spec(swaggerFile) + assert.NoError(t, err) + assert.NotNil(t, document) + d, err := document.Expanded() + assert.NoError(t, err) + assert.NotNil(t, d) + b, _ := d.Spec().MarshalJSON() + assert.JSONEq(t, cascadeRefExpanded, string(b)) +} + +func TestFailsInvalidJSON(t *testing.T) { + _, err := Analyzed(json.RawMessage([]byte("{]")), "") + + assert.Error(t, err) +} + +var YAMLSpec = `swagger: '2.0' + +info: + version: "1.0.0" + title: Simple Search API + description: | + A very simple api description that makes a x-www-form-urlencoded only API to submit searches. + +produces: + - application/json + +consumes: + - application/json + +paths: + /search: + post: + operationId: search + summary: searches tasks + description: searches the task titles and descriptions for a match + consumes: + - application/x-www-form-urlencoded + parameters: + - name: q + in: formData + type: string + description: the search string + required: true + /tasks: + get: + operationId: getTasks + summary: Gets Task objects. + description: | + Optional query param of **size** determines + size of returned array + tags: + - tasks + parameters: + - name: size + in: query + description: Size of task list + type: integer + format: int32 + default: 20 + - name: completed + in: query + description: when true shows completed tasks + type: boolean + + responses: + default: + description: Generic Error + 200: + description: Successful response + headers: + X-Rate-Limit: + type: integer + format: int32 + X-Rate-Limit-Remaining: + type: integer + format: int32 + default: 42 + X-Rate-Limit-Reset: + type: integer + format: int32 + default: "1449875311" + X-Rate-Limit-Reset-Human: + type: string + default: 3 days + X-Rate-Limit-Reset-Human-Number: + type: string + default: 3 + Access-Control-Allow-Origin: + type: string + default: "*" + schema: + type: array + items: + $ref: "#/definitions/Task" + post: + operationId: createTask + summary: Creates a 'Task' object. + description: | + Validates the content property for length etc. + parameters: + - name: body + in: body + schema: + $ref: "#/definitions/Task" + tags: + - tasks + responses: + default: + description: Generic Error + 201: + description: Task Created + + /tasks/{id}: + parameters: + - name: id + in: path + type: integer + format: int32 + description: The id of the task + required: true + minimum: 1 + put: + operationId: updateTask + summary: updates a task. + description: | + Validates the content property for length etc. + tags: + - tasks + parameters: + - name: body + in: body + description: the updated task + schema: + $ref: "#/definitions/Task" + responses: + default: + description: Generic Error + 200: + description: Task updated + schema: + $ref: "#/definitions/Task" + delete: + operationId: deleteTask + summary: deletes a task + description: | + Deleting a task is irrevocable. + tags: + - tasks + responses: + default: + description: Generic Error + 204: + description: Task Deleted + + +definitions: + Task: + title: A Task object + description: | + This describes a task. Tasks require a content property to be set. + required: + - content + type: object + properties: + id: + title: the unique id of the task + description: | + This id property is autogenerated when a task is created. 
+ type: integer + format: int64 + readOnly: true + content: + title: The content of the task + description: | + Task content can contain [GFM](https://help.github.com/articles/github-flavored-markdown/). + type: string + minLength: 5 + completed: + title: when true this task is completed + type: boolean + creditcard: + title: the credit card format usage + type: string + format: creditcard + createdAt: + title: task creation time + type: string + format: date-time + readOnly: true +` + +// PetStoreJSONMessage json raw message for Petstore20 +var PetStoreJSONMessage = json.RawMessage([]byte(PetStore20)) + +// PetStore20 json doc for swagger 2.0 pet store +const PetStore20 = `{ + "swagger": "2.0", + "info": { + "version": "1.0.0", + "title": "Swagger Petstore", + "contact": { + "name": "Wordnik API Team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "host": "petstore.swagger.wordnik.com", + "basePath": "/api", + "schemes": [ + "http" + ], + "paths": { + "/pets": { + "get": { + "security": [ + { + "basic": [] + } + ], + "tags": [ "Pet Operations" ], + "operationId": "getAllPets", + "parameters": [ + { + "name": "status", + "in": "query", + "description": "The status to filter by", + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "The maximum number of results to return", + "type": "integer", + "format": "int64" + } + ], + "summary": "Finds all pets in the system", + "responses": { + "200": { + "description": "Pet response", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Pet" + } + } + }, + "default": { + "description": "Unexpected error", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + }, + "post": { + "security": [ + { + "basic": [] + } + ], + "tags": [ "Pet Operations" ], + "operationId": "createPet", + "summary": "Creates a new pet", + "consumes": ["application/x-yaml"], + "produces": ["application/x-yaml"], + "parameters": [ + { + "name": "pet", + "in": "body", + "description": "The Pet to create", + "required": true, + "schema": { + "$ref": "#/definitions/newPet" + } + } + ], + "responses": { + "200": { + "description": "Created Pet response", + "schema": { + "$ref": "#/definitions/Pet" + } + }, + "default": { + "description": "Unexpected error", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + } + }, + "/pets/{id}": { + "delete": { + "security": [ + { + "apiKey": [] + } + ], + "description": "Deletes the Pet by id", + "operationId": "deletePet", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of pet to delete", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "204": { + "description": "pet deleted" + }, + "default": { + "description": "unexpected error", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + }, + "get": { + "tags": [ "Pet Operations" ], + "operationId": "getPetById", + "summary": "Finds the pet by id", + "responses": { + "200": { + "description": "Pet response", + "schema": { + "$ref": "#/definitions/Pet" + } + }, + "default": { + "description": "Unexpected error", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + }, + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of pet", + "required": true, + "type": "integer", + "format": "int64" + } + ] + } + }, + "definitions": { + "Category": { + "id": "Category", + "properties": { + "id": { + "format": "int64", + 
"type": "integer" + }, + "name": { + "type": "string" + } + } + }, + "Pet": { + "id": "Pet", + "properties": { + "category": { + "$ref": "#/definitions/Category" + }, + "id": { + "description": "unique identifier for the pet", + "format": "int64", + "maximum": 100.0, + "minimum": 0.0, + "type": "integer" + }, + "name": { + "type": "string" + }, + "photoUrls": { + "items": { + "type": "string" + }, + "type": "array" + }, + "status": { + "description": "pet status in the store", + "enum": [ + "available", + "pending", + "sold" + ], + "type": "string" + }, + "tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "type": "array" + } + }, + "required": [ + "id", + "name" + ] + }, + "newPet": { + "anyOf": [ + { + "$ref": "#/definitions/Pet" + }, + { + "required": [ + "name" + ] + } + ] + }, + "Tag": { + "id": "Tag", + "properties": { + "id": { + "format": "int64", + "type": "integer" + }, + "name": { + "type": "string" + } + } + }, + "Error": { + "required": [ + "code", + "message" + ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + }, + "consumes": [ + "application/json", + "application/xml" + ], + "produces": [ + "application/json", + "application/xml", + "text/plain", + "text/html" + ], + "securityDefinitions": { + "basic": { + "type": "basic" + }, + "apiKey": { + "type": "apiKey", + "in": "header", + "name": "X-API-KEY" + } + } +} +` + +const expectedExpanded = ` +{ + "produces":[ + "application/json", + "plain/text" + ], + "schemes":[ + "https", + "http" + ], + "swagger":"2.0", + "info":{ + "description":"Something", + "title":"Something", + "contact":{ + "name":"Somebody", + "url":"https://url.com", + "email":"email@url.com" + }, + "version":"v1" + }, + "host":"security.sonusnet.com", + "basePath":"/api", + "paths":{ + "/whatnot":{ + "get":{ + "description":"Get something", + "responses":{ + "200":{ + "description":"The something", + "schema":{ + "description":"A collection of service events", + "type":"object", + "properties":{ + "page":{ + "description":"A description of a paged result", + "type":"object", + "properties":{ + "page":{ + "description":"the page that was requested", + "type":"integer" + }, + "page_items":{ + "description":"the number of items per page requested", + "type":"integer" + }, + "pages":{ + "description":"the total number of pages available", + "type":"integer" + }, + "total_items":{ + "description":"the total number of items available", + "type":"integer", + "format":"int64" + } + } + }, + "something":{ + "description":"Something", + "type":"object", + "properties":{ + "p1":{ + "description":"A string", + "type":"string" + }, + "p2":{ + "description":"An integer", + "type":"integer" + } + } + } + } + } + }, + "500":{ + "description":"Oops" + } + } + } + } + }, + "definitions":{ + "Something":{ + "description":"A collection of service events", + "type":"object", + "properties":{ + "page":{ + "description":"A description of a paged result", + "type":"object", + "properties":{ + "page":{ + "description":"the page that was requested", + "type":"integer" + }, + "page_items":{ + "description":"the number of items per page requested", + "type":"integer" + }, + "pages":{ + "description":"the total number of pages available", + "type":"integer" + }, + "total_items":{ + "description":"the total number of items available", + "type":"integer", + "format":"int64" + } + } + }, + "something":{ + "description":"Something", + "type":"object", + "properties":{ + "p1":{ + "description":"A string", + 
"type":"string" + }, + "p2":{ + "description":"An integer", + "type":"integer" + } + } + } + } + } + } +} +` + +const cascadeRefExpanded = ` +{ + "swagger": "2.0", + "consumes":[ + "application/json" + ], + "produces":[ + "application/json" + ], + "schemes":[ + "http" + ], + "info":{ + "description":"recursively following JSON references", + "title":"test 1", + "contact":{ + "name":"Fred" + }, + "version":"0.1.1" + }, + "paths":{ + "/getAll":{ + "get":{ + "operationId":"getAll", + "parameters":[ + { + "description":"max number of results", + "name":"a", + "in":"body", + "schema":{ + "type":"string" + } + } + ], + "responses":{ + "200":{ + "description":"Success", + "schema":{ + "type":"array", + "items":{ + "type":"string" + } + } + } + } + } + } + }, + "definitions":{ + "a":{ + "type":"string" + }, + "b":{ + "type":"array", + "items":{ + "type":"string" + } + } + } +} +` diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig new file mode 100644 index 000000000..3152da69a --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/strfmt/.github/CONTRIBUTING.md b/vendor/github.com/go-openapi/strfmt/.github/CONTRIBUTING.md new file mode 100644 index 000000000..7dea4240d --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.github/CONTRIBUTING.md @@ -0,0 +1,117 @@ +## Contribution Guidelines + +### Pull requests are always welcome + +We are always thrilled to receive pull requests, and do our best to +process them as fast as possible. Not sure if that typo is worth a pull +request? Do it! We will appreciate it. + +If your pull request is not accepted on the first try, don't be +discouraged! If there's a problem with the implementation, hopefully you +received feedback on what to improve. + +We're trying very hard to keep go-swagger lean and focused. We don't want it +to do everything for everybody. This means that we might decide against +incorporating a new feature. However, there might be a way to implement +that feature *on top of* go-swagger. + + +### Conventions + +Fork the repo and make changes on your fork in a feature branch: + +- If it's a bugfix branch, name it XXX-something where XXX is the number of the + issue +- If it's a feature branch, create an enhancement issue to announce your + intentions, and name it XXX-something where XXX is the number of the issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. Run the full test suite on +your branch before submitting a pull request. + +Update the documentation when creating or modifying features. Test +your documentation changes for clarity, concision, and correctness, as +well as a clean documentation build. See ``docs/README.md`` for more +information on building the docs and how docs get released. + +Write clean code. 
Universally formatted code promotes ease of writing, reading, +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plugins that do this automatically. + +Pull requests descriptions should be as clear as possible and include a +reference to all the issues that they address. + +Pull requests must not contain commits from other users or branches. + +Commit messages must start with a capitalized and short summary (max. 50 +chars) written in the imperative, followed by an optional, more detailed +explanatory text which is separated from the summary by an empty line. + +Code review comments may be added to your pull request. Discuss, then make the +suggested modifications and push additional commits to your feature branch. Be +sure to post a comment after pushing. The new commits will show up in the pull +request automatically, but the reviewers will not be notified unless you +comment. + +Before the pull request is merged, make sure that you squash your commits into +logical units of work using `git rebase -i` and `git push -f`. After every +commit the test suite should be passing. Include documentation changes in the +same commit so that a revert would remove all traces of the feature or fix. + +Commits that fix or close an issue should include a reference like `Closes #XXX` +or `Fixes #XXX`, which will automatically close the issue when merged. + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the +patch, which certifies that you wrote it or otherwise have the right to +pass it on as an open-source patch. The rules are pretty simple: if you +can certify the below (from +[developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. 
diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore new file mode 100644 index 000000000..dd91ed6a0 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml new file mode 100644 index 000000000..4029779e1 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -0,0 +1,19 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 25 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 2 + +linters: + enable-all: true + disable: + - maligned diff --git a/vendor/github.com/go-openapi/strfmt/.travis.yml b/vendor/github.com/go-openapi/strfmt/.travis.yml new file mode 100644 index 000000000..88a3a6eae --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.travis.yml @@ -0,0 +1,18 @@ +language: go +go: +- 1.7.1 +install: +- go get -u github.com/stretchr/testify/assert +- go get -u github.com/pborman/uuid +- go get -u github.com/asaskevich/govalidator +- go get -u github.com/mailru/easyjson +- go get -u github.com/go-openapi/errors +- go get -u github.com/mitchellh/mapstructure +- go get -u gopkg.in/mgo.v2/bson +script: +- ./hack/coverage +after_success: +- bash <(curl -s https://codecov.io/bash) +notifications: + slack: + secure: zE5AtIYTpYfQPnTzP+EaQPN7JKtfFAGv6PrJqoIZLOXa8B6zGb6+J1JRNNxWi7faWbyJOxa4FSSsuPsKZMycUK6wlLFIdhDxwqeo7Ew8r6rdZKdfUHQggfNS9wO79ARoNYUDHtmnaBUS+eWSM1YqSc4i99QxyyfuURLOeAaA/q14YbdlTlaw3lrZ0qT92ot1FnVGNOx064zuHtFeUf+jAVRMZ6Q3rvqllwIlPszE6rmHGXBt2VoJxRaBetdwd7FgkcYw9FPXKHhadwC7/75ZAdmxIukhxNMw4Tr5NuPcqNcnbYLenDP7B3lssGVIrP4BRSqekS1d/tqvdvnnFWHMwrNCkSnSc065G5+qWTlXKAemIclgiXXqE2furBNLm05MDdG8fn5epS0UNarkjD+zX336RiqwBlOX4KbF+vPyqcO98CsN0lnd+H6loc9reiTHs37orFFpQ+309av9be2GGsHUsRB9ssIyrewmhAccOmkRtr2dVTZJNFQwa5Kph5TNJuTjnZEwG/xUkEX2YSfwShOsb062JWiflV6PJdnl80pc9Tn7D5sO5Bf9DbijGRJwwP+YiiJtwtr+vsvS+n4sM0b5eqm4UoRo+JJO8ffoJtHS7ItuyRbVQCwEPJ4221WLcf5PquEEDdAPwR+K4Gj8qTXqTDdxOiES1xFUKVgmzhI= diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
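The README vendored below describes strfmt as a registry of string format types that expose marshaling and validation. A minimal sketch of exercising the vendored date format, using only identifiers visible in the files below (strfmt.IsDate, strfmt.Date, the RFC3339 full-date layout); the import path assumes the package is consumed at its canonical location rather than from this vendor tree:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// IsDate validates against the RFC3339 full-date layout "2006-01-02".
	fmt.Println(strfmt.IsDate("2014-12-15")) // true
	fmt.Println(strfmt.IsDate("2017-02-29")) // false: 2017 is not a leap year

	// Date implements encoding.TextUnmarshaler/TextMarshaler, so it
	// round-trips through the same layout.
	var d strfmt.Date
	if err := d.UnmarshalText([]byte("2014-12-15")); err != nil {
		panic(err)
	}
	fmt.Println(d.String()) // 2014-12-15
}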
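bson.go below aliases gopkg.in/mgo.v2/bson.ObjectId and registers the bsonobjectid format. One subtlety worth noting: the JSON/text form is the 24-character hex encoding, while the underlying value holds the raw 12 bytes. A sketch of that distinction, again using only functions shown in the vendored file:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	hex := "507f1f77bcf86cd799439011"
	fmt.Println(strfmt.IsBSONObjectID(hex)) // true: 24 hex characters

	// NewObjectId parses the hex form; MarshalText re-encodes it as hex.
	id := strfmt.NewObjectId(hex)
	txt, err := id.MarshalText()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(txt))      // 507f1f77bcf86cd799439011
	fmt.Println(len(id.String())) // 12: the raw byte form, not hex
}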
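The conv package below is a set of pointer helpers: for each format X, conv.X(v) returns a pointer to a copy of v, and conv.XValue(p) dereferences nil-safely, yielding the type's zero value. A short usage sketch (the email address is illustrative only):

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/strfmt/conv"
)

func main() {
	// Take a pointer to a value in one expression, e.g. for generated
	// models whose optional fields are pointers.
	email := conv.Email(strfmt.Email("user@example.com"))

	// Nil-safe dereference: nil yields the zero value of the type.
	fmt.Println(conv.EmailValue(email)) // user@example.com
	fmt.Println(conv.EmailValue(nil))   // "" (zero strfmt.Email)
}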
diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md new file mode 100644 index 000000000..55f880415 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/README.md @@ -0,0 +1,71 @@ +# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE)
+[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt)
+[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt)
+
+This package exposes a registry of data types to support string formats in the go-openapi toolkit.
+
+strfmt represents a well-known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those.
+
+## Supported data formats
+go-openapi/strfmt follows the swagger 2.0 specification with the following formats
+defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types).
+
+It also provides convenient extensions for go-openapi users.
+
+- [x] JSON-schema draft 4 formats
+  - date-time
+  - email
+  - hostname
+  - ipv4
+  - ipv6
+  - uri
+- [x] swagger 2.0 format extensions
+  - binary
+  - byte (e.g. base64 encoded string)
+  - date (e.g. "1970-01-01")
+  - password
+- [x] go-openapi custom format extensions
+  - bsonobjectid (BSON objectID)
+  - creditcard
+  - duration (e.g. "3 weeks", "1ms")
+  - hexcolor (e.g. "#FFFFFF")
+  - isbn, isbn10, isbn13
+  - mac (e.g. "01:02:03:04:05:06")
+  - rgbcolor (e.g. "rgb(100,100,100)")
+  - ssn
+  - uuid, uuid3, uuid4, uuid5
+
+> NOTE: as the name suggests, this package is intended to support string formatting only.
+> It does not provide validation for numerical values with swagger format extension for JSON types "number" or
+> "integer" (e.g. float, double, int32...).
+
+## Format types
+Types defined in strfmt expose marshaling and validation capabilities.
+
+List of defined types:
+- Base64
+- CreditCard
+- Date
+- DateTime
+- Duration
+- Email
+- HexColor
+- Hostname
+- IPv4
+- IPv6
+- ISBN
+- ISBN10
+- ISBN13
+- MAC
+- ObjectId
+- Password
+- RGBColor
+- SSN
+- URI
+- UUID
+- UUID3
+- UUID4
+- UUID5
diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go new file mode 100644 index 000000000..d5a4f9861 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/bson.go @@ -0,0 +1,128 @@ +// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"errors"
+	"fmt"
+
+	"github.com/mailru/easyjson/jlexer"
+	"github.com/mailru/easyjson/jwriter"
+
+	"gopkg.in/mgo.v2/bson"
+)
+
+func init() {
+	var id ObjectId
+	// register this format in the default registry
+	Default.Add("bsonobjectid", &id, IsBSONObjectID)
+}
+
+// IsBSONObjectID returns true when the string is a valid BSON.ObjectId
+func IsBSONObjectID(str string) bool {
+	return bson.IsObjectIdHex(str)
+}
+
+// ObjectId represents a BSON object ID (alias to gopkg.in/mgo.v2/bson.ObjectId)
+//
+// swagger:strfmt bsonobjectid
+type ObjectId bson.ObjectId
+
+// NewObjectId creates an ObjectId from a hex string
+func NewObjectId(hex string) ObjectId {
+	return ObjectId(bson.ObjectIdHex(hex))
+}
+
+// MarshalText turns this instance into text
+func (id *ObjectId) MarshalText() ([]byte, error) {
+	return []byte(bson.ObjectId(*id).Hex()), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on
+	*id = ObjectId(bson.ObjectIdHex(string(data)))
+	return nil
+}
+
+// Scan reads a value from a database driver
+func (id *ObjectId) Scan(raw interface{}) error {
+	var data []byte
+	switch v := raw.(type) {
+	case []byte:
+		data = v
+	case string:
+		data = []byte(v)
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.ObjectId from: %#v", v)
+	}
+
+	return id.UnmarshalText(data)
+}
+
+// Value converts a value to a database driver value
+func (id *ObjectId) Value() (driver.Value, error) {
+	return driver.Value(string(*id)), nil
+}
+
+func (id *ObjectId) String() string {
+	return string(*id)
+}
+
+// MarshalJSON returns the ObjectId as JSON
+func (id *ObjectId) MarshalJSON() ([]byte, error) {
+	var w jwriter.Writer
+	id.MarshalEasyJSON(&w)
+	return w.BuildBytes()
+}
+
+// MarshalEasyJSON writes the ObjectId to an easyjson.Writer
+func (id *ObjectId) MarshalEasyJSON(w *jwriter.Writer) {
+	w.String(bson.ObjectId(*id).Hex())
+}
+
+// UnmarshalJSON sets the ObjectId from JSON
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+	l := jlexer.Lexer{Data: data}
+	id.UnmarshalEasyJSON(&l)
+	return l.Error()
+}
+
+// UnmarshalEasyJSON sets the ObjectId from an easyjson.Lexer
+func (id *ObjectId) UnmarshalEasyJSON(in *jlexer.Lexer) {
+	if data := in.String(); in.Ok() {
+		*id = NewObjectId(data)
+	}
+}
+
+// GetBSON returns the hex representation of the ObjectId as a bson.M{} map.
+func (id *ObjectId) GetBSON() (interface{}, error) {
+	return bson.M{"data": bson.ObjectId(*id).Hex()}, nil
+}
+
+// SetBSON sets the ObjectId from raw bson data
+func (id *ObjectId) SetBSON(raw bson.Raw) error {
+	var m bson.M
+	if err := raw.Unmarshal(&m); err != nil {
+		return err
+	}
+
+	if data, ok := m["data"].(string); ok {
+		*id = NewObjectId(data)
+		return nil
+	}
+
+	return errors.New("couldn't unmarshal bson raw value as ObjectId")
+}
diff --git a/vendor/github.com/go-openapi/strfmt/bson_test.go b/vendor/github.com/go-openapi/strfmt/bson_test.go new file mode 100644 index 000000000..32ba962b1 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/bson_test.go @@ -0,0 +1,53 @@ +// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"testing"
+
+	"gopkg.in/mgo.v2/bson"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestBSONObjectId_fullCycle(t *testing.T) {
+	id := NewObjectId("507f1f77bcf86cd799439011")
+	bytes, err := id.MarshalText()
+	assert.NoError(t, err)
+
+	var idCopy ObjectId
+
+	err = idCopy.Scan(bytes)
+	assert.NoError(t, err)
+	assert.Equal(t, id, idCopy)
+
+	err = idCopy.UnmarshalText(bytes)
+	assert.NoError(t, err)
+	assert.Equal(t, id, idCopy)
+
+	jsonBytes, err := id.MarshalJSON()
+	assert.NoError(t, err)
+
+	err = idCopy.UnmarshalJSON(jsonBytes)
+	assert.NoError(t, err)
+	assert.Equal(t, id, idCopy)
+
+	bsonBytes, err := bson.Marshal(&id)
+	assert.NoError(t, err)
+
+	err = bson.Unmarshal(bsonBytes, &idCopy)
+	assert.NoError(t, err)
+	assert.Equal(t, id, idCopy)
+}
diff --git a/vendor/github.com/go-openapi/strfmt/conv/date.go b/vendor/github.com/go-openapi/strfmt/conv/date.go new file mode 100644 index 000000000..b5c382060 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/conv/date.go @@ -0,0 +1,18 @@ +package conv
+
+import "github.com/go-openapi/strfmt"
+
+// Date returns a pointer to the Date value passed in.
+func Date(v strfmt.Date) *strfmt.Date {
+	return &v
+}
+
+// DateValue returns the value of the Date pointer passed in or
+// the default value if the pointer is nil.
+func DateValue(v *strfmt.Date) strfmt.Date {
+	if v == nil {
+		return strfmt.Date{}
+	}
+
+	return *v
+}
diff --git a/vendor/github.com/go-openapi/strfmt/conv/date_test.go b/vendor/github.com/go-openapi/strfmt/conv/date_test.go new file mode 100644 index 000000000..da9788892 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/conv/date_test.go @@ -0,0 +1,15 @@ +package conv
+
+import (
+	"testing"
+	"time"
+
+	"github.com/go-openapi/strfmt"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestDateValue(t *testing.T) {
+	assert.Equal(t, strfmt.Date{}, DateValue(nil))
+	date := strfmt.Date(time.Now())
+	assert.Equal(t, date, DateValue(&date))
+}
diff --git a/vendor/github.com/go-openapi/strfmt/conv/default.go b/vendor/github.com/go-openapi/strfmt/conv/default.go new file mode 100644 index 000000000..d00fa175e --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/conv/default.go @@ -0,0 +1,290 @@ +package conv
+
+import (
+	"github.com/go-openapi/strfmt"
+)
+
+// Base64 returns a pointer to the Base64 value passed in.
+func Base64(v strfmt.Base64) *strfmt.Base64 {
+	return &v
+}
+
+// Base64Value returns the value of the Base64 pointer passed in or
+// the default value if the pointer is nil.
+func Base64Value(v *strfmt.Base64) strfmt.Base64 {
+	if v == nil {
+		return nil
+	}
+
+	return *v
+}
+
+// URI returns a pointer to the URI value passed in.
+func URI(v strfmt.URI) *strfmt.URI {
+	return &v
+}
+
+// URIValue returns the value of the URI pointer passed in or
+// the default value if the pointer is nil.
+func URIValue(v *strfmt.URI) strfmt.URI {
+	if v == nil {
+		return strfmt.URI("")
+	}
+
+	return *v
+}
+
+// Email returns a pointer to the Email value passed in.
+func Email(v strfmt.Email) *strfmt.Email {
+	return &v
+}
+
+// EmailValue returns the value of the Email pointer passed in or
+// the default value if the pointer is nil.
+func EmailValue(v *strfmt.Email) strfmt.Email {
+	if v == nil {
+		return strfmt.Email("")
+	}
+
+	return *v
+}
+
+// Hostname returns a pointer to the Hostname value passed in.
+func Hostname(v strfmt.Hostname) *strfmt.Hostname {
+	return &v
+}
+
+// HostnameValue returns the value of the Hostname pointer passed in or
+// the default value if the pointer is nil.
+func HostnameValue(v *strfmt.Hostname) strfmt.Hostname {
+	if v == nil {
+		return strfmt.Hostname("")
+	}
+
+	return *v
+}
+
+// IPv4 returns a pointer to the IPv4 value passed in.
+func IPv4(v strfmt.IPv4) *strfmt.IPv4 {
+	return &v
+}
+
+// IPv4Value returns the value of the IPv4 pointer passed in or
+// the default value if the pointer is nil.
+func IPv4Value(v *strfmt.IPv4) strfmt.IPv4 {
+	if v == nil {
+		return strfmt.IPv4("")
+	}
+
+	return *v
+}
+
+// IPv6 returns a pointer to the IPv6 value passed in.
+func IPv6(v strfmt.IPv6) *strfmt.IPv6 {
+	return &v
+}
+
+// IPv6Value returns the value of the IPv6 pointer passed in or
+// the default value if the pointer is nil.
+func IPv6Value(v *strfmt.IPv6) strfmt.IPv6 {
+	if v == nil {
+		return strfmt.IPv6("")
+	}
+
+	return *v
+}
+
+// MAC returns a pointer to the MAC value passed in.
+func MAC(v strfmt.MAC) *strfmt.MAC {
+	return &v
+}
+
+// MACValue returns the value of the MAC pointer passed in or
+// the default value if the pointer is nil.
+func MACValue(v *strfmt.MAC) strfmt.MAC {
+	if v == nil {
+		return strfmt.MAC("")
+	}
+
+	return *v
+}
+
+// UUID returns a pointer to the UUID value passed in.
+func UUID(v strfmt.UUID) *strfmt.UUID {
+	return &v
+}
+
+// UUIDValue returns the value of the UUID pointer passed in or
+// the default value if the pointer is nil.
+func UUIDValue(v *strfmt.UUID) strfmt.UUID {
+	if v == nil {
+		return strfmt.UUID("")
+	}
+
+	return *v
+}
+
+// UUID3 returns a pointer to the UUID3 value passed in.
+func UUID3(v strfmt.UUID3) *strfmt.UUID3 {
+	return &v
+}
+
+// UUID3Value returns the value of the UUID3 pointer passed in or
+// the default value if the pointer is nil.
+func UUID3Value(v *strfmt.UUID3) strfmt.UUID3 {
+	if v == nil {
+		return strfmt.UUID3("")
+	}
+
+	return *v
+}
+
+// UUID4 returns a pointer to the UUID4 value passed in.
+func UUID4(v strfmt.UUID4) *strfmt.UUID4 {
+	return &v
+}
+
+// UUID4Value returns the value of the UUID4 pointer passed in or
+// the default value if the pointer is nil.
+func UUID4Value(v *strfmt.UUID4) strfmt.UUID4 {
+	if v == nil {
+		return strfmt.UUID4("")
+	}
+
+	return *v
+}
+
+// UUID5 returns a pointer to the UUID5 value passed in.
+func UUID5(v strfmt.UUID5) *strfmt.UUID5 {
+	return &v
+}
+
+// UUID5Value returns the value of the UUID5 pointer passed in or
+// the default value if the pointer is nil.
+func UUID5Value(v *strfmt.UUID5) strfmt.UUID5 {
+	if v == nil {
+		return strfmt.UUID5("")
+	}
+
+	return *v
+}
+
+// ISBN returns a pointer to the ISBN value passed in.
+func ISBN(v strfmt.ISBN) *strfmt.ISBN {
+	return &v
+}
+
+// ISBNValue returns the value of the ISBN pointer passed in or
+// the default value if the pointer is nil.
+func ISBNValue(v *strfmt.ISBN) strfmt.ISBN {
+	if v == nil {
+		return strfmt.ISBN("")
+	}
+
+	return *v
+}
+
+// ISBN10 returns a pointer to the ISBN10 value passed in.
+func ISBN10(v strfmt.ISBN10) *strfmt.ISBN10 {
+	return &v
+}
+
+// ISBN10Value returns the value of the ISBN10 pointer passed in or
+// the default value if the pointer is nil.
+func ISBN10Value(v *strfmt.ISBN10) strfmt.ISBN10 {
+	if v == nil {
+		return strfmt.ISBN10("")
+	}
+
+	return *v
+}
+
+// ISBN13 returns a pointer to the ISBN13 value passed in.
+func ISBN13(v strfmt.ISBN13) *strfmt.ISBN13 {
+	return &v
+}
+
+// ISBN13Value returns the value of the ISBN13 pointer passed in or
+// the default value if the pointer is nil.
+func ISBN13Value(v *strfmt.ISBN13) strfmt.ISBN13 {
+	if v == nil {
+		return strfmt.ISBN13("")
+	}
+
+	return *v
+}
+
+// CreditCard returns a pointer to the CreditCard value passed in.
+func CreditCard(v strfmt.CreditCard) *strfmt.CreditCard {
+	return &v
+}
+
+// CreditCardValue returns the value of the CreditCard pointer passed in or
+// the default value if the pointer is nil.
+func CreditCardValue(v *strfmt.CreditCard) strfmt.CreditCard {
+	if v == nil {
+		return strfmt.CreditCard("")
+	}
+
+	return *v
+}
+
+// SSN returns a pointer to the SSN value passed in.
+func SSN(v strfmt.SSN) *strfmt.SSN {
+	return &v
+}
+
+// SSNValue returns the value of the SSN pointer passed in or
+// the default value if the pointer is nil.
+func SSNValue(v *strfmt.SSN) strfmt.SSN {
+	if v == nil {
+		return strfmt.SSN("")
+	}
+
+	return *v
+}
+
+// HexColor returns a pointer to the HexColor value passed in.
+func HexColor(v strfmt.HexColor) *strfmt.HexColor {
+	return &v
+}
+
+// HexColorValue returns the value of the HexColor pointer passed in or
+// the default value if the pointer is nil.
+func HexColorValue(v *strfmt.HexColor) strfmt.HexColor {
+	if v == nil {
+		return strfmt.HexColor("")
+	}
+
+	return *v
+}
+
+// RGBColor returns a pointer to the RGBColor value passed in.
+func RGBColor(v strfmt.RGBColor) *strfmt.RGBColor {
+	return &v
+}
+
+// RGBColorValue returns the value of the RGBColor pointer passed in or
+// the default value if the pointer is nil.
+func RGBColorValue(v *strfmt.RGBColor) strfmt.RGBColor {
+	if v == nil {
+		return strfmt.RGBColor("")
+	}
+
+	return *v
+}
+
+// Password returns a pointer to the Password value passed in.
+func Password(v strfmt.Password) *strfmt.Password {
+	return &v
+}
+
+// PasswordValue returns the value of the Password pointer passed in or
+// the default value if the pointer is nil.
+func PasswordValue(v *strfmt.Password) strfmt.Password { + if v == nil { + return strfmt.Password("") + } + + return *v +} diff --git a/vendor/github.com/go-openapi/strfmt/conv/default_test.go b/vendor/github.com/go-openapi/strfmt/conv/default_test.go new file mode 100644 index 000000000..b182ef741 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/conv/default_test.go @@ -0,0 +1,122 @@ +package conv + +import ( + "testing" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" +) + +func TestBase64Value(t *testing.T) { + assert.Equal(t, strfmt.Base64(nil), Base64Value(nil)) + base64 := strfmt.Base64([]byte{4, 2}) + assert.Equal(t, base64, Base64Value(&base64)) +} + +func TestURIValue(t *testing.T) { + assert.Equal(t, strfmt.URI(""), URIValue(nil)) + value := strfmt.URI("foo") + assert.Equal(t, value, URIValue(&value)) +} + +func TestEmailValue(t *testing.T) { + assert.Equal(t, strfmt.Email(""), EmailValue(nil)) + value := strfmt.Email("foo") + assert.Equal(t, value, EmailValue(&value)) +} + +func TestHostnameValue(t *testing.T) { + assert.Equal(t, strfmt.Hostname(""), HostnameValue(nil)) + value := strfmt.Hostname("foo") + assert.Equal(t, value, HostnameValue(&value)) +} + +func TestIPv4Value(t *testing.T) { + assert.Equal(t, strfmt.IPv4(""), IPv4Value(nil)) + value := strfmt.IPv4("foo") + assert.Equal(t, value, IPv4Value(&value)) +} + +func TestIPv6Value(t *testing.T) { + assert.Equal(t, strfmt.IPv6(""), IPv6Value(nil)) + value := strfmt.IPv6("foo") + assert.Equal(t, value, IPv6Value(&value)) +} + +func TestMACValue(t *testing.T) { + assert.Equal(t, strfmt.MAC(""), MACValue(nil)) + value := strfmt.MAC("foo") + assert.Equal(t, value, MACValue(&value)) +} + +func TestUUIDValue(t *testing.T) { + assert.Equal(t, strfmt.UUID(""), UUIDValue(nil)) + value := strfmt.UUID("foo") + assert.Equal(t, value, UUIDValue(&value)) +} + +func TestUUID3Value(t *testing.T) { + assert.Equal(t, strfmt.UUID3(""), UUID3Value(nil)) + value := strfmt.UUID3("foo") + assert.Equal(t, value, UUID3Value(&value)) +} + +func TestUUID4Value(t *testing.T) { + assert.Equal(t, strfmt.UUID4(""), UUID4Value(nil)) + value := strfmt.UUID4("foo") + assert.Equal(t, value, UUID4Value(&value)) +} + +func TestUUID5Value(t *testing.T) { + assert.Equal(t, strfmt.UUID5(""), UUID5Value(nil)) + value := strfmt.UUID5("foo") + assert.Equal(t, value, UUID5Value(&value)) +} + +func TestISBNValue(t *testing.T) { + assert.Equal(t, strfmt.ISBN(""), ISBNValue(nil)) + value := strfmt.ISBN("foo") + assert.Equal(t, value, ISBNValue(&value)) +} + +func TestISBN10Value(t *testing.T) { + assert.Equal(t, strfmt.ISBN10(""), ISBN10Value(nil)) + value := strfmt.ISBN10("foo") + assert.Equal(t, value, ISBN10Value(&value)) +} + +func TestISBN13Value(t *testing.T) { + assert.Equal(t, strfmt.ISBN13(""), ISBN13Value(nil)) + value := strfmt.ISBN13("foo") + assert.Equal(t, value, ISBN13Value(&value)) +} + +func TestCreditCardValue(t *testing.T) { + assert.Equal(t, strfmt.CreditCard(""), CreditCardValue(nil)) + value := strfmt.CreditCard("foo") + assert.Equal(t, value, CreditCardValue(&value)) +} + +func TestSSNValue(t *testing.T) { + assert.Equal(t, strfmt.SSN(""), SSNValue(nil)) + value := strfmt.SSN("foo") + assert.Equal(t, value, SSNValue(&value)) +} + +func TestHexColorValue(t *testing.T) { + assert.Equal(t, strfmt.HexColor(""), HexColorValue(nil)) + value := strfmt.HexColor("foo") + assert.Equal(t, value, HexColorValue(&value)) +} + +func TestRGBColorValue(t *testing.T) { + assert.Equal(t, strfmt.RGBColor(""), RGBColorValue(nil)) + 
value := strfmt.RGBColor("foo") + assert.Equal(t, value, RGBColorValue(&value)) +} + +func TestPasswordValue(t *testing.T) { + assert.Equal(t, strfmt.Password(""), PasswordValue(nil)) + value := strfmt.Password("foo") + assert.Equal(t, value, PasswordValue(&value)) +} diff --git a/vendor/github.com/go-openapi/strfmt/conv/duration.go b/vendor/github.com/go-openapi/strfmt/conv/duration.go new file mode 100644 index 000000000..ea30132b5 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/conv/duration.go @@ -0,0 +1,18 @@ +package conv + +import "github.com/go-openapi/strfmt" + +// Duration returns a pointer to of the Duration value passed in. +func Duration(v strfmt.Duration) *strfmt.Duration { + return &v +} + +// DurationValue returns the value of the Duration pointer passed in or +// the default value if the pointer is nil. +func DurationValue(v *strfmt.Duration) strfmt.Duration { + if v == nil { + return strfmt.Duration(0) + } + + return *v +} diff --git a/vendor/github.com/go-openapi/strfmt/conv/duration_test.go b/vendor/github.com/go-openapi/strfmt/conv/duration_test.go new file mode 100644 index 000000000..f19173f4e --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/conv/duration_test.go @@ -0,0 +1,14 @@ +package conv + +import ( + "testing" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" +) + +func TestDurationValue(t *testing.T) { + assert.Equal(t, strfmt.Duration(0), DurationValue(nil)) + duration := strfmt.Duration(42) + assert.Equal(t, duration, DurationValue(&duration)) +} diff --git a/vendor/github.com/go-openapi/strfmt/conv/time.go b/vendor/github.com/go-openapi/strfmt/conv/time.go new file mode 100644 index 000000000..627cd12d7 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/conv/time.go @@ -0,0 +1,18 @@ +package conv + +import "github.com/go-openapi/strfmt" + +// DateTime returns a pointer to of the DateTime value passed in. +func DateTime(v strfmt.DateTime) *strfmt.DateTime { + return &v +} + +// DateTimeValue returns the value of the DateTime pointer passed in or +// the default value if the pointer is nil. +func DateTimeValue(v *strfmt.DateTime) strfmt.DateTime { + if v == nil { + return strfmt.DateTime{} + } + + return *v +} diff --git a/vendor/github.com/go-openapi/strfmt/conv/time_test.go b/vendor/github.com/go-openapi/strfmt/conv/time_test.go new file mode 100644 index 000000000..89a427df2 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/conv/time_test.go @@ -0,0 +1,15 @@ +package conv + +import ( + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" +) + +func TestDateTimeValue(t *testing.T) { + assert.Equal(t, strfmt.DateTime{}, DateTimeValue(nil)) + time := strfmt.DateTime(time.Now()) + assert.Equal(t, time, DateTimeValue(&time)) +} diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go new file mode 100644 index 000000000..a56f89d81 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/date.go @@ -0,0 +1,151 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"time"
+
+	"gopkg.in/mgo.v2/bson"
+
+	"github.com/mailru/easyjson/jlexer"
+	"github.com/mailru/easyjson/jwriter"
+)
+
+func init() {
+	d := Date{}
+	// register this format in the default registry
+	Default.Add("date", &d, IsDate)
+}
+
+// IsDate returns true when the string is a valid date
+func IsDate(str string) bool {
+	_, err := time.Parse(RFC3339FullDate, str)
+	return err == nil
+}
+
+const (
+	// RFC3339FullDate represents a full-date as specified by RFC3339
+	// See: http://goo.gl/xXOvVd
+	RFC3339FullDate = "2006-01-02"
+)
+
+// Date represents a date from the API
+//
+// swagger:strfmt date
+type Date time.Time
+
+// String converts this date into a string
+func (d Date) String() string {
+	return time.Time(d).Format(RFC3339FullDate)
+}
+
+// UnmarshalText parses a text representation into a date type
+func (d *Date) UnmarshalText(text []byte) error {
+	if len(text) == 0 {
+		return nil
+	}
+	dd, err := time.Parse(RFC3339FullDate, string(text))
+	if err != nil {
+		return err
+	}
+	*d = Date(dd)
+	return nil
+}
+
+// MarshalText serializes this date type to string
+func (d Date) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// Scan scans a Date value from a database driver type.
+func (d *Date) Scan(raw interface{}) error {
+	switch v := raw.(type) {
+	case []byte:
+		return d.UnmarshalText(v)
+	case string:
+		return d.UnmarshalText([]byte(v))
+	case time.Time:
+		*d = Date(v)
+		return nil
+	case nil:
+		*d = Date{}
+		return nil
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v)
+	}
+}
+
+// Value converts Date to a primitive value ready to be written to a database.
+func (d Date) Value() (driver.Value, error) {
+	return driver.Value(d.String()), nil
+}
+
+// MarshalJSON returns the Date as JSON
+func (d Date) MarshalJSON() ([]byte, error) {
+	var w jwriter.Writer
+	d.MarshalEasyJSON(&w)
+	return w.BuildBytes()
+}
+
+// MarshalEasyJSON writes the Date to an easyjson.Writer
+func (d Date) MarshalEasyJSON(w *jwriter.Writer) {
+	w.String(time.Time(d).Format(RFC3339FullDate))
+}
+
+// UnmarshalJSON sets the Date from JSON
+func (d *Date) UnmarshalJSON(data []byte) error {
+	if string(data) == jsonNull {
+		return nil
+	}
+	l := jlexer.Lexer{Data: data}
+	d.UnmarshalEasyJSON(&l)
+	return l.Error()
+}
+
+// UnmarshalEasyJSON sets the Date from an easyjson.Lexer
+func (d *Date) UnmarshalEasyJSON(in *jlexer.Lexer) {
+	if data := in.String(); in.Ok() {
+		tt, err := time.Parse(RFC3339FullDate, data)
+		if err != nil {
+			in.AddError(err)
+			return
+		}
+		*d = Date(tt)
+	}
+}
+
+// GetBSON returns the Date as a bson.M{} map.
+func (d *Date) GetBSON() (interface{}, error) { + return bson.M{"data": d.String()}, nil +} + +// SetBSON sets the Date from raw bson data +func (d *Date) SetBSON(raw bson.Raw) error { + var m bson.M + if err := raw.Unmarshal(&m); err != nil { + return err + } + + if data, ok := m["data"].(string); ok { + rd, err := time.Parse(RFC3339FullDate, data) + *d = Date(rd) + return err + } + + return errors.New("couldn't unmarshal bson raw value as Date") +} diff --git a/vendor/github.com/go-openapi/strfmt/date_test.go b/vendor/github.com/go-openapi/strfmt/date_test.go new file mode 100644 index 000000000..bc491f512 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/date_test.go @@ -0,0 +1,125 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package strfmt + +import ( + "database/sql" + "database/sql/driver" + "testing" + "time" + + "gopkg.in/mgo.v2/bson" + + "github.com/stretchr/testify/assert" +) + +var _ sql.Scanner = &Date{} +var _ driver.Valuer = Date{} + +func TestDate(t *testing.T) { + pp := Date{} + err := pp.UnmarshalText([]byte{}) + assert.NoError(t, err) + err = pp.UnmarshalText([]byte("yada")) + assert.Error(t, err) + + orig := "2014-12-15" + bj := []byte("\"" + orig + "\"") + err = pp.UnmarshalText([]byte(orig)) + assert.NoError(t, err) + + txt, err := pp.MarshalText() + assert.NoError(t, err) + assert.Equal(t, orig, string(txt)) + + err = pp.UnmarshalJSON(bj) + assert.NoError(t, err) + assert.EqualValues(t, orig, pp.String()) + + err = pp.UnmarshalJSON([]byte(`"1972/01/01"`)) + assert.Error(t, err) + + b, err := pp.MarshalJSON() + assert.NoError(t, err) + assert.Equal(t, bj, b) + + dateOriginal := Date(time.Date(2014, 10, 10, 0, 0, 0, 0, time.UTC)) + + bsonData, err := bson.Marshal(&dateOriginal) + assert.NoError(t, err) + + var dateCopy Date + err = bson.Unmarshal(bsonData, &dateCopy) + assert.NoError(t, err) + assert.Equal(t, dateOriginal, dateCopy) + + var dateZero Date + err = dateZero.UnmarshalJSON([]byte(jsonNull)) + assert.NoError(t, err) + assert.Equal(t, Date{}, dateZero) +} + +func TestDate_Scan(t *testing.T) { + ref := time.Now().Truncate(24 * time.Hour).UTC() + date, str := Date(ref), ref.Format(RFC3339FullDate) + + values := []interface{}{str, []byte(str), ref} + for _, value := range values { + result := Date{} + (&result).Scan(value) + assert.Equal(t, date, result, "value: %#v", value) + } + + dd := Date{} + err := dd.Scan(nil) + assert.NoError(t, err) + assert.Equal(t, Date{}, dd) + + err = dd.Scan(19700101) + assert.Error(t, err) +} + +func TestDate_Value(t *testing.T) { + ref := time.Now().Truncate(24 * time.Hour).UTC() + date := Date(ref) + dbv, err := date.Value() + assert.NoError(t, err) + assert.EqualValues(t, dbv, ref.Format("2006-01-02")) +} + +func TestDate_IsDate(t *testing.T) { + tests := []struct { + value string + valid bool + }{ + {"2017-12-22", true}, + {"2017-1-1", false}, + {"17-13-22", false}, + {"2017-02-29", false}, // not a valid date : 2017 is not a leap year + {"1900-02-29", false}, // 
not a valid date : 1900 is not a leap year
+		{"2100-02-29", false}, // not a valid date : 2100 is not a leap year
+		{"2000-02-29", true},  // a valid date : 2000 is a leap year
+		{"2400-02-29", true},  // a valid date : 2400 is a leap year
+		{"2017-13-22", false},
+		{"2017-12-32", false},
+		{"20171-12-32", false},
+		{"YYYY-MM-DD", false},
+		{"20-17-2017", false},
+		{"2017-12-22T01:02:03Z", false},
+	}
+	for _, test := range tests {
+		assert.Equal(t, test.valid, IsDate(test.value), "value [%s] should be valid: [%t]", test.value, test.valid)
+	}
+}
diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go new file mode 100644 index 000000000..6f46d8161 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/default.go @@ -0,0 +1,1847 @@ +// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/asaskevich/govalidator"
+	"github.com/mailru/easyjson/jlexer"
+	"github.com/mailru/easyjson/jwriter"
+
+	"gopkg.in/mgo.v2/bson"
+)
+
+const (
+	// HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114
+	//  A string instance is valid against this attribute if it is a valid
+	//  representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
+	//  http://tools.ietf.org/html/rfc1034#section-3.5
+	//  <digit> ::= any one of the ten digits 0 through 9
+	//  var digit = /[0-9]/;
+	//  <letter> ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case
+	//  var letter = /[a-zA-Z]/;
+	//  <let-dig> ::= <letter> | <digit>
+	//  var letDig = /[0-9a-zA-Z]/;
+	//  <let-dig-hyp> ::= <let-dig> | "-"
+	//  var letDigHyp = /[-0-9a-zA-Z]/;
+	//  <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
+	//  var ldhStr = /[-0-9a-zA-Z]+/;
+	//