From a174acf1e1dc3969a8084d089a84c0f5afb1e1bb Mon Sep 17 00:00:00 2001
From: Mario Manno
Date: Mon, 30 Jul 2018 14:29:01 +0200
Subject: [PATCH 1/4] Rename `roles` to `instance_groups`

* rename `model.Roles` to `InstanceGroups`
* rename `model.RoleJob` to `JobReference`
* rename `roles:` key to `instance_groups:` in role manifest
* rename some related functions, e.g. `IsColocatedContainerRole`
* rename local variables, change comments and error messages
* change tests to tag the instance group with the name "some-group" instead of "role"
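For illustration, the user-visible side of the rename in a role manifest
(a sketch of the NATS example that this patch updates in
docs/configuration.md):

```yaml
# before
roles:
- name: nats

# after
instance_groups:
- name: nats
```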
---
 Makefile | 2 +
 app/fissile.go | 74 +--
 app/fissile_test.go | 8 +-
 app/validation.go | 18 +-
 app/validation_test.go | 2 +-
 builder/packages_image.go | 22 +-
 builder/packages_image_test.go | 50 +-
 builder/role_image.go | 122 ++--
 builder/role_image_test.go | 50 +-
 compilator/compilator.go | 22 +-
 compilator/compilator_linux.go | 2 +-
 compilator/compilator_test.go | 2 +-
 docs/configuration.md | 4 +-
 kube/deployment.go | 70 +-
 kube/deployment_test.go | 176 ++---
 kube/job.go | 18 +-
 kube/job_test.go | 32 +-
 kube/pod.go | 32 +-
 kube/pod_test.go | 74 +--
 kube/service.go | 4 +-
 kube/service_test.go | 4 +-
 kube/stateful_set.go | 4 +-
 kube/stateful_set_test.go | 4 +-
 kube/values.go | 50 +-
 kube/values_test.go | 16 +-
 model/mustache.go | 6 +-
 model/mustache_test.go | 2 +-
 model/roles.go | 617 +++++++++---------
 model/roles_test.go | 228 +++----
 scripts/dockerfiles/Dockerfile-role | 2 +-
 scripts/dockerfiles/pre-stop.sh | 4 +-
 scripts/dockerfiles/run.sh | 14 +-
 test-assets/role-manifests/app/hashmat.yml | 2 +-
 .../role-manifests/app/roles-to-build.yml | 2 +-
 .../app/tor-validation-issues.yml | 2 +-
 .../role-manifests/app/tor-validation-ok.yml | 2 +-
 test-assets/role-manifests/app/two-roles.yml | 2 +-
 .../role-manifests/builder/tor-good.yml | 2 +-
 .../role-manifests/compilator/tor-good.yml | 2 +-
 .../kube/bosh-run-count-configurable.yml | 2 +-
 ...ntainers-with-deployment-and-empty-dir.yml | 4 +-
 ...ainers-with-stateful-set-and-empty-dir.yml | 2 +-
 .../kube/colocated-containers.yml | 2 +-
 .../role-manifests/kube/exposed-ports.yml | 2 +-
 .../kube/job-with-annotation.yml | 4 +-
 test-assets/role-manifests/kube/jobs.yml | 2 +-
 .../kube/pod-with-invalid-node-affinity.yml | 4 +-
 .../kube/pod-with-invalid-pod-affinity.yml | 4 +-
 .../kube/pod-with-no-pod-anti-affinity.yml | 4 +-
 .../kube/pod-with-valid-pod-anti-affinity.yml | 6 +-
 test-assets/role-manifests/kube/pods.yml | 2 +-
 .../role-manifests/kube/service-headed.yml | 2 +-
 .../role-manifests/kube/service-headless.yml | 2 +-
 .../kube/volumes-with-annotation.yml | 2 +-
 test-assets/role-manifests/kube/volumes.yml | 2 +-
 .../model/bad-cv-type-internal.yml | 2 +-
 .../role-manifests/model/bad-cv-type.yml | 2 +-
 .../role-manifests/model/bosh-run-bad-cpu.yml | 2 +-
 .../model/bosh-run-bad-memory.yml | 2 +-
 .../model/bosh-run-bad-parse.yml | 2 +-
 .../model/bosh-run-bad-port-count.yml | 2 +-
 .../model/bosh-run-bad-port-names.yml | 2 +-
 .../model/bosh-run-bad-ports.yml | 2 +-
 .../model/bosh-run-bad-proto.yml | 2 +-
 .../role-manifests/model/bosh-run-env.yml | 2 +-
 .../model/bosh-run-headless-public-port.yml | 2 +-
 .../model/bosh-run-missing-portrange.yml | 2 +-
 .../role-manifests/model/bosh-run-missing.yml | 2 +-
 .../role-manifests/model/bosh-run-ok.yml | 2 +-
 .../model/bosh-run-reverse-portrange.yml | 2 +-
 ...olocated-containers-with-clustered-tag.yml | 2 +-
 ...colocated-containers-with-missing-role.yml | 2 +-
 ...ated-containers-with-no-port-collision.yml | 2 +-
 ...located-containers-with-port-collision.yml | 2 +-
 .../colocated-containers-with-unused-role.yml | 2 +-
 ...ed-containers-with-volume-share-issues.yml | 2 +-
 .../model/colocated-containers.yml | 2 +-
 .../role-manifests/model/docker-run-env.yml | 2 +-
 .../role-manifests/model/multiple-bad.yml | 2 +-
 .../role-manifests/model/multiple-good.yml | 2 +-
 .../role-manifests/model/non-bosh-roles.yml | 2 +-
 .../model/rbac-missing-account.yml | 2 +-
 .../role-manifests/model/templates-non.yml | 2 +-
 test-assets/role-manifests/model/tor-bad.yml | 2 +-
 test-assets/role-manifests/model/tor-good.yml | 2 +-
 .../model/variable-expansion.yml | 2 +-
 .../model/variables-without-decl.yml | 2 +-
 .../model/variables-without-usage.yml | 2 +-
 88 files changed, 931 insertions(+), 930 deletions(-)

diff --git a/Makefile b/Makefile
index cf210edd..fd0f2045 100755
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,8 @@
 #!/usr/bin/env make
 
 ifeq ($(GIT_ROOT),)
+
+
 GIT_ROOT:=$(shell git rev-parse --show-toplevel)
 endif

diff --git a/app/fissile.go b/app/fissile.go
index c43fc588..92abe27f 100644
--- a/app/fissile.go
+++ b/app/fissile.go
@@ -301,7 +301,7 @@ func newPropertyInfo(maybeHash bool) *propertyInfo {
 }
 
 // Compile will compile a list of dev BOSH releases
-func (f *Fissile) Compile(stemcellImageName string, targetPath, roleManifestPath, metricsPath string, roleNames, releaseNames []string, workerCount int, dockerNetworkMode string, withoutDocker, verbose bool) error {
+func (f *Fissile) Compile(stemcellImageName string, targetPath, roleManifestPath, metricsPath string, instanceGroupNames, releaseNames []string, workerCount int, dockerNetworkMode string, withoutDocker, verbose bool) error {
 	if len(f.releases) == 0 {
 		return fmt.Errorf("Releases not loaded")
 	}
@@ -344,12 +344,12 @@
 		}
 	}
 
-	roles, err := roleManifest.SelectRoles(roleNames)
+	instanceGroups, err := roleManifest.SelectInstanceGroups(instanceGroupNames)
 	if err != nil {
 		return fmt.Errorf("Error selecting packages to build: %s", err.Error())
 	}
 
-	if err := comp.Compile(workerCount, releases, roles, verbose); err != nil {
+	if err := comp.Compile(workerCount, releases, instanceGroups, verbose); err != nil {
 		return fmt.Errorf("Error compiling packages: %s", err.Error())
 	}
 
@@ -420,7 +420,7 @@
 
 // GeneratePackagesRoleImage builds the docker image for the packages layer
 // where all packages are included
-func (f *Fissile) GeneratePackagesRoleImage(stemcellImageName string, roleManifest *model.RoleManifest, noBuild, force bool, roles model.Roles, packagesImageBuilder *builder.PackagesImageBuilder, labels map[string]string) error {
+func (f *Fissile) GeneratePackagesRoleImage(stemcellImageName string, roleManifest *model.RoleManifest, noBuild, force bool, instanceGroups model.InstanceGroups, packagesImageBuilder *builder.PackagesImageBuilder, labels map[string]string) error {
 	if len(f.releases) == 0 {
 		return fmt.Errorf("Releases not loaded")
 	}
@@ -430,9 +430,9 @@
 		return fmt.Errorf("Error connecting to docker: %s", err.Error())
 	}
 
-	packagesLayerImageName, err := packagesImageBuilder.GetPackagesLayerImageName(roleManifest, roles, f)
+	packagesLayerImageName, err := packagesImageBuilder.GetPackagesLayerImageName(roleManifest, instanceGroups, f)
 	if err != nil {
-		return fmt.Errorf("Error finding role's package name: %s", err.Error())
+		return fmt.Errorf("Error finding instance group's package name: %s", err.Error())
 	}
 
 	if !force {
 		if hasImage, err := dockerManager.HasImage(packagesLayerImageName); err == nil && hasImage {
@@ -460,7 +460,7 @@
 		docker.ColoredBuildStringFunc(packagesLayerImageName),
 	)
 
-	tarPopulator := packagesImageBuilder.NewDockerPopulator(roles, labels, force)
+	tarPopulator := packagesImageBuilder.NewDockerPopulator(instanceGroups, labels, force)
 	err = dockerManager.BuildImageFromCallback(packagesLayerImageName, stdoutWriter, tarPopulator)
 	if err != nil {
 		log.WriteTo(f.UI)
@@ -473,14 +473,14 @@
 
 // GeneratePackagesRoleTarball builds a tarball snapshot of the build context
 // for the docker image for the packages layer where all packages are included
-func (f *Fissile) GeneratePackagesRoleTarball(repository string, roleManifest *model.RoleManifest, noBuild, force bool, roles model.Roles, outputDirectory string, packagesImageBuilder *builder.PackagesImageBuilder, labels map[string]string) error {
+func (f *Fissile) GeneratePackagesRoleTarball(repository string, roleManifest *model.RoleManifest, noBuild, force bool, instanceGroups model.InstanceGroups, outputDirectory string, packagesImageBuilder *builder.PackagesImageBuilder, labels map[string]string) error {
 	if len(f.releases) == 0 {
 		return fmt.Errorf("Releases not loaded")
 	}
 
-	packagesLayerImageName, err := packagesImageBuilder.GetPackagesLayerImageName(roleManifest, roles, f)
+	packagesLayerImageName, err := packagesImageBuilder.GetPackagesLayerImageName(roleManifest, instanceGroups, f)
 	if err != nil {
-		return fmt.Errorf("Error finding role's package name: %v", err)
+		return fmt.Errorf("Error finding instance group's package name: %v", err)
 	}
 
 	outputPath := filepath.Join(outputDirectory, fmt.Sprintf("%s.tar", packagesLayerImageName))
@@ -507,7 +507,7 @@
 
 	// We always force build all packages here to avoid needing to talk to the
 	// docker daemon to figure out what we can keep
-	tarPopulator := packagesImageBuilder.NewDockerPopulator(roles, labels, true)
+	tarPopulator := packagesImageBuilder.NewDockerPopulator(instanceGroups, labels, true)
 	err = tarPopulator(tarWriter)
 	if err != nil {
 		return fmt.Errorf("Error writing tar file: %s", err)
@@ -522,7 +522,7 @@
 }
 
 // GenerateRoleImages generates all role images using releases
-func (f *Fissile) GenerateRoleImages(targetPath, registry, organization, repository, stemcellImageName, stemcellImageID, metricsPath string, noBuild, force bool, tagExtra string, roleNames []string, workerCount int, roleManifestPath, compiledPackagesPath, lightManifestPath, darkManifestPath, outputDirectory string, labels map[string]string) error {
+func (f *Fissile) GenerateRoleImages(targetPath, registry, organization, repository, stemcellImageName, stemcellImageID, metricsPath string, noBuild, force bool, tagExtra string, instanceGroupNames []string, workerCount int, roleManifestPath, compiledPackagesPath, lightManifestPath, darkManifestPath, outputDirectory string, labels map[string]string) error {
 	if len(f.releases) == 0 {
 		return fmt.Errorf("Releases not loaded")
 	}
@@ -570,21 +570,21 @@
 		return err
 	}
 
-	roles, err := roleManifest.SelectRoles(roleNames)
+	instanceGroups, err := roleManifest.SelectInstanceGroups(instanceGroupNames)
 	if err != nil {
 		return err
 	}
 
 	if outputDirectory == "" {
-		err = f.GeneratePackagesRoleImage(stemcellImageName, roleManifest, noBuild, force, roles, packagesImageBuilder, labels)
+		err = f.GeneratePackagesRoleImage(stemcellImageName, roleManifest, noBuild, force, instanceGroups, packagesImageBuilder, labels)
 	} else {
-		err = f.GeneratePackagesRoleTarball(stemcellImageName, roleManifest, noBuild, force, roles, outputDirectory, packagesImageBuilder, labels)
+		err = f.GeneratePackagesRoleTarball(stemcellImageName, roleManifest, noBuild, force, instanceGroups, outputDirectory, packagesImageBuilder, labels)
 	}
 	if err != nil {
 		return err
 	}
 
-	packagesLayerImageName, err := packagesImageBuilder.GetPackagesLayerImageName(roleManifest, roles, f)
+	packagesLayerImageName, err := packagesImageBuilder.GetPackagesLayerImageName(roleManifest, instanceGroups, f)
 	if err != nil {
 		return err
 	}
@@ -605,7 +605,7 @@
 		return err
 	}
 
-	return roleBuilder.BuildRoleImages(roles, registry, organization, repository, packagesLayerImageName, outputDirectory, force, noBuild, workerCount)
+	return roleBuilder.BuildRoleImages(instanceGroups, registry, organization, repository, packagesLayerImageName, outputDirectory, force, noBuild, workerCount)
 }
 
 // ListRoleImages lists all dev role images
@@ -638,13 +638,13 @@
 		return fmt.Errorf("Error loading opinions: %s", err.Error())
 	}
 
-	for _, role := range roleManifest.Roles {
-		devVersion, err := role.GetRoleDevVersion(opinions, tagExtra, f.Version, f)
+	for _, instanceGroup := range roleManifest.InstanceGroups {
+		devVersion, err := instanceGroup.GetRoleDevVersion(opinions, tagExtra, f.Version, f)
 		if err != nil {
-			return fmt.Errorf("Error creating role checksum: %s", err.Error())
+			return fmt.Errorf("Error creating instance group checksum: %s", err.Error())
 		}
 
-		imageName := builder.GetRoleDevImageName(registry, organization, repository, role, devVersion)
+		imageName := builder.GetRoleDevImageName(registry, organization, repository, instanceGroup, devVersion)
 
 		if !existingOnDocker {
 			f.UI.Println(imageName)
@@ -1033,9 +1033,9 @@ func (f *Fissile) writeHelmNode(dirName, fileName string, node helm.Node) error
 	return err
 }
 
-func (f *Fissile) generateBoshTaskRole(outputFile *os.File, role *model.Role, settings kube.ExportSettings) error {
-	if role.HasTag(model.RoleTagStopOnFailure) {
-		pod, err := kube.NewPod(role, settings, f)
+func (f *Fissile) generateBoshTaskRole(outputFile *os.File, instanceGroup *model.InstanceGroup, settings kube.ExportSettings) error {
+	if instanceGroup.HasTag(model.RoleTagStopOnFailure) {
+		pod, err := kube.NewPod(instanceGroup, settings, f)
 		if err != nil {
 			return err
 		}
@@ -1044,7 +1044,7 @@
 			return err
 		}
 	} else {
-		job, err := kube.NewJob(role, settings, f)
+		job, err := kube.NewJob(instanceGroup, settings, f)
 		if err != nil {
 			return err
 		}
@@ -1057,10 +1057,10 @@
 	return nil
 }
 
-// roleHasStorage returns true if a given role uses shared or
+// instanceGroupHasStorage returns true if a given group uses shared or
 // persistent volumes.
-func (f *Fissile) roleHasStorage(role *model.Role) bool {
-	for _, volume := range role.Run.Volumes {
+func (f *Fissile) instanceGroupHasStorage(instanceGroup *model.InstanceGroup) bool {
+	for _, volume := range instanceGroup.Run.Volumes {
 		switch volume.Type {
 		case model.VolumeTypePersistent, model.VolumeTypeShared:
 			return true
@@ -1070,15 +1070,15 @@
 }
 
 func (f *Fissile) generateKubeRoles(settings kube.ExportSettings) error {
-	for _, role := range settings.RoleManifest.Roles {
-		if role.IsColocatedContainerRole() {
+	for _, instanceGroup := range settings.RoleManifest.InstanceGroups {
+		if instanceGroup.IsColocated() {
 			continue
 		}
-		if settings.CreateHelmChart && role.Run.FlightStage == model.FlightStageManual {
+		if settings.CreateHelmChart && instanceGroup.Run.FlightStage == model.FlightStageManual {
 			continue
 		}
 
-		subDir := string(role.Type)
+		subDir := string(instanceGroup.Type)
 		if settings.CreateHelmChart {
 			subDir = "templates"
 		}
@@ -1087,11 +1087,11 @@
 		if err != nil {
 			return err
 		}
-		outputPath := filepath.Join(roleTypeDir, fmt.Sprintf("%s.yaml", role.Name))
+		outputPath := filepath.Join(roleTypeDir, fmt.Sprintf("%s.yaml", instanceGroup.Name))
 
 		f.UI.Printf("Writing config %s for role %s\n",
 			color.CyanString(outputPath),
-			color.CyanString(role.Name),
+			color.CyanString(instanceGroup.Name),
 		)
 
 		outputFile, err := os.Create(outputPath)
@@ -1100,9 +1100,9 @@
 		}
 		defer outputFile.Close()
 
-		switch role.Type {
+		switch instanceGroup.Type {
 		case model.RoleTypeBoshTask:
-			err := f.generateBoshTaskRole(outputFile, role, settings)
+			err := f.generateBoshTaskRole(outputFile, instanceGroup, settings)
 			if err != nil {
 				return err
 			}
@@ -1110,7 +1110,7 @@
 		case model.RoleTypeBosh:
 			enc := helm.NewEncoder(outputFile)
 
-			statefulSet, deps, err := kube.NewStatefulSet(role, settings, f)
+			statefulSet, deps, err := kube.NewStatefulSet(instanceGroup, settings, f)
 			if err != nil {
 				return err
 			}

diff --git a/app/fissile_test.go b/app/fissile_test.go
index 0a722058..548f644f 100644
--- a/app/fissile_test.go
+++ b/app/fissile_test.go
@@ -577,20 +577,20 @@ func TestFissileSelectRolesToBuild(t *testing.T) {
 		},
 		{
 			roleNames: []string{"missing_role"},
-			err:       "Some roles are unknown: [missing_role]",
+			err:       "Some instance groups are unknown: [missing_role]",
 		},
 	}
 
 	for _, sample := range testSamples {
 		t.Run(strings.Join(sample.roleNames, ","), func(t *testing.T) {
-			results, err := roleManifest.SelectRoles(sample.roleNames)
+			results, err := roleManifest.SelectInstanceGroups(sample.roleNames)
 			if sample.err != "" {
 				assert.EqualError(t, err, sample.err, "while testing %v", sample.roleNames)
 			} else {
 				assert.NoError(t, err, "while testing %v", sample.roleNames)
 				var actualNames []string
-				for _, role := range results {
-					actualNames = append(actualNames, role.Name)
+				for _, instanceGroup := range results {
+					actualNames = append(actualNames, instanceGroup.Name)
 				}
 				sort.Strings(actualNames)
 				assert.Equal(t, sample.expectedNames, actualNames, "while testing %v", sample.roleNames)

diff --git a/app/validation.go b/app/validation.go
index 6622792a..3535af43 100644
--- a/app/validation.go
+++ b/app/validation.go
@@ -124,13 +124,13 @@ func checkParentsOfUndefined(p string, bosh propertyDefaults) bool {
 }
 
 // collectManifestProperties returns a map merging the global and
-// per-role properties/templates into a single structure.
+// per-instance-group properties/templates into a single structure.
 func collectManifestProperties(roleManifest *model.RoleManifest) map[string]string {
 	properties := make(map[string]string)
 
-	// Per-role properties
-	for _, role := range roleManifest.Roles {
-		for property, template := range role.Configuration.Templates {
+	// Per-instance-group properties
+	for _, instanceGroup := range roleManifest.InstanceGroups {
+		for property, template := range instanceGroup.Configuration.Templates {
 			properties[property] = template
 		}
 	}
@@ -189,13 +189,13 @@
 		check[property] = struct{}{}
 	}
 
-	// ... then the per-role properties
-	for _, role := range roleManifest.Roles {
-		prefix := fmt.Sprintf("roles[%s].configuration.templates", role.Name)
+	// ... then the per-instance-group properties
+	for _, instanceGroup := range roleManifest.InstanceGroups {
+		prefix := fmt.Sprintf("instance-groups[%s].configuration.templates", instanceGroup.Name)
 
-		for property, template := range role.Configuration.Templates {
+		for property, template := range instanceGroup.Configuration.Templates {
 			// Skip over duplicates of the global
-			// properties in the per-role data, we already
+			// properties in the per-instance-group data, we already
 			// checked them, see above.
 			if _, ok := check[property]; ok {
 				continue

diff --git a/app/validation_test.go b/app/validation_test.go
index 371c6eba..1925a628 100644
--- a/app/validation_test.go
+++ b/app/validation_test.go
@@ -56,7 +56,7 @@ func TestValidation(t *testing.T) {
 		`properties.tor.masked_opinion: Forbidden: Dark opinion found in light opinions`,
 		// checkForDuplicatesBetweenManifestAndLight
 		`configuration.templates[properties.tor.hostname]: Forbidden: Role-manifest overrides opinion, remove opinion`,
-		`roles[myrole].configuration.templates[properties.tor.bogus]: Forbidden: Role-manifest duplicates opinion, remove from manifest`,
+		`instance-groups[myrole].configuration.templates[properties.tor.bogus]: Forbidden: Role-manifest duplicates opinion, remove from manifest`,
 		// checkForUndefinedBOSHProperties light, manifest - For the bogus property used above for checkOverridden
 		`role-manifest 'tor.bogus': Not found: "In any BOSH release"`,
 		`light opinion 'tor.bogus': Not found: "In any BOSH release"`,

diff --git a/builder/packages_image.go b/builder/packages_image.go
index 9a432387..fb98d19f 100644
--- a/builder/packages_image.go
+++ b/builder/packages_image.go
@@ -176,21 +176,21 @@ func (p *PackagesImageBuilder) determinePackagesLayerBaseImage(packages model.Pa
 }
 
 // NewDockerPopulator returns a function which can populate a tar stream with the docker context to build the packages layer image with
-func (p *PackagesImageBuilder) NewDockerPopulator(roles model.Roles, labels map[string]string, forceBuildAll bool) func(*tar.Writer) error {
+func (p *PackagesImageBuilder) NewDockerPopulator(instanceGroups model.InstanceGroups, labels map[string]string, forceBuildAll bool) func(*tar.Writer) error {
 	return func(tarWriter *tar.Writer) error {
 		var err error
 
-		if len(roles) == 0 {
-			return fmt.Errorf("No roles to build")
+		if len(instanceGroups) == 0 {
+			return fmt.Errorf("No instance groups to build")
 		}
 
 		// Collect compiled packages
 		foundFingerprints := make(map[string]struct{})
 		var packages model.Packages
-		for _, role := range roles {
-			for _, roleJob := range role.RoleJobs {
-				for _, pkg := range roleJob.Packages {
+		for _, instanceGroup := range instanceGroups {
+			for _, jobReference := range instanceGroup.JobReferences {
+				for _, pkg := range jobReference.Packages {
 					if _, ok := foundFingerprints[pkg.Fingerprint]; ok {
-						// Package has already been found (possibly due to a different role)
+						// Package has already been found (possibly due to a different instance group)
 						continue
 					}
 					packages = append(packages, pkg)
@@ -266,12 +266,12 @@
 	return dockerfileTemplate.Execute(outputFile, context)
 }
 
-// GetPackagesLayerImageName generates a docker image name for the amalgamation holding all packages used in the specified roles
-func (p *PackagesImageBuilder) GetPackagesLayerImageName(roleManifest *model.RoleManifest, roles model.Roles, grapher util.ModelGrapher) (string, error) {
+// GetPackagesLayerImageName generates a docker image name for the amalgamation holding all packages used in the specified instance group
+func (p *PackagesImageBuilder) GetPackagesLayerImageName(roleManifest *model.RoleManifest, instanceGroups model.InstanceGroups, grapher util.ModelGrapher) (string, error) {
 	// Get the list of packages; use the fingerprint to ensure we have no repeats
 	pkgMap := make(map[string]*model.Package)
-	for _, r := range roles {
-		for _, j := range r.RoleJobs {
+	for _, r := range instanceGroups {
+		for _, j := range r.JobReferences {
 			for _, pkg := range j.Packages {
 				pkgMap[pkg.Fingerprint] = pkg
 			}

diff --git a/builder/packages_image_test.go b/builder/packages_image_test.go
index 48eb5a0b..ecb53c9c 100644
--- a/builder/packages_image_test.go
+++ b/builder/packages_image_test.go
@@ -124,12 +124,12 @@ func TestNewDockerPopulator(t *testing.T) {
 
 	tarFile := &bytes.Buffer{}
 
-	tarPopulator := packagesImageBuilder.NewDockerPopulator(roleManifest.Roles, labels, false)
+	tarPopulator := packagesImageBuilder.NewDockerPopulator(roleManifest.InstanceGroups, labels, false)
 	tarWriter := tar.NewWriter(tarFile)
 	assert.NoError(tarPopulator(tarWriter))
 	assert.NoError(tarWriter.Close())
 
-	pkg := getPackage(roleManifest.Roles, "myrole", "tor", "tor")
+	pkg := getPackage(roleManifest.InstanceGroups, "myrole", "tor", "tor")
 	if !assert.NotNil(pkg) {
 		return
 	}
@@ -164,8 +164,8 @@
 		func() {
 			expected := []string{
 				"LABEL",
-				fmt.Sprintf(`"fingerprint.%s"="libevent"`, getPackage(roleManifest.Roles, "myrole", "tor", "libevent").Fingerprint),
-				fmt.Sprintf(`"fingerprint.%s"="tor"`, getPackage(roleManifest.Roles, "myrole", "tor", "tor").Fingerprint),
+				fmt.Sprintf(`"fingerprint.%s"="libevent"`, getPackage(roleManifest.InstanceGroups, "myrole", "tor", "libevent").Fingerprint),
+				fmt.Sprintf(`"fingerprint.%s"="tor"`, getPackage(roleManifest.InstanceGroups, "myrole", "tor", "tor").Fingerprint),
 			}
 			actual := strings.Fields(line)
 			sort.Strings(expected[1:])
@@ -243,11 +243,11 @@ func TestGetRolePackageImageName(t *testing.T) {
 			stemcellImageID: "stemcell:latest",
 		}
 
-		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.Roles, nil)
+		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.InstanceGroups, nil)
 		assert.NoError(t, err)
 
 		builder.fissileVersion += ".4.5.6"
-		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.Roles, nil)
+		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.InstanceGroups, nil)
 		assert.NoError(t, err)
 
 		assert.NotEqual(t, oldImageName, newImageName, "Changing fissile version should change package layer hash")
@@ -261,11 +261,11 @@ func TestGetRolePackageImageName(t *testing.T) {
 			stemcellImageID: "stemcell:latest",
 		}
 
-		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.Roles, nil)
+		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.InstanceGroups, nil)
 		assert.NoError(t, err)
 
 		builder.stemcellImageID = "stemcell:newer"
-		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.Roles, nil)
+		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.InstanceGroups, nil)
 		assert.NoError(t, err)
 
 		assert.NotEqual(t, oldImageName, newImageName, "Changing stemcell image ID should change package layer hash")
@@ -280,11 +280,11 @@
 			fissileVersion:  "0.1.2",
 			stemcellImageID: "stemcell:latest",
 		}
-		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.Roles, nil)
+		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.InstanceGroups, nil)
 		assert.NoError(t, err)
 
 		builder.repository = "repository"
-		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.Roles, nil)
+		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.InstanceGroups, nil)
 		assert.NoError(t, err)
 
 		assert.NotEqual(t, oldImageName, newImageName, "Changing repository should change package layer hash")
@@ -302,7 +302,7 @@
 			stemcellImageID: "stemcell:latest",
 		}
 
-		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.Roles, nil)
+		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.InstanceGroups, nil)
 		assert.NoError(t, err)
 
 		yamlRaw, err := ioutil.ReadFile(roleManifestPath)
@@ -323,7 +323,7 @@
 		modifiedRoleManifest, err := model.LoadRoleManifest(tempManifestFile.Name(), []*model.Release{release}, nil)
 		assert.NoError(t, err, "Error loading modified role manifest")
 
-		newImageName, err := builder.GetPackagesLayerImageName(modifiedRoleManifest, modifiedRoleManifest.Roles, nil)
+		newImageName, err := builder.GetPackagesLayerImageName(modifiedRoleManifest, modifiedRoleManifest.InstanceGroups, nil)
 		assert.NoError(t, err)
 		assert.Equal(t, oldImageName, newImageName, "Changing templates should not change image hash")
 	})
@@ -338,16 +338,16 @@
 		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, nil, nil)
 		assert.NoError(t, err)
 
-		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.Roles, nil)
+		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, roleManifest.InstanceGroups, nil)
 		assert.NoError(t, err)
 
 		assert.NotEqual(t, oldImageName, newImageName, "Changing roles should change package layer hash")
 	})
 
-	makeTemplateRole := func() *model.Role {
-		return &model.Role{
+	makeTemplateRole := func() *model.InstanceGroup {
+		return &model.InstanceGroup{
 			Name: "test-role",
-			RoleJobs: []*model.RoleJob{
+			JobReferences: []*model.JobReference{
 				{
 					Name: "test-job",
 					Job: &model.Job{
@@ -377,11 +377,11 @@
 			stemcellImageID: "stemcell:latest",
 		}
 		role := makeTemplateRole()
-		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, model.Roles{role}, nil)
+		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, model.InstanceGroups{role}, nil)
 		assert.NoError(t, err)
 
-		role.RoleJobs[0].Packages[0].SHA1 = "different sha1"
-		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, model.Roles{role}, nil)
+		role.JobReferences[0].Packages[0].SHA1 = "different sha1"
+		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, model.InstanceGroups{role}, nil)
 		assert.NoError(t, err)
 
 		assert.NotEqual(t, oldImageName, newImageName, "Changing package SHA1 should change package layer hash")
@@ -395,11 +395,11 @@
 			stemcellImageID: "stemcell:latest",
 		}
 		role := makeTemplateRole()
-		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, model.Roles{role}, nil)
+		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, model.InstanceGroups{role}, nil)
 		assert.NoError(t, err)
 
-		role.RoleJobs[0].Packages[0].Fingerprint = "different fingerprint"
-		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, model.Roles{role}, nil)
+		role.JobReferences[0].Packages[0].Fingerprint = "different fingerprint"
+		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, model.InstanceGroups{role}, nil)
 		assert.NoError(t, err)
 
 		assert.NotEqual(t, oldImageName, newImageName, "Changing package fingerprint should change package layer hash")
@@ -413,11 +413,11 @@
 			stemcellImageID: "stemcell:latest",
 		}
 		role := makeTemplateRole()
-		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, model.Roles{role}, nil)
+		oldImageName, err := builder.GetPackagesLayerImageName(roleManifest, model.InstanceGroups{role}, nil)
 		assert.NoError(t, err)
 
-		role.RoleJobs[0].Packages[0].Name = "different name"
-		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, model.Roles{role}, nil)
+		role.JobReferences[0].Packages[0].Name = "different name"
+		newImageName, err := builder.GetPackagesLayerImageName(roleManifest, model.InstanceGroups{role}, nil)
 		assert.NoError(t, err)
 
 		assert.NotEqual(t, oldImageName, newImageName, "Changing package name should change package layer hash")

diff --git a/builder/role_image.go b/builder/role_image.go
index 076c61c3..95075faa 100644
--- a/builder/role_image.go
+++ b/builder/role_image.go
@@ -75,23 +75,23 @@ func NewRoleImageBuilder(repository, compiledPackagesPath, targetPath, lightOpin
 }
 
 // NewDockerPopulator returns a function which can populate a tar stream with the docker context to build the packages layer image with
-func (r *RoleImageBuilder) NewDockerPopulator(role *model.Role, baseImageName string) func(*tar.Writer) error {
+func (r *RoleImageBuilder) NewDockerPopulator(instanceGroup *model.InstanceGroup, baseImageName string) func(*tar.Writer) error {
 	return func(tarWriter *tar.Writer) error {
-		if len(role.RoleJobs) == 0 {
-			return fmt.Errorf("Error - role %s has 0 jobs", role.Name)
+		if len(instanceGroup.JobReferences) == 0 {
+			return fmt.Errorf("Error - instance group %s has 0 jobs", instanceGroup.Name)
 		}
 
 		// Write out release license files
 		releaseLicensesWritten := map[string]struct{}{}
-		for _, roleJob := range role.RoleJobs {
-			if _, ok := releaseLicensesWritten[roleJob.Release.Name]; !ok {
-				if len(roleJob.Release.License.Files) == 0 {
+		for _, jobReference := range instanceGroup.JobReferences {
+			if _, ok := releaseLicensesWritten[jobReference.Release.Name]; !ok {
+				if len(jobReference.Release.License.Files) == 0 {
 					continue
 				}
 
-				releaseDir := filepath.Join("root/opt/fissile/share/doc", roleJob.Release.Name)
+				releaseDir := filepath.Join("root/opt/fissile/share/doc", jobReference.Release.Name)
 
-				for filename, contents := range roleJob.Release.License.Files {
+				for filename, contents := range jobReference.Release.License.Files {
 					err := util.WriteToTarStream(tarWriter, contents, tar.Header{
 						Name: filepath.Join(releaseDir, filename),
 					})
@@ -99,14 +99,14 @@
 						return fmt.Errorf("failed to write out release license file %s: %v", filename, err)
 					}
 				}
-				releaseLicensesWritten[roleJob.Release.Name] = struct{}{}
+				releaseLicensesWritten[jobReference.Release.Name] = struct{}{}
 			}
 		}
 
 		// Symlink compiled packages
 		packageSet := map[string]string{}
-		for _, roleJob := range role.RoleJobs {
-			for _, pkg := range roleJob.Packages {
+		for _, jobReference := range instanceGroup.JobReferences {
+			for _, pkg := range jobReference.Packages {
 				if _, ok := packageSet[pkg.Name]; !ok {
 					err := util.WriteToTarStream(tarWriter, nil, tar.Header{
 						Name: filepath.Join("root/var/vcap/packages", pkg.Name),
@@ -127,24 +127,24 @@
 		}
 
 		// Copy jobs templates, spec configs and monit
-		for _, roleJob := range role.RoleJobs {
+		for _, jobReference := range instanceGroup.JobReferences {
 			templates := make(map[string]*model.JobTemplate)
-			for _, template := range roleJob.Templates {
+			for _, template := range jobReference.Templates {
 				sourcePath := filepath.Clean(filepath.Join("templates", template.SourcePath))
 				templates[filepath.ToSlash(sourcePath)] = template
 			}
 
-			sourceTgz, err := os.Open(roleJob.Path)
+			sourceTgz, err := os.Open(jobReference.Path)
 			if err != nil {
-				return fmt.Errorf("Error reading archive for job %s (%s): %s", roleJob.Name, roleJob.Path, err)
+				return fmt.Errorf("Error reading archive for job %s (%s): %s", jobReference.Name, jobReference.Path, err)
 			}
 			defer sourceTgz.Close()
 
-			err = util.TargzIterate(roleJob.Path, sourceTgz, func(reader *tar.Reader, header *tar.Header) error {
+			err = util.TargzIterate(jobReference.Path, sourceTgz, func(reader *tar.Reader, header *tar.Header) error {
 				filePath := filepath.ToSlash(filepath.Clean(header.Name))
 				if filePath == "job.MF" {
 					return nil
 				}
-				header.Name = filepath.Join("root/var/vcap/jobs-src", roleJob.Name, header.Name)
+				header.Name = filepath.Join("root/var/vcap/jobs-src", jobReference.Name, header.Name)
 				if template, ok := templates[filePath]; ok {
 					if strings.HasPrefix(template.DestinationPath, fmt.Sprintf("%s%c", binPrefix, os.PathSeparator)) {
 						header.Mode = 0755
@@ -153,26 +153,26 @@
 					}
 				}
 				if err = tarWriter.WriteHeader(header); err != nil {
-					return fmt.Errorf("Error writing header %s for job %s: %s", filePath, roleJob.Name, err)
+					return fmt.Errorf("Error writing header %s for job %s: %s", filePath, jobReference.Name, err)
 				}
 				if _, err = io.Copy(tarWriter, reader); err != nil {
-					return fmt.Errorf("Error writing %s for job %s: %s", filePath, roleJob.Name, err)
+					return fmt.Errorf("Error writing %s for job %s: %s", filePath, jobReference.Name, err)
 				}
 				return nil
 			})
 
 			// Write spec into /var/vcap/job-src/<job>/config_spec.json
-			configJSON, err := roleJob.WriteConfigs(role, r.lightOpinionsPath, r.darkOpinionsPath)
+			configJSON, err := jobReference.WriteConfigs(instanceGroup, r.lightOpinionsPath, r.darkOpinionsPath)
 			if err != nil {
 				return err
 			}
 			util.WriteToTarStream(tarWriter, configJSON, tar.Header{
-				Name: filepath.Join("root/var/vcap/jobs-src", roleJob.Name, jobConfigSpecFilename),
+				Name: filepath.Join("root/var/vcap/jobs-src", jobReference.Name, jobConfigSpecFilename),
 			})
 		}
 
 		// Copy role startup scripts
-		for script, sourceScriptPath := range role.GetScriptPaths() {
+		for script, sourceScriptPath := range instanceGroup.GetScriptPaths() {
 			err := util.CopyFileToTarStream(tarWriter, sourceScriptPath, &tar.Header{
 				Name: filepath.Join("root/opt/fissile/startup", script),
 			})
@@ -182,7 +182,7 @@
 		}
 
 		// Generate run script
-		runScriptContents, err := r.generateRunScript(role, "run.sh")
+		runScriptContents, err := r.generateRunScript(instanceGroup, "run.sh")
 		if err != nil {
 			return err
 		}
@@ -194,7 +194,7 @@
 			return err
 		}
 
-		preStopScriptContents, err := r.generateRunScript(role, "pre-stop.sh")
+		preStopScriptContents, err := r.generateRunScript(instanceGroup, "pre-stop.sh")
 		if err != nil {
 			return err
 		}
@@ -206,7 +206,7 @@
 			return err
 		}
 
-		jobsConfigContents, err := r.generateJobsConfig(role)
+		jobsConfigContents, err := r.generateJobsConfig(instanceGroup)
 		if err != nil {
 			return err
 		}
@@ -218,7 +218,7 @@
 		}
 
 		// Copy readiness probe script
-		readinessProbeScriptContents, err := r.generateRunScript(role, "readiness-probe.sh")
+		readinessProbeScriptContents, err := r.generateRunScript(instanceGroup, "readiness-probe.sh")
 		if err != nil {
 			return err
 		}
@@ -231,7 +231,7 @@
 		}
 
 		// Create env2conf templates file in /opt/fissile/env2conf.yml
-		configTemplatesBytes, err := yaml.Marshal(role.Configuration.Templates)
+		configTemplatesBytes, err := yaml.Marshal(instanceGroup.Configuration.Templates)
 		if err != nil {
 			return err
 		}
@@ -244,7 +244,7 @@
 
 		// Generate Dockerfile
 		buf := &bytes.Buffer{}
-		if err := r.generateDockerfile(role, baseImageName, buf); err != nil {
+		if err := r.generateDockerfile(instanceGroup, baseImageName, buf); err != nil {
 			return err
 		}
 		err = util.WriteToTarStream(tarWriter, buf.Bytes(), tar.Header{
@@ -258,7 +258,7 @@
 	}
 }
 
-func (r *RoleImageBuilder) generateRunScript(role *model.Role, assetName string) ([]byte, error) {
+func (r *RoleImageBuilder) generateRunScript(instanceGroup *model.InstanceGroup, assetName string) ([]byte, error) {
 	asset, err := dockerfiles.Asset(assetName)
 	if err != nil {
 		return nil, err
@@ -274,7 +274,7 @@
 		},
 	})
 	context := map[string]interface{}{
-		"role": role,
+		"instance_group": instanceGroup,
 	}
 	runScriptTemplate, err = runScriptTemplate.Parse(string(asset))
 	if err != nil {
@@ -290,26 +290,26 @@
 	return output.Bytes(), nil
 }
 
-func (r *RoleImageBuilder) generateJobsConfig(role *model.Role) ([]byte, error) {
+func (r *RoleImageBuilder) generateJobsConfig(instanceGroup *model.InstanceGroup) ([]byte, error) {
 	jobsConfig := make(map[string]map[string]interface{})
 
-	for index, roleJob := range role.RoleJobs {
-		jobsConfig[roleJob.Name] = make(map[string]interface{})
fmt.Sprintf("/var/vcap/jobs-src/%s/config_spec.json", roleJob.Name) + for index, jobReference := range instanceGroup.JobReferences { + jobsConfig[jobReference.Name] = make(map[string]interface{}) + jobsConfig[jobReference.Name]["base"] = fmt.Sprintf("/var/vcap/jobs-src/%s/config_spec.json", jobReference.Name) files := make(map[string]string) - for _, file := range roleJob.Templates { + for _, file := range jobReference.Templates { src := fmt.Sprintf("/var/vcap/jobs-src/%s/templates/%s", - roleJob.Name, file.SourcePath) + jobReference.Name, file.SourcePath) dest := fmt.Sprintf("/var/vcap/jobs/%s/%s", - roleJob.Name, file.DestinationPath) + jobReference.Name, file.DestinationPath) files[src] = dest } - if role.Type != "bosh-task" { - src := fmt.Sprintf("/var/vcap/jobs-src/%s/monit", roleJob.Name) - dest := fmt.Sprintf("/var/vcap/monit/%s.monitrc", roleJob.Name) + if instanceGroup.Type != "bosh-task" { + src := fmt.Sprintf("/var/vcap/jobs-src/%s/monit", jobReference.Name) + dest := fmt.Sprintf("/var/vcap/monit/%s.monitrc", jobReference.Name) files[src] = dest if index == 0 { @@ -317,7 +317,7 @@ func (r *RoleImageBuilder) generateJobsConfig(role *model.Role) ([]byte, error) } } - jobsConfig[roleJob.Name]["files"] = files + jobsConfig[jobReference.Name]["files"] = files } jsonOut, err := json.Marshal(jobsConfig) @@ -329,7 +329,7 @@ func (r *RoleImageBuilder) generateJobsConfig(role *model.Role) ([]byte, error) } // generateDockerfile builds a docker file for a given role. -func (r *RoleImageBuilder) generateDockerfile(role *model.Role, baseImageName string, outputFile io.Writer) error { +func (r *RoleImageBuilder) generateDockerfile(instanceGroup *model.InstanceGroup, baseImageName string, outputFile io.Writer) error { asset, err := dockerfiles.Asset("Dockerfile-role") if err != nil { return err @@ -338,9 +338,9 @@ func (r *RoleImageBuilder) generateDockerfile(role *model.Role, baseImageName st dockerfileTemplate := template.New("Dockerfile-role") context := map[string]interface{}{ - "base_image": baseImageName, - "role": role, - "licenses": role.RoleJobs[0].Release.License.Files, + "base_image": baseImageName, + "instance_group": instanceGroup, + "licenses": instanceGroup.JobReferences[0].Release.License.Files, } dockerfileTemplate, err = dockerfileTemplate.Parse(string(asset)) @@ -352,7 +352,7 @@ func (r *RoleImageBuilder) generateDockerfile(role *model.Role, baseImageName st } type roleBuildJob struct { - role *model.Role + instanceGroup *model.InstanceGroup builder *RoleImageBuilder ui *termui.UI grapher util.ModelGrapher @@ -382,7 +382,7 @@ func (j roleBuildJob) Run() { return err } - devVersion, err := j.role.GetRoleDevVersion(opinions, j.builder.tagExtra, j.builder.fissileVersion, j.grapher) + devVersion, err := j.instanceGroup.GetRoleDevVersion(opinions, j.builder.tagExtra, j.builder.fissileVersion, j.grapher) if err != nil { return err } @@ -395,10 +395,10 @@ func (j roleBuildJob) Run() { var outputPath string if j.outputDirectory == "" { - roleImageName = GetRoleDevImageName(j.registry, j.organization, j.repository, j.role, devVersion) + roleImageName = GetRoleDevImageName(j.registry, j.organization, j.repository, j.instanceGroup, devVersion) outputPath = fmt.Sprintf("%s.tar", roleImageName) } else { - roleImageName = GetRoleDevImageName("", "", j.repository, j.role, devVersion) + roleImageName = GetRoleDevImageName("", "", j.repository, j.instanceGroup, devVersion) outputPath = filepath.Join(j.outputDirectory, fmt.Sprintf("%s.tar", roleImageName)) } @@ -407,7 +407,7 @@ func (j 
@@ -407,7 +407,7 @@ func (j roleBuildJob) Run() {
 		if hasImage, err := j.dockerManager.HasImage(roleImageName); err != nil {
 			return err
 		} else if hasImage {
-			j.ui.Printf("Skipping build of role image %s because it exists\n", color.YellowString(j.role.Name))
+			j.ui.Printf("Skipping build of role image %s because it exists\n", color.YellowString(j.instanceGroup.Name))
 			return nil
 		}
 	} else {
@@ -432,16 +432,16 @@
 		defer stampy.Stamp(j.builder.metricsPath, "fissile", seriesName, "done")
 	}
 
-	j.ui.Printf("Creating Dockerfile for role %s ...\n", color.YellowString(j.role.Name))
-	dockerPopulator := j.builder.NewDockerPopulator(j.role, j.baseImageName)
+	j.ui.Printf("Creating Dockerfile for role %s ...\n", color.YellowString(j.instanceGroup.Name))
+	dockerPopulator := j.builder.NewDockerPopulator(j.instanceGroup, j.baseImageName)
 
 	if j.noBuild {
-		j.ui.Printf("Skipping build of role image %s because of flag\n", color.YellowString(j.role.Name))
+		j.ui.Printf("Skipping build of role image %s because of flag\n", color.YellowString(j.instanceGroup.Name))
 		return nil
 	}
 
 	if j.outputDirectory == "" {
-		j.ui.Printf("Building docker image of %s...\n", color.YellowString(j.role.Name))
+		j.ui.Printf("Building docker image of %s...\n", color.YellowString(j.instanceGroup.Name))
 
 		log := new(bytes.Buffer)
 		stdoutWriter := docker.NewFormattingWriter(
@@ -455,7 +455,7 @@
 			return fmt.Errorf("Error building image: %s", err.Error())
 		}
 	} else {
-		j.ui.Printf("Building tarball of %s...\n", color.YellowString(j.role.Name))
+		j.ui.Printf("Building tarball of %s...\n", color.YellowString(j.instanceGroup.Name))
 
 		tarFile, err := os.Create(outputPath)
 		if err != nil {
@@ -478,7 +478,7 @@
 }
 
 // BuildRoleImages triggers the building of the role docker images in parallel
-func (r *RoleImageBuilder) BuildRoleImages(roles model.Roles, registry, organization, repository, baseImageName, outputDirectory string, force, noBuild bool, workerCount int) error {
+func (r *RoleImageBuilder) BuildRoleImages(instanceGroups model.InstanceGroups, registry, organization, repository, baseImageName, outputDirectory string, force, noBuild bool, workerCount int) error {
 	if workerCount < 1 {
 		return fmt.Errorf("Invalid worker count %d", workerCount)
 	}
@@ -499,9 +499,9 @@
 	resultsCh := make(chan error)
 	abort := make(chan struct{})
 
-	for _, role := range roles {
+	for _, instanceGroup := range instanceGroups {
 		worker.Add(roleBuildJob{
-			role:            role,
+			instanceGroup:   instanceGroup,
 			builder:         r,
 			ui:              r.ui,
 			grapher:         r.grapher,
@@ -521,7 +521,7 @@
 	go worker.RunUntilDone()
 
 	aborted := false
-	for i := 0; i < len(roles); i++ {
+	for i := 0; i < len(instanceGroups); i++ {
 		result := <-resultsCh
 		if result != nil {
 			if !aborted {
@@ -536,7 +536,7 @@
 }
 
 // GetRoleDevImageName generates a docker image name to be used as a dev role image
-func GetRoleDevImageName(registry, organization, repository string, role *model.Role, version string) string {
+func GetRoleDevImageName(registry, organization, repository string, instanceGroup *model.InstanceGroup, version string) string {
 	var imageName string
 	if registry != "" {
 		imageName = registry + "/"
 	}
@@ -546,7 +546,7 @@ func GetRoleDevImageName(registry, organization, repository string, role *model.
 		imageName += util.SanitizeDockerName(organization) + "/"
 	}
 
-	imageName += util.SanitizeDockerName(fmt.Sprintf("%s-%s", repository, role.Name))
+	imageName += util.SanitizeDockerName(fmt.Sprintf("%s-%s", repository, instanceGroup.Name))
 
 	return fmt.Sprintf("%s:%s", imageName, util.SanitizeDockerName(version))
 }

diff --git a/builder/role_image_test.go b/builder/role_image_test.go
index 102c4156..2fab9c6b 100644
--- a/builder/role_image_test.go
+++ b/builder/role_image_test.go
@@ -54,7 +54,7 @@ func TestGenerateRoleImageDockerfile(t *testing.T) {
 
 	var dockerfileContents bytes.Buffer
 	baseImage := roleImageBuilder.repository
-	err = roleImageBuilder.generateDockerfile(roleManifest.Roles[0], baseImage, &dockerfileContents)
+	err = roleImageBuilder.generateDockerfile(roleManifest.InstanceGroups[0], baseImage, &dockerfileContents)
 	assert.NoError(err)
 
 	dockerfileString := dockerfileContents.String()
@@ -62,12 +62,12 @@
 	assert.Contains(dockerfileString, "MAINTAINER", "release images should contain maintainer information")
 	assert.Contains(
 		dockerfileString,
-		fmt.Sprintf(`LABEL "role"="%s"`, roleManifest.Roles[0].Name),
+		fmt.Sprintf(`LABEL "instance_group"="%s"`, roleManifest.InstanceGroups[0].Name),
 		"Expected role label",
 	)
 
 	dockerfileContents.Reset()
-	err = roleImageBuilder.generateDockerfile(roleManifest.Roles[0], baseImage, &dockerfileContents)
+	err = roleImageBuilder.generateDockerfile(roleManifest.InstanceGroups[0], baseImage, &dockerfileContents)
 	assert.NoError(err)
 
 	dockerfileString = dockerfileContents.String()
 	assert.Contains(dockerfileString, "MAINTAINER", "dev mode should generate a maintainer layer")
@@ -105,7 +105,7 @@ func TestGenerateRoleImageRunScript(t *testing.T) {
 	roleImageBuilder, err := NewRoleImageBuilder("foo", compiledPackagesDir, targetPath, lightOpinionsPath, darkOpinionsPath, "", "deadbeef", "6.28.30", ui, nil)
 	assert.NoError(err)
 
-	runScriptContents, err := roleImageBuilder.generateRunScript(roleManifest.Roles[0], "run.sh")
+	runScriptContents, err := roleImageBuilder.generateRunScript(roleManifest.InstanceGroups[0], "run.sh")
 	if assert.NoError(err) {
 		assert.Contains(string(runScriptContents), "source /opt/fissile/startup/environ.sh")
 		assert.Contains(string(runScriptContents), "source /environ/script/with/absolute/path.sh")
@@ -122,13 +122,13 @@
 		assert.Contains(string(runScriptContents), "monit -vI &")
 	}
 
-	runScriptContents, err = roleImageBuilder.generateRunScript(roleManifest.Roles[1], "run.sh")
+	runScriptContents, err = roleImageBuilder.generateRunScript(roleManifest.InstanceGroups[1], "run.sh")
 	if assert.NoError(err) {
 		assert.NotContains(string(runScriptContents), "monit -vI")
 		assert.Contains(string(runScriptContents), "/var/vcap/jobs/tor/bin/run")
 	}
 
-	preStopScriptContents, err := roleImageBuilder.generateRunScript(roleManifest.Roles[0], "pre-stop.sh")
+	preStopScriptContents, err := roleImageBuilder.generateRunScript(roleManifest.InstanceGroups[0], "pre-stop.sh")
 	if assert.NoError(err) {
 		var wantedLine string
 		for _, line := range strings.Split(string(preStopScriptContents), "\n") {
@@ -176,14 +176,14 @@ func TestGenerateRoleImageJobsConfig(t *testing.T) {
 	roleImageBuilder, err := NewRoleImageBuilder("foo", compiledPackagesDir, targetPath, lightOpinionsPath, darkOpinionsPath, "", "deadbeef", "6.28.30", ui, nil)
 	assert.NoError(err)
 
-	jobsConfigContents, err := roleImageBuilder.generateJobsConfig(roleManifest.Roles[0])
+	jobsConfigContents, err := roleImageBuilder.generateJobsConfig(roleManifest.InstanceGroups[0])
 	assert.NoError(err)
 	assert.Contains(string(jobsConfigContents), "/var/vcap/jobs/tor/bin/tor_ctl")
 	assert.Contains(string(jobsConfigContents), "/var/vcap/jobs-src/tor/templates/data/properties.sh.erb")
 	assert.Contains(string(jobsConfigContents), "/etc/monitrc")
 	assert.Contains(string(jobsConfigContents), "/var/vcap/jobs/new_hostname/bin/run")
 
-	jobsConfigContents, err = roleImageBuilder.generateJobsConfig(roleManifest.Roles[1])
+	jobsConfigContents, err = roleImageBuilder.generateJobsConfig(roleManifest.InstanceGroups[1])
 	assert.NoError(err)
 	assert.Contains(string(jobsConfigContents), "/var/vcap/jobs/tor/bin/tor_ctl")
 	assert.Contains(string(jobsConfigContents), "/var/vcap/jobs-src/tor/templates/data/properties.sh.erb")
@@ -226,7 +226,7 @@ func TestGenerateRoleImageDockerfileDir(t *testing.T) {
 	roleImageBuilder, err := NewRoleImageBuilder("foo", compiledPackagesDir, targetPath, lightOpinionsPath, darkOpinionsPath, "", "deadbeef", "6.28.30", ui, nil)
 	assert.NoError(err)
 
-	torPkg := getPackage(roleManifest.Roles, "myrole", "tor", "tor")
+	torPkg := getPackage(roleManifest.InstanceGroups, "myrole", "tor", "tor")
 
 	const TypeMissing byte = tar.TypeCont // flag to indicate an expected missing file
 	expected := map[string]struct {
@@ -240,7 +240,7 @@
 		"root/opt/fissile/run.sh":             {desc: "run script", mode: 0755},
 		"root/opt/fissile/pre-stop.sh":        {desc: "pre-stop script", mode: 0755},
 		"root/opt/fissile/readiness-probe.sh": {desc: "readiness probe script", mode: 0755},
-		"root/opt/fissile/startup/myrole.sh":  {desc: "role specific startup script"},
+		"root/opt/fissile/startup/myrole.sh":  {desc: "instance group specific startup script"},
 		"root/var/vcap/jobs-src/tor/monit":    {desc: "job monit file"},
 		"root/var/vcap/jobs-src/tor/templates/bin/monit_debugger": {desc: "job template file"},
 		"root/var/vcap/jobs-src/tor/config_spec.json":             {desc: "tor config spec", keep: true, mode: 0644},
@@ -250,7 +250,7 @@
 	}
 	actual := make(map[string][]byte)
 
-	populator := roleImageBuilder.NewDockerPopulator(roleManifest.Roles[0], releasePathConfigSpec)
+	populator := roleImageBuilder.NewDockerPopulator(roleManifest.InstanceGroups[0], releasePathConfigSpec)
 
 	pipeR, pipeW, err := os.Pipe()
 	assert.NoError(err, "Failed to create a pipe")
@@ -352,8 +352,8 @@
 }
 
 // getPackage is a helper to get a package from a list of roles
-func getPackage(roles model.Roles, role, job, pkg string) *model.Package {
-	for _, r := range roles {
+func getPackage(instanceGroups model.InstanceGroups, role, job, pkg string) *model.Package {
+	for _, r := range instanceGroups {
 		if r.Name != role {
 			continue
 		}
@@ -474,7 +474,7 @@ func TestBuildRoleImages(t *testing.T) {
 	}
 
 	err = roleImageBuilder.BuildRoleImages(
-		roleManifest.Roles,
+		roleManifest.InstanceGroups,
 		"test-registry.com:9000",
 		"test-organization",
 		"test-repository",
@@ -496,7 +496,7 @@
 
 	// Should not allow invalid worker counts
 	err = roleImageBuilder.BuildRoleImages(
-		roleManifest.Roles,
+		roleManifest.InstanceGroups,
 		"test-registry.com:9000",
 		"test-organization",
 		"test-repository",
@@ -524,7 +524,7 @@
 	}
 
 	err = roleImageBuilder.BuildRoleImages(
-		roleManifest.Roles,
+		roleManifest.InstanceGroups,
 		"test-registry.com:9000",
 		"test-organization",
 		"test-repository",
@@ -550,7 +550,7 @@ func TestBuildRoleImages(t *testing.T) {
 		return nil
 	}
 	err = roleImageBuilder.BuildRoleImages(
-		roleManifest.Roles,
+		roleManifest.InstanceGroups,
 		"test-registry.com:9000",
 		"test-organization",
 		"test-repository",
@@ -558,7 +558,7 @@
 		"",
 		false,
 		false,
-		len(roleManifest.Roles),
+		len(roleManifest.InstanceGroups),
 	)
 	assert.NoError(err)
 	assert.Empty(buildersRan, "should not have ran any builders")
@@ -584,7 +584,7 @@
 		return nil
 	}
 	err = roleImageBuilder.BuildRoleImages(
-		roleManifest.Roles,
+		roleManifest.InstanceGroups,
 		"test-registry.com:9000",
 		"test-organization",
 		"test-repository",
@@ -609,9 +609,9 @@
 func TestGetRoleDevImageName(t *testing.T) {
 	assert := assert.New(t)
 
-	var role model.Role
+	var instanceGroup model.InstanceGroup
 
-	role.Name = "foorole"
+	instanceGroup.Name = "foorole"
 
 	reg := "test-registry:9000"
 	org := "test-org"
@@ -620,21 +620,21 @@
 
 	// Test with repository only
 	expected := "test-repository-foorole:a886ed76c6d6e5a96ad5c37fb208368a430a29d770f1d149a78e1e6e8091eb12"
-	imageName := GetRoleDevImageName("", "", repo, &role, version)
+	imageName := GetRoleDevImageName("", "", repo, &instanceGroup, version)
 	assert.Equal(expected, imageName)
 
 	// Test with org and repository
 	expected = "test-org/test-repository-foorole:a886ed76c6d6e5a96ad5c37fb208368a430a29d770f1d149a78e1e6e8091eb12"
-	imageName = GetRoleDevImageName("", org, repo, &role, version)
+	imageName = GetRoleDevImageName("", org, repo, &instanceGroup, version)
 	assert.Equal(expected, imageName)
 
 	// Test with registry and repository
 	expected = "test-registry:9000/test-repository-foorole:a886ed76c6d6e5a96ad5c37fb208368a430a29d770f1d149a78e1e6e8091eb12"
-	imageName = GetRoleDevImageName(reg, "", repo, &role, version)
+	imageName = GetRoleDevImageName(reg, "", repo, &instanceGroup, version)
 	assert.Equal(expected, imageName)
 
 	// Test with all three
 	expected = "test-registry:9000/test-org/test-repository-foorole:a886ed76c6d6e5a96ad5c37fb208368a430a29d770f1d149a78e1e6e8091eb12"
-	imageName = GetRoleDevImageName(reg, org, repo, &role, version)
+	imageName = GetRoleDevImageName(reg, org, repo, &instanceGroup, version)
 	assert.Equal(expected, imageName)
 }

diff --git a/compilator/compilator.go b/compilator/compilator.go
index 8e25627b..7081fb50 100644
--- a/compilator/compilator.go
+++ b/compilator/compilator.go
@@ -169,8 +169,8 @@ type compileResult struct {
 // - synchronizer will greedily drain the <-todoCh to starve the
 // workers out and won't wait for the <-doneCh for the N packages it
 // drained.
-func (c *Compilator) Compile(workerCount int, releases []*model.Release, roles model.Roles, verbose bool) error {
-	packages, err := c.removeCompiledPackages(c.gatherPackages(releases, roles), verbose)
+func (c *Compilator) Compile(workerCount int, releases []*model.Release, instanceGroups model.InstanceGroups, verbose bool) error {
+	packages, err := c.removeCompiledPackages(c.gatherPackages(releases, instanceGroups), verbose)
 
 	if err != nil {
 		return fmt.Errorf("failed to remove compiled packages: %v", err)
@@ -245,15 +245,15 @@
 	return err
 }
 
-func (c *Compilator) gatherPackages(releases []*model.Release, roles model.Roles) model.Packages {
+func (c *Compilator) gatherPackages(releases []*model.Release, instanceGroups model.InstanceGroups) model.Packages {
 	var packages []*model.Package
 
 	for _, release := range releases {
 		var releasePackages []*model.Package
 
 		// Get the packages of the release ...
-		if roles != nil { // Conditional for easier testing
-			releasePackages = c.gatherPackagesFromRoles(release, roles)
+		if instanceGroups != nil { // Conditional for easier testing
+			releasePackages = c.gatherPackagesFromInstanceGroups(release, instanceGroups)
 		} else {
 			releasePackages = release.Packages
 		}
@@ -733,21 +733,21 @@
 	return culledPackages, nil
 }
 
-// gatherPackagesFromRoles gathers the list of packages of the release, from a list of roles, as well as all needed dependencies
+// gatherPackagesFromInstanceGroups gathers the list of packages of the release, from a list of instance groups, as well as all needed dependencies
 // This happens to be a subset of release.Packages, which helps avoid compiling unneeded packages
-func (c *Compilator) gatherPackagesFromRoles(release *model.Release, roles model.Roles) []*model.Package {
+func (c *Compilator) gatherPackagesFromInstanceGroups(release *model.Release, instanceGroups model.InstanceGroups) []*model.Package {
 	var resultPackages []*model.Package
 	listedPackages := make(map[string]bool)
 	pendingPackages := list.New()
 
 	// Find the initial list of packages to examine (all packages of the release in the manifest)
-	for _, role := range roles {
-		for _, roleJob := range role.RoleJobs {
-			for _, pkg := range roleJob.Packages {
+	for _, instanceGroup := range instanceGroups {
+		for _, jobReference := range instanceGroup.JobReferences {
+			for _, pkg := range jobReference.Packages {
 				if pkg.Release.Name == release.Name {
 					pendingPackages.PushBack(pkg)
 					if c.grapher != nil {
-						_ = c.grapher.GraphEdge(pkg.Fingerprint, roleJob.Fingerprint, nil)
+						_ = c.grapher.GraphEdge(pkg.Fingerprint, jobReference.Fingerprint, nil)
 					}
 				}
 			}

diff --git a/compilator/compilator_linux.go b/compilator/compilator_linux.go
index c28fde51..53e8265b 100644
--- a/compilator/compilator_linux.go
+++ b/compilator/compilator_linux.go
@@ -34,7 +34,7 @@ func (c *Compilator) compilePackageInMountNS(pkg *model.Package) (err error) {
 	// Extract package
 	extractDir := c.getSourcePackageDir(pkg)
 	if _, err := pkg.Extract(extractDir); err != nil {
-		return fmt.Errorf("faile to extract package: %s", err)
+		return fmt.Errorf("failed to extract package: %s", err)
 	}
 
 	// in-memory buffer of the log

diff --git a/compilator/compilator_test.go b/compilator/compilator_test.go
index 4d7d04f8..543a9d94 100644
--- a/compilator/compilator_test.go
+++ b/compilator/compilator_test.go
@@ -202,7 +202,7 @@ func TestCompilationRoleManifest(t *testing.T) {
make(chan struct{}) errCh := make(chan error) go func() { - errCh <- c.Compile(1, []*model.Release{release}, roleManifest.Roles, false) + errCh <- c.Compile(1, []*model.Release{release}, roleManifest.InstanceGroups, false) }() go func() { // `libevent` is a dependency of `tor` and will be compiled first diff --git a/docs/configuration.md b/docs/configuration.md index 77ff4884..79da2e8b 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -29,7 +29,7 @@ BOSH properties and related BOSHisms. We will be working with the example role manifest for NATS: ```yaml -roles: +instance_groups: - name: nats # The name of the role jobs: # BOSH jobs this role will have - name: nats @@ -155,7 +155,7 @@ example below. While mysql actually needs a load balancer for access this role is made explicit in CF through role `mysql-proxy`. ```yaml -roles: +instance_groups: - name: mysql jobs: - name: mysql diff --git a/kube/deployment.go b/kube/deployment.go index 79d3bb64..960be746 100644 --- a/kube/deployment.go +++ b/kube/deployment.go @@ -9,42 +9,42 @@ import ( "github.com/SUSE/fissile/util" ) -// NewDeployment creates a Deployment for the given role, and its attached services -func NewDeployment(role *model.Role, settings ExportSettings, grapher util.ModelGrapher) (helm.Node, helm.Node, error) { - podTemplate, err := NewPodTemplate(role, settings, grapher) +// NewDeployment creates a Deployment for the given instance group, and its attached services +func NewDeployment(instanceGroup *model.InstanceGroup, settings ExportSettings, grapher util.ModelGrapher) (helm.Node, helm.Node, error) { + podTemplate, err := NewPodTemplate(instanceGroup, settings, grapher) if err != nil { return nil, nil, err } - svc, err := NewServiceList(role, false, settings) + svc, err := NewServiceList(instanceGroup, false, settings) if err != nil { return nil, nil, err } spec := helm.NewMapping() - spec.Add("selector", newSelector(role.Name)) + spec.Add("selector", newSelector(instanceGroup.Name)) spec.Add("template", podTemplate) - deployment := newKubeConfig("extensions/v1beta1", "Deployment", role.Name, helm.Comment(role.GetLongDescription())) + deployment := newKubeConfig("extensions/v1beta1", "Deployment", instanceGroup.Name, helm.Comment(instanceGroup.GetLongDescription())) deployment.Add("spec", spec) - err = replicaCheck(role, deployment, svc, settings) + err = replicaCheck(instanceGroup, deployment, svc, settings) if err != nil { return nil, nil, err } - err = generalCheck(role, deployment, settings) + err = generalCheck(instanceGroup, deployment, settings) return deployment, svc, err } // getAffinityBlock returns an affinity block to add to a podspec -func getAffinityBlock(role *model.Role) *helm.Mapping { +func getAffinityBlock(instanceGroup *model.InstanceGroup) *helm.Mapping { affinity := helm.NewMapping() - if role.Run.Affinity.PodAntiAffinity != nil { + if instanceGroup.Run.Affinity.PodAntiAffinity != nil { // Add pod anti affinity from role manifest - affinity.Add("podAntiAffinity", role.Run.Affinity.PodAntiAffinity) + affinity.Add("podAntiAffinity", instanceGroup.Run.Affinity.PodAntiAffinity) } // Add node affinity template to be filled in by values.yaml - roleName := makeVarName(role.Name) + roleName := makeVarName(instanceGroup.Name) nodeCond := fmt.Sprintf("if .Values.sizing.%s.affinity.nodeAffinity", roleName) nodeAffinity := fmt.Sprintf("{{ toJson .Values.sizing.%s.affinity.nodeAffinity }}", roleName) affinity.Add("nodeAffinity", nodeAffinity, helm.Block(nodeCond)) @@ -53,20 +53,20 @@ func 
getAffinityBlock(role *model.Role) *helm.Mapping { } // addAffinityRules adds affinity rules to the pod spec -func addAffinityRules(role *model.Role, spec *helm.Mapping, settings ExportSettings) error { - if role.Run.Affinity != nil { - if role.Run.Affinity.NodeAffinity != nil { +func addAffinityRules(instanceGroup *model.InstanceGroup, spec *helm.Mapping, settings ExportSettings) error { + if instanceGroup.Run.Affinity != nil { + if instanceGroup.Run.Affinity.NodeAffinity != nil { return errors.New("node affinity in role manifest not allowed") } - if role.Run.Affinity.PodAffinity != nil { + if instanceGroup.Run.Affinity.PodAffinity != nil { return errors.New("pod affinity in role manifest not supported") } if settings.CreateHelmChart { podSpec := spec.Get("template", "spec").(*helm.Mapping) - podSpec.Add("affinity", getAffinityBlock(role)) + podSpec.Add("affinity", getAffinityBlock(instanceGroup)) podSpec.Sort() } @@ -86,7 +86,7 @@ func addAffinityRules(role *model.Role, spec *helm.Mapping, settings ExportSetti // generalCheck adds common guards to the pod described by the // controller. This only applies to helm charts, not basic kube // definitions. -func generalCheck(role *model.Role, controller *helm.Mapping, settings ExportSettings) error { +func generalCheck(instanceGroup *model.InstanceGroup, controller *helm.Mapping, settings ExportSettings) error { if !settings.CreateHelmChart { return nil } @@ -94,7 +94,7 @@ func generalCheck(role *model.Role, controller *helm.Mapping, settings ExportSet // The global config keys found under `sizing` in // `values.yaml` (HA, cpu, memory) were moved out of that // hierarchy into `config`. This gives `sizing` a uniform - // structure, containing only the per-role descriptions. It + // structure, containing only the per-instance-group descriptions. It // also means that we now have to guard ourselves against use // of the old keys. Here we add the necessary guard // conditions. @@ -113,7 +113,7 @@ func generalCheck(role *model.Role, controller *helm.Mapping, settings ExportSet } { // requests, limits - More complex to avoid limitations of the go templating system. // Guard on the main variable and then use a guarded value for the child. - // The else branch is present in case we happen to get roles named `cpu` or `memory`. + // The else branch is present in case we happen to get instance groups named `cpu` or `memory`. for _, subkey := range []string{ "limits", @@ -134,58 +134,58 @@ func generalCheck(role *model.Role, controller *helm.Mapping, settings ExportSet // replicaCheck adds various guards to validate the number of replicas // for the pod described by the controller. It further adds the // replicas specification itself as well. 
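Concretely, these guards are Helm template snippets assembled with `fmt.Sprintf` and attached via `helm.Block`, as the renamed `replicaCheck` below shows. A standalone sketch of the shape of the minimum-count guard (illustrative; the group name and the flattened output are examples, not captured fissile output):

```go
package main

import "fmt"

func main() {
	// Mirror of the _minReplicas guard (illustrative): a fail expression
	// plus the condition that wraps it in a Helm `{{ if }}` block.
	roleName, minCount := "some_group", 1
	fail := fmt.Sprintf(`{{ fail "%s must have at least %d instances" }}`, roleName, minCount)
	cond := fmt.Sprintf("if lt (int .Values.sizing.%s.count) %d", roleName, minCount)
	fmt.Printf("{{ %s }} %s {{ end }}\n", cond, fail)
	// Prints:
	// {{ if lt (int .Values.sizing.some_group.count) 1 }} {{ fail "some_group must have at least 1 instances" }} {{ end }}
}
```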
-func replicaCheck(role *model.Role, controller *helm.Mapping, service helm.Node, settings ExportSettings) error { +func replicaCheck(instanceGroup *model.InstanceGroup, controller *helm.Mapping, service helm.Node, settings ExportSettings) error { spec := controller.Get("spec").(*helm.Mapping) - err := addAffinityRules(role, spec, settings) + err := addAffinityRules(instanceGroup, spec, settings) if err != nil { return err } if !settings.CreateHelmChart { - spec.Add("replicas", role.Run.Scaling.Min) + spec.Add("replicas", instanceGroup.Run.Scaling.Min) spec.Sort() return nil } - roleName := makeVarName(role.Name) + roleName := makeVarName(instanceGroup.Name) count := fmt.Sprintf(".Values.sizing.%s.count", roleName) - if role.Run.Scaling.HA != role.Run.Scaling.Min { + if instanceGroup.Run.Scaling.HA != instanceGroup.Run.Scaling.Min { // Under HA use HA count if the user hasn't explicitly modified the default count count = fmt.Sprintf("{{ if and .Values.config.HA (eq (int %s) %d) -}} %d {{- else -}} {{ %s }} {{- end }}", - count, role.Run.Scaling.Min, role.Run.Scaling.HA, count) + count, instanceGroup.Run.Scaling.Min, instanceGroup.Run.Scaling.HA, count) } else { count = "{{ " + count + " }}" } spec.Add("replicas", count) spec.Sort() - if role.Run.Scaling.Min == 0 { + if instanceGroup.Run.Scaling.Min == 0 { block := helm.Block(fmt.Sprintf("if gt (int .Values.sizing.%s.count) 0", roleName)) controller.Set(block) if service != nil { service.Set(block) } } else { - fail := fmt.Sprintf(`{{ fail "%s must have at least %d instances" }}`, roleName, role.Run.Scaling.Min) - block := fmt.Sprintf("if lt (int .Values.sizing.%s.count) %d", roleName, role.Run.Scaling.Min) + fail := fmt.Sprintf(`{{ fail "%s must have at least %d instances" }}`, roleName, instanceGroup.Run.Scaling.Min) + block := fmt.Sprintf("if lt (int .Values.sizing.%s.count) %d", roleName, instanceGroup.Run.Scaling.Min) controller.Add("_minReplicas", fail, helm.Block(block)) - if role.Run.Scaling.HA != role.Run.Scaling.Min { - fail := fmt.Sprintf(`{{ fail "%s must have at least %d instances for HA" }}`, roleName, role.Run.Scaling.HA) + if instanceGroup.Run.Scaling.HA != instanceGroup.Run.Scaling.Min { + fail := fmt.Sprintf(`{{ fail "%s must have at least %d instances for HA" }}`, roleName, instanceGroup.Run.Scaling.HA) count := fmt.Sprintf(".Values.sizing.%s.count", roleName) // If count != Min then count must be >= HA block := fmt.Sprintf("if and .Values.config.HA (and (ne (int %s) %d) (lt (int %s) %d))", - count, role.Run.Scaling.Min, count, role.Run.Scaling.HA) + count, instanceGroup.Run.Scaling.Min, count, instanceGroup.Run.Scaling.HA) controller.Add("_minHAReplicas", fail, helm.Block(block)) } } - fail := fmt.Sprintf(`{{ fail "%s cannot have more than %d instances" }}`, roleName, role.Run.Scaling.Max) - block := fmt.Sprintf("if gt (int .Values.sizing.%s.count) %d", roleName, role.Run.Scaling.Max) + fail := fmt.Sprintf(`{{ fail "%s cannot have more than %d instances" }}`, roleName, instanceGroup.Run.Scaling.Max) + block := fmt.Sprintf("if gt (int .Values.sizing.%s.count) %d", roleName, instanceGroup.Run.Scaling.Max) controller.Add("_maxReplicas", fail, helm.Block(block)) - if role.Run.Scaling.MustBeOdd { + if instanceGroup.Run.Scaling.MustBeOdd { fail := fmt.Sprintf(`{{ fail "%s must have an odd instance count" }}`, roleName) block := fmt.Sprintf("if eq (mod (int .Values.sizing.%s.count) 2) 0", roleName) controller.Add("_oddReplicas", fail, helm.Block(block)) diff --git a/kube/deployment_test.go b/kube/deployment_test.go index 
6bd8c1ab..923c9571 100644 --- a/kube/deployment_test.go +++ b/kube/deployment_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" ) -func deploymentTestLoadRole(assert *assert.Assertions, roleName, manifestName string) *model.Role { +func deploymentTestLoad(assert *assert.Assertions, roleName, manifestName string) *model.InstanceGroup { workDir, err := os.Getwd() assert.NoError(err) @@ -30,11 +30,11 @@ func deploymentTestLoadRole(assert *assert.Assertions, roleName, manifestName st return nil } - role := manifest.LookupRole(roleName) - if !assert.NotNil(role, "Failed to find role %s", roleName) { + instanceGroup := manifest.LookupInstanceGroup(roleName) + if !assert.NotNil(instanceGroup, "Failed to find instance group %s", roleName) { return nil } - return role + return instanceGroup } type FakeGrapher struct { @@ -52,8 +52,8 @@ func TestNewDeploymentKube(t *testing.T) { t.Parallel() assert := assert.New(t) - role := deploymentTestLoadRole(assert, "role", "pod-with-valid-pod-anti-affinity.yml") - if role == nil { + instanceGroup := deploymentTestLoad(assert, "some-group", "pod-with-valid-pod-anti-affinity.yml") + if instanceGroup == nil { return } @@ -61,21 +61,21 @@ func TestNewDeploymentKube(t *testing.T) { grapher := FakeGrapher{} - deployment, svc, err := NewDeployment(role, settings, grapher) + deployment, svc, err := NewDeployment(instanceGroup, settings, grapher) assert.NoError(err) assert.Nil(svc) assert.NotNil(deployment) assert.Equal(deployment.Get("kind").String(), "Deployment") - assert.Equal(deployment.Get("metadata", "name").String(), "role") + assert.Equal(deployment.Get("metadata", "name").String(), "some-group") } func TestNewDeploymentHelm(t *testing.T) { t.Parallel() assert := assert.New(t) - role := deploymentTestLoadRole(assert, "role", "pod-with-valid-pod-anti-affinity.yml") - if role == nil { + instanceGroup := deploymentTestLoad(assert, "some-group", "pod-with-valid-pod-anti-affinity.yml") + if instanceGroup == nil { return } @@ -86,61 +86,61 @@ func TestNewDeploymentHelm(t *testing.T) { grapher := FakeGrapher{} - deployment, svc, err := NewDeployment(role, settings, grapher) + deployment, svc, err := NewDeployment(instanceGroup, settings, grapher) assert.NoError(err) assert.Nil(svc) assert.NotNil(deployment) assert.Equal(deployment.Get("kind").String(), "Deployment") - assert.Equal(deployment.Get("metadata", "name").String(), "role") + assert.Equal(deployment.Get("metadata", "name").String(), "some-group") t.Run("Defaults", func(t *testing.T) { t.Parallel() // Rendering fails with defaults, template needs information // about sizing and the like. 
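	// Illustrative note (not in the original patch): RenderNode resolves
	// the deployment template against the given override map, so the nil
	// count below stands in for an unset .Values.sizing.some_group.count
	// and trips the minimum-instances guard asserted by EqualError.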
config := map[string]interface{}{ - "Values.sizing.role.count": nil, + "Values.sizing.some_group.count": nil, } _, err := RenderNode(deployment, config) assert.EqualError(err, - `template: :9:17: executing "" at : error calling fail: role must have at least 1 instances`) + `template: :9:17: executing "" at : error calling fail: some_group must have at least 1 instances`) }) t.Run("Configured, not enough replicas", func(t *testing.T) { t.Parallel() config := map[string]interface{}{ - "Values.sizing.role.count": "0", - "Values.sizing.role.affinity.nodeAffinity": "snafu", - "Values.sizing.role.capabilities": []interface{}{}, - "Values.kube.registry.hostname": "docker.suse.fake", - "Values.kube.organization": "splat", - "Values.env.KUBERNETES_CLUSTER_DOMAIN": "cluster.local", + "Values.sizing.some_group.count": "0", + "Values.sizing.some_group.affinity.nodeAffinity": "snafu", + "Values.sizing.some_group.capabilities": []interface{}{}, + "Values.kube.registry.hostname": "docker.suse.fake", + "Values.kube.organization": "splat", + "Values.env.KUBERNETES_CLUSTER_DOMAIN": "cluster.local", } _, err := RenderNode(deployment, config) assert.EqualError(err, - `template: :9:17: executing "" at : error calling fail: role must have at least 1 instances`) + `template: :9:17: executing "" at : error calling fail: some_group must have at least 1 instances`) }) t.Run("Configured, too many replicas", func(t *testing.T) { t.Parallel() config := map[string]interface{}{ - "Values.sizing.role.count": "10", - "Values.sizing.role.affinity.nodeAffinity": "snafu", - "Values.sizing.role.capabilities": []interface{}{}, - "Values.kube.registry.hostname": "docker.suse.fake", - "Values.kube.organization": "splat", - "Values.env.KUBERNETES_CLUSTER_DOMAIN": "cluster.local", + "Values.sizing.some_group.count": "10", + "Values.sizing.some_group.affinity.nodeAffinity": "snafu", + "Values.sizing.some_group.capabilities": []interface{}{}, + "Values.kube.registry.hostname": "docker.suse.fake", + "Values.kube.organization": "splat", + "Values.env.KUBERNETES_CLUSTER_DOMAIN": "cluster.local", } _, err := RenderNode(deployment, config) assert.EqualError(err, - `template: :5:17: executing "" at : error calling fail: role cannot have more than 1 instances`) + `template: :5:17: executing "" at : error calling fail: some_group cannot have more than 1 instances`) }) t.Run("Configured, bad key sizing.HA", func(t *testing.T) { t.Parallel() config := map[string]interface{}{ - "Values.sizing.HA": "true", - "Values.sizing.role.count": "1", + "Values.sizing.HA": "true", + "Values.sizing.some_group.count": "1", } _, err := RenderNode(deployment, config) assert.EqualError(err, @@ -150,8 +150,8 @@ func TestNewDeploymentHelm(t *testing.T) { t.Run("Configured, bad key sizing.memory.limits", func(t *testing.T) { t.Parallel() config := map[string]interface{}{ - "Values.sizing.memory.limits": "true", - "Values.sizing.role.count": "1", + "Values.sizing.memory.limits": "true", + "Values.sizing.some_group.count": "1", } _, err := RenderNode(deployment, config) assert.EqualError(err, @@ -161,8 +161,8 @@ func TestNewDeploymentHelm(t *testing.T) { t.Run("Configured, bad key sizing.memory.requests", func(t *testing.T) { t.Parallel() config := map[string]interface{}{ - "Values.sizing.memory.requests": "true", - "Values.sizing.role.count": "1", + "Values.sizing.memory.requests": "true", + "Values.sizing.some_group.count": "1", } _, err := RenderNode(deployment, config) assert.EqualError(err, @@ -172,8 +172,8 @@ func TestNewDeploymentHelm(t *testing.T) { 
t.Run("Configured, bad key sizing.cpu.limits", func(t *testing.T) { t.Parallel() config := map[string]interface{}{ - "Values.sizing.cpu.limits": "true", - "Values.sizing.role.count": "1", + "Values.sizing.cpu.limits": "true", + "Values.sizing.some_group.count": "1", } _, err := RenderNode(deployment, config) assert.EqualError(err, @@ -183,8 +183,8 @@ func TestNewDeploymentHelm(t *testing.T) { t.Run("Configured, bad key sizing.cpu.requests", func(t *testing.T) { t.Parallel() config := map[string]interface{}{ - "Values.sizing.cpu.requests": "true", - "Values.sizing.role.count": "1", + "Values.sizing.cpu.requests": "true", + "Values.sizing.some_group.count": "1", } _, err := RenderNode(deployment, config) assert.EqualError(err, @@ -194,12 +194,12 @@ func TestNewDeploymentHelm(t *testing.T) { t.Run("Configured", func(t *testing.T) { t.Parallel() config := map[string]interface{}{ - "Values.sizing.role.count": "1", - "Values.sizing.role.affinity.nodeAffinity": "snafu", - "Values.sizing.role.capabilities": []interface{}{}, - "Values.kube.registry.hostname": "docker.suse.fake", - "Values.kube.organization": "splat", - "Values.env.KUBERNETES_CLUSTER_DOMAIN": "cluster.local", + "Values.sizing.some_group.count": "1", + "Values.sizing.some_group.affinity.nodeAffinity": "snafu", + "Values.sizing.some_group.capabilities": []interface{}{}, + "Values.kube.registry.hostname": "docker.suse.fake", + "Values.kube.organization": "splat", + "Values.env.KUBERNETES_CLUSTER_DOMAIN": "cluster.local", } actual, err := RoundtripNode(deployment, config) @@ -210,19 +210,19 @@ func TestNewDeploymentHelm(t *testing.T) { apiVersion: "extensions/v1beta1" kind: "Deployment" metadata: - name: "role" + name: "some-group" labels: - skiff-role-name: "role" + skiff-role-name: "some-group" spec: replicas: 1 selector: matchLabels: - skiff-role-name: "role" + skiff-role-name: "some-group" template: metadata: - name: "role" + name: "some-group" labels: - skiff-role-name: "role" + skiff-role-name: "some-group" annotations: checksum/config: 08c80ed11902eefef09739d41c91408238bb8b5e7be7cc1e5db933b7c8de65c3 spec: @@ -235,7 +235,7 @@ func TestNewDeploymentHelm(t *testing.T) { - key: "skiff-role-name" operator: "In" values: - - "role" + - "some-group" topologyKey: "beta.kubernetes.io/os" weight: 100 nodeAffinity: "snafu" @@ -247,14 +247,14 @@ func TestNewDeploymentHelm(t *testing.T) { valueFrom: fieldRef: fieldPath: "metadata.namespace" - image: "docker.suse.fake/splat/the_repos-role:bfff10016c4e9e46c9541d35e6bf52054c54e96a" + image: "docker.suse.fake/splat/the_repos-some-group:bfff10016c4e9e46c9541d35e6bf52054c54e96a" lifecycle: preStop: exec: command: - "/opt/fissile/pre-stop.sh" livenessProbe: ~ - name: "role" + name: "some-group" ports: ~ readinessProbe: exec: @@ -278,29 +278,29 @@ func TestGetAffinityBlock(t *testing.T) { t.Parallel() assert := assert.New(t) - role := deploymentTestLoadRole(assert, "role", "pod-with-valid-pod-anti-affinity.yml") - if role == nil { + instanceGroup := deploymentTestLoad(assert, "some-group", "pod-with-valid-pod-anti-affinity.yml") + if instanceGroup == nil { return } - affinity := getAffinityBlock(role) + affinity := getAffinityBlock(instanceGroup) assert.NotNil(affinity.Get("podAntiAffinity")) assert.NotNil(affinity.Get("nodeAffinity")) assert.Equal(affinity.Names(), []string{"podAntiAffinity", "nodeAffinity"}) - assert.Equal(affinity.Get("nodeAffinity").Block(), "if .Values.sizing.role.affinity.nodeAffinity") + assert.Equal(affinity.Get("nodeAffinity").Block(), "if 
.Values.sizing.some_group.affinity.nodeAffinity") - role = deploymentTestLoadRole(assert, "role", "pod-with-no-pod-anti-affinity.yml") - if role == nil { + instanceGroup = deploymentTestLoad(assert, "some-group", "pod-with-no-pod-anti-affinity.yml") + if instanceGroup == nil { return } - affinity = getAffinityBlock(role) + affinity = getAffinityBlock(instanceGroup) assert.Nil(affinity.Get("podAntiAffinity")) assert.NotNil(affinity.Get("nodeAffinity")) assert.Equal(affinity.Names(), []string{"nodeAffinity"}) - assert.Equal(affinity.Get("nodeAffinity").Block(), "if .Values.sizing.role.affinity.nodeAffinity") + assert.Equal(affinity.Get("nodeAffinity").Block(), "if .Values.sizing.some_group.affinity.nodeAffinity") } func createEmptySpec() *helm.Mapping { @@ -325,10 +325,10 @@ func TestAddAffinityRules(t *testing.T) { emptySpec := createEmptySpec() // - // Test role with valid anti affinity + // Test instance group with valid anti affinity // - role := deploymentTestLoadRole(assert, "role", "pod-with-valid-pod-anti-affinity.yml") - if role == nil { + instanceGroup := deploymentTestLoad(assert, "some-group", "pod-with-valid-pod-anti-affinity.yml") + if instanceGroup == nil { return } @@ -336,38 +336,38 @@ func TestAddAffinityRules(t *testing.T) { settings := ExportSettings{CreateHelmChart: true} - err := addAffinityRules(role, spec, settings) + err := addAffinityRules(instanceGroup, spec, settings) assert.NotNil(spec.Get("template", "spec", "affinity", "podAntiAffinity")) assert.NotNil(spec.Get("template", "spec", "affinity", "nodeAffinity")) assert.NoError(err) // - // Test role with pod affinity defined + // Test instance group with pod affinity defined // - role = deploymentTestLoadRole(assert, "role", "pod-with-invalid-pod-affinity.yml") - if role == nil { + instanceGroup = deploymentTestLoad(assert, "some-group", "pod-with-invalid-pod-affinity.yml") + if instanceGroup == nil { return } spec = createEmptySpec() - err = addAffinityRules(role, spec, settings) + err = addAffinityRules(instanceGroup, spec, settings) assert.Error(err) assert.Equal(spec, emptySpec) // - // Test role with node affinity defined + // Test instance group with node affinity defined // - role = deploymentTestLoadRole(assert, "role", "pod-with-invalid-node-affinity.yml") - if role == nil { + instanceGroup = deploymentTestLoad(assert, "some-group", "pod-with-invalid-node-affinity.yml") + if instanceGroup == nil { return } spec = createEmptySpec() - err = addAffinityRules(role, spec, settings) + err = addAffinityRules(instanceGroup, spec, settings) assert.Error(err) assert.Equal(spec, emptySpec) @@ -375,8 +375,8 @@ func TestAddAffinityRules(t *testing.T) { // // Not creating the helm chart should only add the annotation // - role = deploymentTestLoadRole(assert, "role", "pod-with-valid-pod-anti-affinity.yml") - if role == nil { + instanceGroup = deploymentTestLoad(assert, "some-group", "pod-with-valid-pod-anti-affinity.yml") + if instanceGroup == nil { return } @@ -384,7 +384,7 @@ func TestAddAffinityRules(t *testing.T) { settings = ExportSettings{CreateHelmChart: false} - err = addAffinityRules(role, spec, settings) + err = addAffinityRules(instanceGroup, spec, settings) assert.Nil(spec.Get("template", "spec", "affinity", "podAntiAffinity")) assert.NoError(err) } @@ -393,8 +393,8 @@ func TestNewDeploymentWithEmptyDirVolume(t *testing.T) { t.Parallel() assert := assert.New(t) - role := deploymentTestLoadRole(assert, "role", "colocated-containers-with-deployment-and-empty-dir.yml") - if role == nil { + instanceGroup := 
deploymentTestLoad(assert, "some-group", "colocated-containers-with-deployment-and-empty-dir.yml") + if instanceGroup == nil { return } @@ -403,35 +403,35 @@ func TestNewDeploymentWithEmptyDirVolume(t *testing.T) { Repository: "the_repos", } - deployment, svc, err := NewDeployment(role, settings, nil) + deployment, svc, err := NewDeployment(instanceGroup, settings, nil) assert.NoError(err) assert.Nil(svc) assert.NotNil(deployment) assert.Equal(deployment.Get("kind").String(), "Deployment") - assert.Equal(deployment.Get("metadata", "name").String(), "role") + assert.Equal(deployment.Get("metadata", "name").String(), "some-group") t.Run("Defaults", func(t *testing.T) { t.Parallel() // Rendering fails with defaults, template needs information // about sizing and the like. config := map[string]interface{}{ - "Values.sizing.role.count": nil, + "Values.sizing.some_group.count": nil, } _, err := RenderNode(deployment, config) assert.EqualError(err, - `template: :9:17: executing "" at : error calling fail: role must have at least 1 instances`) + `template: :9:17: executing "" at : error calling fail: some_group must have at least 1 instances`) }) t.Run("Configured", func(t *testing.T) { t.Parallel() config := map[string]interface{}{ - "Values.sizing.role.count": "1", - "Values.sizing.role.capabilities": []interface{}{}, - "Values.sizing.colocated.capabilities": []interface{}{}, - "Values.kube.registry.hostname": "docker.suse.fake", - "Values.kube.organization": "splat", - "Values.env.KUBERNETES_CLUSTER_DOMAIN": "cluster.local", + "Values.sizing.some_group.count": "1", + "Values.sizing.some_group.capabilities": []interface{}{}, + "Values.sizing.colocated.capabilities": []interface{}{}, + "Values.kube.registry.hostname": "docker.suse.fake", + "Values.kube.organization": "splat", + "Values.env.KUBERNETES_CLUSTER_DOMAIN": "cluster.local", } actual, err := RoundtripNode(deployment, config) @@ -445,7 +445,7 @@ func TestNewDeploymentWithEmptyDirVolume(t *testing.T) { template: spec: containers: - - name: "role" + - name: "some-group" volumeMounts: - name: shared-data diff --git a/kube/job.go b/kube/job.go index f777a704..ff145c0d 100644 --- a/kube/job.go +++ b/kube/job.go @@ -8,24 +8,24 @@ import ( "github.com/SUSE/fissile/util" ) -// NewJob creates a new Job for the given role, as well as any objects it depends on -func NewJob(role *model.Role, settings ExportSettings, grapher util.ModelGrapher) (helm.Node, error) { - podTemplate, err := NewPodTemplate(role, settings, grapher) +// NewJob creates a new Job for the given instance group, as well as any objects it depends on +func NewJob(instanceGroup *model.InstanceGroup, settings ExportSettings, grapher util.ModelGrapher) (helm.Node, error) { + podTemplate, err := NewPodTemplate(instanceGroup, settings, grapher) if err != nil { return nil, err } // Jobs must have a restart policy that isn't "always" - switch role.Run.FlightStage { + switch instanceGroup.Run.FlightStage { case model.FlightStageManual: podTemplate.Get("spec", "restartPolicy").SetValue("Never") case model.FlightStageFlight, model.FlightStagePreFlight, model.FlightStagePostFlight: podTemplate.Get("spec", "restartPolicy").SetValue("OnFailure") default: - return nil, fmt.Errorf("Role %s has unexpected flight stage %s", role.Name, role.Run.FlightStage) + return nil, fmt.Errorf("Instance group %s has unexpected flight stage %s", instanceGroup.Name, instanceGroup.Run.FlightStage) } - name := role.Name + name := instanceGroup.Name apiVersion := "batch/v1" if settings.CreateHelmChart { name += "-{{ 
.Release.Revision }}" @@ -33,12 +33,12 @@ func NewJob(role *model.Role, settings ExportSettings, grapher util.ModelGrapher metadata := helm.NewMapping() metadata.Add("name", name) - if role.Run.ObjectAnnotations != nil { - metadata.Add("annotations", *role.Run.ObjectAnnotations) + if instanceGroup.Run.ObjectAnnotations != nil { + metadata.Add("annotations", *instanceGroup.Run.ObjectAnnotations) } metadata.Sort() - job := newTypeMeta(apiVersion, "Job", helm.Comment(role.GetLongDescription())) + job := newTypeMeta(apiVersion, "Job", helm.Comment(instanceGroup.GetLongDescription())) job.Add("metadata", metadata) job.Add("spec", helm.NewMapping("template", podTemplate)) diff --git a/kube/job_test.go b/kube/job_test.go index bdfdac39..7ed569a1 100644 --- a/kube/job_test.go +++ b/kube/job_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" ) -func jobTestLoadRole(assert *assert.Assertions, roleName, manifestName string) *model.Role { +func jobTestLoadRole(assert *assert.Assertions, roleName, manifestName string) *model.InstanceGroup { workDir, err := os.Getwd() assert.NoError(err) @@ -29,25 +29,25 @@ func jobTestLoadRole(assert *assert.Assertions, roleName, manifestName string) * return nil } - role := manifest.LookupRole(roleName) - if !assert.NotNil(role, "Failed to find role %s", roleName) { + instanceGroup := manifest.LookupInstanceGroup(roleName) + if !assert.NotNil(instanceGroup, "Failed to find instance group %s", roleName) { return nil } - return role + return instanceGroup } func TestJobPreFlight(t *testing.T) { t.Parallel() assert := assert.New(t) - role := jobTestLoadRole(assert, "pre-role", "jobs.yml") - if role == nil { + instanceGroup := jobTestLoadRole(assert, "pre-role", "jobs.yml") + if instanceGroup == nil { return } - job, err := NewJob(role, ExportSettings{ + job, err := NewJob(instanceGroup, ExportSettings{ Opinions: model.NewEmptyOpinions(), }, nil) - if !assert.NoError(err, "Failed to create job from role pre-role") { + if !assert.NoError(err, "Failed to create job from instance group pre-role") { return } assert.NotNil(job) @@ -76,12 +76,12 @@ func TestJobPreFlight(t *testing.T) { func TestJobPostFlight(t *testing.T) { t.Parallel() assert := assert.New(t) - role := jobTestLoadRole(assert, "post-role", "jobs.yml") - if role == nil { + instanceGroup := jobTestLoadRole(assert, "post-role", "jobs.yml") + if instanceGroup == nil { return } - job, err := NewJob(role, ExportSettings{ + job, err := NewJob(instanceGroup, ExportSettings{ Opinions: model.NewEmptyOpinions(), }, nil) if !assert.NoError(err, "Failed to create job from role post-role") { @@ -114,15 +114,15 @@ func TestJobWithAnnotations(t *testing.T) { t.Parallel() assert := assert.New(t) - role := jobTestLoadRole(assert, "role", "job-with-annotation.yml") - if role == nil { + instanceGroup := jobTestLoadRole(assert, "some-group", "job-with-annotation.yml") + if instanceGroup == nil { return } - job, err := NewJob(role, ExportSettings{ + job, err := NewJob(instanceGroup, ExportSettings{ Opinions: model.NewEmptyOpinions(), }, nil) - if !assert.NoError(err, "Failed to create job from role pre-role") { + if !assert.NoError(err, "Failed to create job from instance group pre-role") { return } assert.NotNil(job) @@ -135,7 +135,7 @@ func TestJobWithAnnotations(t *testing.T) { apiVersion: batch/v1 kind: Job metadata: - name: role + name: some-group annotations: helm.sh/hook: post-install `, actual) diff --git a/kube/pod.go b/kube/pod.go index 47e88157..71160b68 100644 --- a/kube/pod.go +++ b/kube/pod.go @@ 
-21,13 +21,13 @@ const defaultInitialDelaySeconds = 600 // NewPodTemplate creates a new pod template spec for a given role, as well as // any objects it depends on -func NewPodTemplate(role *model.Role, settings ExportSettings, grapher util.ModelGrapher) (helm.Node, error) { +func NewPodTemplate(role *model.InstanceGroup, settings ExportSettings, grapher util.ModelGrapher) (helm.Node, error) { if role.Run == nil { return nil, fmt.Errorf("Role %s has no run information", role.Name) } containers := helm.NewList() - for _, candidate := range append([]*model.Role{role}, role.GetColocatedRoles()...) { + for _, candidate := range append([]*model.InstanceGroup{role}, role.GetColocatedRoles()...) { containerMapping, err := getContainerMapping(candidate, settings, grapher) if err != nil { return nil, err @@ -69,7 +69,7 @@ func NewPodTemplate(role *model.Role, settings ExportSettings, grapher util.Mode } // NewPod creates a new Pod for the given role, as well as any objects it depends on -func NewPod(role *model.Role, settings ExportSettings, grapher util.ModelGrapher) (helm.Node, error) { +func NewPod(role *model.InstanceGroup, settings ExportSettings, grapher util.ModelGrapher) (helm.Node, error) { podTemplate, err := NewPodTemplate(role, settings, grapher) if err != nil { return nil, err @@ -92,7 +92,7 @@ func NewPod(role *model.Role, settings ExportSettings, grapher util.ModelGrapher } // getContainerMapping returns the container list entry mapping for the provided role -func getContainerMapping(role *model.Role, settings ExportSettings, grapher util.ModelGrapher) (*helm.Mapping, error) { +func getContainerMapping(role *model.InstanceGroup, settings ExportSettings, grapher util.ModelGrapher) (*helm.Mapping, error) { roleName := strings.Replace(strings.ToLower(role.Name), "_", "-", -1) roleVarName := makeVarName(roleName) @@ -189,7 +189,7 @@ func getContainerMapping(role *model.Role, settings ExportSettings, grapher util } // getContainerImageName returns the name of the docker image to use for a role -func getContainerImageName(role *model.Role, settings ExportSettings, grapher util.ModelGrapher) (string, error) { +func getContainerImageName(role *model.InstanceGroup, settings ExportSettings, grapher util.ModelGrapher) (string, error) { devVersion, err := role.GetRoleDevVersion(settings.Opinions, settings.TagExtra, settings.FissileVersion, grapher) if err != nil { return "", err @@ -208,7 +208,7 @@ func getContainerImageName(role *model.Role, settings ExportSettings, grapher ut } // getContainerPorts returns a list of ports for a role -func getContainerPorts(role *model.Role, settings ExportSettings) (helm.Node, error) { +func getContainerPorts(role *model.InstanceGroup, settings ExportSettings) (helm.Node, error) { var ports []helm.Node for _, port := range role.Run.ExposedPorts { if settings.CreateHelmChart && port.CountIsConfigurable { @@ -254,7 +254,7 @@ func getContainerPorts(role *model.Role, settings ExportSettings) (helm.Node, er } // getVolumeMounts gets the list of volume mounts for a role -func getVolumeMounts(role *model.Role, createHelmChart bool) helm.Node { +func getVolumeMounts(role *model.InstanceGroup, createHelmChart bool) helm.Node { var mounts []helm.Node for _, volume := range role.Run.Volumes { var mount helm.Node @@ -294,7 +294,7 @@ func makeSecretVar(name string, generated bool, modifiers ...helm.NodeModifier) } // getNonClaimVolumes returns the list of pod volumes that are _not_ bound with volume claims -func getNonClaimVolumes(role *model.Role, createHelmChart bool) 
helm.Node { +func getNonClaimVolumes(role *model.InstanceGroup, createHelmChart bool) helm.Node { var mounts []helm.Node for _, volume := range role.Run.Volumes { switch volume.Type { @@ -321,7 +321,7 @@ func getNonClaimVolumes(role *model.Role, createHelmChart bool) helm.Node { return helm.NewNode(mounts) } -func getEnvVars(role *model.Role, settings ExportSettings) (helm.Node, error) { +func getEnvVars(role *model.InstanceGroup, settings ExportSettings) (helm.Node, error) { configs, err := role.GetVariablesForRole() if err != nil { return nil, err @@ -340,7 +340,7 @@ func getEnvVarsFromConfigs(configs model.ConfigurationVariableSlice, settings Ex match := sizingCountRegexp.FindStringSubmatch(config.Name) if match != nil { roleName := strings.Replace(strings.ToLower(match[1]), "_", "-", -1) - role := settings.RoleManifest.LookupRole(roleName) + role := settings.RoleManifest.LookupInstanceGroup(roleName) if role == nil { return nil, fmt.Errorf("Role %s for %s not found", roleName, config.Name) } @@ -362,7 +362,7 @@ func getEnvVarsFromConfigs(configs model.ConfigurationVariableSlice, settings Ex match = sizingPortsRegexp.FindStringSubmatch(config.Name) if match != nil { roleName := strings.Replace(strings.ToLower(match[1]), "_", "-", -1) - role := settings.RoleManifest.LookupRole(roleName) + role := settings.RoleManifest.LookupInstanceGroup(roleName) if role == nil { return nil, fmt.Errorf("Role %s for %s not found", roleName, config.Name) } @@ -489,7 +489,7 @@ func getEnvVarsFromConfigs(configs model.ConfigurationVariableSlice, settings Ex return helm.NewNode(env), nil } -func getSecurityContext(role *model.Role, createHelmChart bool) helm.Node { +func getSecurityContext(role *model.InstanceGroup, createHelmChart bool) helm.Node { var hasAll string var notAll string var config string @@ -532,7 +532,7 @@ func getSecurityContext(role *model.Role, createHelmChart bool) helm.Node { return helm.NewMapping("capabilities", helm.NewMapping("add", helm.NewNode(capabilities))) } -func getContainerLivenessProbe(role *model.Role) (helm.Node, error) { +func getContainerLivenessProbe(role *model.InstanceGroup) (helm.Node, error) { if role.Run == nil { return nil, nil } @@ -552,7 +552,7 @@ func getContainerLivenessProbe(role *model.Role) (helm.Node, error) { return nil, nil } -func getContainerReadinessProbe(role *model.Role) (helm.Node, error) { +func getContainerReadinessProbe(role *model.InstanceGroup) (helm.Node, error) { if role.Run == nil { return nil, nil } @@ -628,7 +628,7 @@ func getContainerReadinessProbe(role *model.Role) (helm.Node, error) { } } -func configureContainerProbe(role *model.Role, probeName string, roleProbe *model.HealthProbe) (*helm.Mapping, bool, error) { +func configureContainerProbe(role *model.InstanceGroup, probeName string, roleProbe *model.HealthProbe) (*helm.Mapping, bool, error) { // InitialDelaySeconds - // TimeoutSeconds - 1, min 1 // PeriodSeconds - 10, min 1 (interval between probes) @@ -662,7 +662,7 @@ func configureContainerProbe(role *model.Role, probeName string, roleProbe *mode return probe.Sort(), false, nil } -func getContainerURLProbe(role *model.Role, probeName string, roleProbe *model.HealthProbe) (helm.Node, error) { +func getContainerURLProbe(role *model.InstanceGroup, probeName string, roleProbe *model.HealthProbe) (helm.Node, error) { probeURL, err := url.Parse(roleProbe.URL) if err != nil { return nil, fmt.Errorf("Invalid %s URL health check for %s: %s", probeName, role.Name, err) diff --git a/kube/pod_test.go b/kube/pod_test.go index 
d857b953..a93a9088 100644 --- a/kube/pod_test.go +++ b/kube/pod_test.go @@ -16,7 +16,7 @@ import ( yaml "gopkg.in/yaml.v2" ) -func podTemplateTestLoadRole(assert *assert.Assertions) *model.Role { +func podTemplateTestLoadRole(assert *assert.Assertions) *model.InstanceGroup { workDir, err := os.Getwd() if !assert.NoError(err) { return nil @@ -34,8 +34,8 @@ func podTemplateTestLoadRole(assert *assert.Assertions) *model.Role { if !assert.NoError(err) { return nil } - role := manifest.LookupRole("myrole") - if !assert.NotNil(role, "Failed to find role in manifest") { + instanceGroup := manifest.LookupInstanceGroup("myrole") + if !assert.NotNil(instanceGroup, "Failed to find role in manifest") { return nil } @@ -48,7 +48,7 @@ func podTemplateTestLoadRole(assert *assert.Assertions) *model.Role { Secret: true, Internal: true, }) - return role + return instanceGroup } type Sample struct { @@ -323,11 +323,11 @@ func TestPodGetEnvVars(t *testing.T) { return } - if !assert.Equal(1, len(role.RoleJobs), "Role should have one job") { + if !assert.Equal(1, len(role.JobReferences), "Role should have one job") { return } - role.RoleJobs[0].Properties = []*model.JobProperty{ + role.JobReferences[0].Properties = []*model.JobProperty{ &model.JobProperty{ Name: "some-property", }, @@ -396,8 +396,8 @@ func TestPodGetEnvVarsFromConfigSizingCountKube(t *testing.T) { }, }, ExportSettings{ RoleManifest: &model.RoleManifest{ - Roles: []*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", Run: &model.RoleRun{ Scaling: &model.RoleRunScaling{ @@ -434,8 +434,8 @@ func TestPodGetEnvVarsFromConfigSizingCountHelm(t *testing.T) { }, ExportSettings{ CreateHelmChart: true, RoleManifest: &model.RoleManifest{ - Roles: []*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", }, }, @@ -473,8 +473,8 @@ func TestPodGetEnvVarsFromConfigSizingPortsKube(t *testing.T) { }, }, ExportSettings{ RoleManifest: &model.RoleManifest{ - Roles: []*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", Run: &model.RoleRun{ ExposedPorts: []*model.RoleRunExposedPort{ @@ -521,8 +521,8 @@ func TestPodGetEnvVarsFromConfigSizingPortsHelm(t *testing.T) { }, ExportSettings{ CreateHelmChart: true, RoleManifest: &model.RoleManifest{ - Roles: []*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", Run: &model.RoleRun{ ExposedPorts: []*model.RoleRunExposedPort{ @@ -568,8 +568,8 @@ func TestPodGetEnvVarsFromConfigGenerationCounterKube(t *testing.T) { }, }, ExportSettings{ RoleManifest: &model.RoleManifest{ - Roles: []*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", }, }, @@ -601,8 +601,8 @@ func TestPodGetEnvVarsFromConfigGenerationCounterHelm(t *testing.T) { }, ExportSettings{ CreateHelmChart: true, RoleManifest: &model.RoleManifest{ - Roles: []*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", }, }, @@ -637,8 +637,8 @@ func TestPodGetEnvVarsFromConfigGenerationNameKube(t *testing.T) { }, }, ExportSettings{ RoleManifest: &model.RoleManifest{ - Roles: []*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", }, }, @@ -670,8 +670,8 @@ func TestPodGetEnvVarsFromConfigGenerationNameHelm(t *testing.T) { }, ExportSettings{ CreateHelmChart: true, RoleManifest: &model.RoleManifest{ - Roles: 
[]*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", }, }, @@ -707,8 +707,8 @@ func TestPodGetEnvVarsFromConfigSecretsKube(t *testing.T) { }, }, ExportSettings{ RoleManifest: &model.RoleManifest{ - Roles: []*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", }, }, @@ -739,8 +739,8 @@ func TestPodGetEnvVarsFromConfigSecretsHelm(t *testing.T) { settings := ExportSettings{ CreateHelmChart: true, RoleManifest: &model.RoleManifest{ - Roles: []*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", }, }, @@ -878,8 +878,8 @@ func TestPodGetEnvVarsFromConfigNonSecretKube(t *testing.T) { settings := ExportSettings{ RoleManifest: &model.RoleManifest{ - Roles: []*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", }, }, @@ -943,8 +943,8 @@ func TestPodGetEnvVarsFromConfigNonSecretHelmUserOptional(t *testing.T) { }, ExportSettings{ CreateHelmChart: true, RoleManifest: &model.RoleManifest{ - Roles: []*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", }, }, @@ -1009,8 +1009,8 @@ func TestPodGetEnvVarsFromConfigNonSecretHelmUserRequired(t *testing.T) { }, ExportSettings{ CreateHelmChart: true, RoleManifest: &model.RoleManifest{ - Roles: []*model.Role{ - &model.Role{ + InstanceGroups: []*model.InstanceGroup{ + &model.InstanceGroup{ Name: "foo", }, }, @@ -1690,7 +1690,7 @@ func TestPodGetContainerReadinessProbe(t *testing.T) { } } -func podTestLoadRoleFrom(assert *assert.Assertions, roleName, manifestName string) *model.Role { +func podTestLoadRoleFrom(assert *assert.Assertions, roleName, manifestName string) *model.InstanceGroup { workDir, err := os.Getwd() assert.NoError(err) @@ -1706,7 +1706,7 @@ func podTestLoadRoleFrom(assert *assert.Assertions, roleName, manifestName strin if !assert.NoError(err) { return nil } - role := manifest.LookupRole(roleName) + role := manifest.LookupInstanceGroup(roleName) if !assert.NotNil(role, "Failed to find role %s", roleName) { return nil } @@ -1714,7 +1714,7 @@ func podTestLoadRoleFrom(assert *assert.Assertions, roleName, manifestName strin return role } -func podTestLoadRole(assert *assert.Assertions, roleName string) *model.Role { +func podTestLoadRole(assert *assert.Assertions, roleName string) *model.InstanceGroup { return podTestLoadRoleFrom(assert, roleName, "pods.yml") } @@ -2733,7 +2733,7 @@ func TestPodVolumeTypeEmptyDir(t *testing.T) { assert.NotNil(roleManifest) // Check non-claim volumes - mounts := getNonClaimVolumes(roleManifest.LookupRole("main-role"), true) + mounts := getNonClaimVolumes(roleManifest.LookupInstanceGroup("main-role"), true) assert.NotNil(mounts) actual, err := RoundtripNode(mounts, nil) if !assert.NoError(err) { @@ -2746,7 +2746,7 @@ func TestPodVolumeTypeEmptyDir(t *testing.T) { // Check each role for its volume mount for _, roleName := range []string{"main-role", "to-be-colocated"} { - role := roleManifest.LookupRole(roleName) + role := roleManifest.LookupInstanceGroup(roleName) mounts := getVolumeMounts(role, true) assert.NotNil(mounts) diff --git a/kube/service.go b/kube/service.go index ea8ac461..2d5fb475 100644 --- a/kube/service.go +++ b/kube/service.go @@ -10,7 +10,7 @@ import ( // NewServiceList creates a list of services // clustering should be true if a kubernetes headless service should be created // (for self-clustering roles, to reach each pod 
individually) -func NewServiceList(role *model.Role, clustering bool, settings ExportSettings) (helm.Node, error) { +func NewServiceList(role *model.InstanceGroup, clustering bool, settings ExportSettings) (helm.Node, error) { var items []helm.Node if clustering { @@ -65,7 +65,7 @@ const ( ) // newService creates a new k8s service (ClusterIP or LoadBalanced) -func newService(role *model.Role, serviceType newServiceType, settings ExportSettings) (helm.Node, error) { +func newService(role *model.InstanceGroup, serviceType newServiceType, settings ExportSettings) (helm.Node, error) { var ports []helm.Node for _, port := range role.Run.ExposedPorts { if serviceType == newServiceTypePublic && !port.Public { diff --git a/kube/service_test.go b/kube/service_test.go index 4585fa5e..72bc68df 100644 --- a/kube/service_test.go +++ b/kube/service_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func serviceTestLoadRole(assert *assert.Assertions, manifestName string) (*model.RoleManifest, *model.Role) { +func serviceTestLoadRole(assert *assert.Assertions, manifestName string) (*model.RoleManifest, *model.InstanceGroup) { workDir, err := os.Getwd() assert.NoError(err) @@ -30,7 +30,7 @@ func serviceTestLoadRole(assert *assert.Assertions, manifestName string) (*model if !assert.NoError(err) { return nil, nil } - role := manifest.LookupRole("myrole") + role := manifest.LookupInstanceGroup("myrole") if !assert.NotNil(role, "Failed to find role in manifest") { return nil, nil } diff --git a/kube/stateful_set.go b/kube/stateful_set.go index 3ee54051..e8e3b6fa 100644 --- a/kube/stateful_set.go +++ b/kube/stateful_set.go @@ -9,7 +9,7 @@ import ( ) // NewStatefulSet returns a stateful set and a list of services for the given role -func NewStatefulSet(role *model.Role, settings ExportSettings, grapher util.ModelGrapher) (helm.Node, helm.Node, error) { +func NewStatefulSet(role *model.InstanceGroup, settings ExportSettings, grapher util.ModelGrapher) (helm.Node, helm.Node, error) { // For each StatefulSet, we need two services -- one for the public (inside // the namespace) endpoint, and one headless service to control the pods. 
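// Illustrative note (not in the original patch): the headless service is
// the one rendered with `clusterIP: None`, so cluster DNS resolves it to
// the individual pod addresses rather than a single virtual IP, which is
// what lets self-clustering instance groups reach each pod directly.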
if role == nil { @@ -57,7 +57,7 @@ func NewStatefulSet(role *model.Role, settings ExportSettings, grapher util.Mode } // getVolumeClaims returns the list of persistent and shared volume claims from a role -func getVolumeClaims(role *model.Role, createHelmChart bool) []helm.Node { +func getVolumeClaims(role *model.InstanceGroup, createHelmChart bool) []helm.Node { var claims []helm.Node for _, volume := range role.Run.Volumes { var accessMode string diff --git a/kube/stateful_set_test.go b/kube/stateful_set_test.go index a34c06ad..976d7426 100644 --- a/kube/stateful_set_test.go +++ b/kube/stateful_set_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -func statefulSetTestLoadManifest(assert *assert.Assertions, manifestName string) (*model.RoleManifest, *model.Role) { +func statefulSetTestLoadManifest(assert *assert.Assertions, manifestName string) (*model.RoleManifest, *model.InstanceGroup) { workDir, err := os.Getwd() assert.NoError(err) @@ -30,7 +30,7 @@ func statefulSetTestLoadManifest(assert *assert.Assertions, manifestName string) return nil, nil } - role := manifest.LookupRole("myrole") + role := manifest.LookupInstanceGroup("myrole") if !assert.NotNil(role, "Failed to find role in manifest") { return nil, nil } diff --git a/kube/values.go b/kube/values.go index 3f108282..5eb148c1 100644 --- a/kube/values.go +++ b/kube/values.go @@ -73,50 +73,50 @@ func MakeValues(settings ExportSettings) (helm.Node, error) { sizing := helm.NewMapping() sizing.Set(helm.Comment(strings.Join(strings.Fields(` - The sizing section contains configuration to change each individual role. - Due to limitations on the allowable names, any dashes ("-") in the role - names are replaced with underscores ("_"). + The sizing section contains configuration to change each individual instance + group. Due to limitations on the allowable names, any dashes ("-") in the + instance group names are replaced with underscores ("_"). `), " "))) - for _, role := range settings.RoleManifest.Roles { - if role.Run.FlightStage == model.FlightStageManual { + for _, instanceGroup := range settings.RoleManifest.InstanceGroups { + if instanceGroup.Run.FlightStage == model.FlightStageManual { continue } entry := helm.NewMapping() - if !role.IsPrivileged() { + if !instanceGroup.IsPrivileged() { entry.Add("capabilities", helm.NewList(), helm.Comment("Additional privileges can be specified here")) } var comment string - if role.Run.Scaling.Min == role.Run.Scaling.Max { - comment = fmt.Sprintf("The %s role cannot be scaled.", role.Name) + if instanceGroup.Run.Scaling.Min == instanceGroup.Run.Scaling.Max { + comment = fmt.Sprintf("The %s instance group cannot be scaled.", instanceGroup.Name) } else { - comment = fmt.Sprintf("The %s role can scale between %d and %d instances.", - role.Name, role.Run.Scaling.Min, role.Run.Scaling.Max) + comment = fmt.Sprintf("The %s instance group can scale between %d and %d instances.", + instanceGroup.Name, instanceGroup.Run.Scaling.Min, instanceGroup.Run.Scaling.Max) - if role.Run.Scaling.MustBeOdd { + if instanceGroup.Run.Scaling.MustBeOdd { comment += "\nThe instance count must be an odd number (not divisible by 2)." 
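		// Illustrative example (not in the original patch): for a
		// hypothetical instance group "foo" with Min=1, Max=3 and
		// MustBeOdd set, the comment assembled here reads:
		//   The foo instance group can scale between 1 and 3 instances.
		//   The instance count must be an odd number (not divisible by 2).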
} - if role.Run.Scaling.HA != role.Run.Scaling.Min { + if instanceGroup.Run.Scaling.HA != instanceGroup.Run.Scaling.Min { comment += fmt.Sprintf("\nFor high availability it needs at least %d instances.", - role.Run.Scaling.HA) + instanceGroup.Run.Scaling.HA) } } - entry.Add("count", role.Run.Scaling.Min, helm.Comment(comment)) + entry.Add("count", instanceGroup.Run.Scaling.Min, helm.Comment(comment)) if settings.UseMemoryLimits { var request helm.Node - if role.Run.Memory.Request == nil { + if instanceGroup.Run.Memory.Request == nil { request = helm.NewNode(nil) } else { - request = helm.NewNode(int(*role.Run.Memory.Request)) + request = helm.NewNode(int(*instanceGroup.Run.Memory.Request)) } var limit helm.Node - if role.Run.Memory.Limit == nil { + if instanceGroup.Run.Memory.Limit == nil { limit = helm.NewNode(nil) } else { - limit = helm.NewNode(int(*role.Run.Memory.Limit)) + limit = helm.NewNode(int(*instanceGroup.Run.Memory.Limit)) } entry.Add("memory", helm.NewMapping( @@ -126,16 +126,16 @@ func MakeValues(settings ExportSettings) (helm.Node, error) { } if settings.UseCPULimits { var request helm.Node - if role.Run.CPU.Request == nil { + if instanceGroup.Run.CPU.Request == nil { request = helm.NewNode(nil) } else { - request = helm.NewNode(1000. * *role.Run.CPU.Request) + request = helm.NewNode(1000. * *instanceGroup.Run.CPU.Request) } var limit helm.Node - if role.Run.CPU.Limit == nil { + if instanceGroup.Run.CPU.Limit == nil { limit = helm.NewNode(nil) } else { - limit = helm.NewNode(1000. * *role.Run.CPU.Limit) + limit = helm.NewNode(1000. * *instanceGroup.Run.CPU.Limit) } entry.Add("cpu", helm.NewMapping( @@ -145,7 +145,7 @@ func MakeValues(settings ExportSettings) (helm.Node, error) { } diskSizes := helm.NewMapping() - for _, volume := range role.Run.Volumes { + for _, volume := range instanceGroup.Run.Volumes { switch volume.Type { case model.VolumeTypePersistent, model.VolumeTypeShared: diskSizes.Add(makeVarName(volume.Tag), volume.Size) @@ -155,7 +155,7 @@ func MakeValues(settings ExportSettings) (helm.Node, error) { entry.Add("disk_sizes", diskSizes.Sort()) } ports := helm.NewMapping() - for _, port := range role.Run.ExposedPorts { + for _, port := range instanceGroup.Run.ExposedPorts { config := helm.NewMapping() if port.PortIsConfigurable { config.Add("port", port.ExternalPort) @@ -173,7 +173,7 @@ func MakeValues(settings ExportSettings) (helm.Node, error) { entry.Add("affinity", helm.NewMapping(), helm.Comment("Node affinity rules can be specified here")) - sizing.Add(makeVarName(role.Name), entry.Sort(), helm.Comment(role.GetLongDescription())) + sizing.Add(makeVarName(instanceGroup.Name), entry.Sort(), helm.Comment(instanceGroup.GetLongDescription())) } values.Add("sizing", sizing.Sort()) diff --git a/kube/values_test.go b/kube/values_test.go index b624f450..13a157b8 100644 --- a/kube/values_test.go +++ b/kube/values_test.go @@ -24,8 +24,8 @@ func TestMakeValues(t *testing.T) { settings := ExportSettings{ OutputDir: outDir, RoleManifest: &model.RoleManifest{ - Roles: model.Roles{ - &model.Role{ + InstanceGroups: model.InstanceGroups{ + &model.InstanceGroup{ Name: "arole", Run: &model.RoleRun{ Scaling: &model.RoleRunScaling{}, @@ -57,8 +57,8 @@ func TestMakeValues(t *testing.T) { settings := ExportSettings{ OutputDir: outDir, RoleManifest: &model.RoleManifest{ - Roles: model.Roles{ - &model.Role{ + InstanceGroups: model.InstanceGroups{ + &model.InstanceGroup{ Name: "arole", Run: &model.RoleRun{ Scaling: &model.RoleRunScaling{}, @@ -82,7 +82,7 @@ func TestMakeValues(t 
*testing.T) { t.Parallel() settings := ExportSettings{ OutputDir: outDir, - RoleManifest: &model.RoleManifest{Roles: model.Roles{}, + RoleManifest: &model.RoleManifest{InstanceGroups: model.InstanceGroups{}, Configuration: &model.Configuration{}, }, } @@ -101,7 +101,7 @@ func TestMakeValues(t *testing.T) { t.Parallel() settings := ExportSettings{ OutputDir: outDir, - RoleManifest: &model.RoleManifest{Roles: model.Roles{}, + RoleManifest: &model.RoleManifest{InstanceGroups: model.InstanceGroups{}, Configuration: &model.Configuration{}, }, } @@ -122,7 +122,7 @@ func TestMakeValues(t *testing.T) { t.Parallel() settings := ExportSettings{ OutputDir: outDir, - RoleManifest: &model.RoleManifest{Roles: model.Roles{}, + RoleManifest: &model.RoleManifest{InstanceGroups: model.InstanceGroups{}, Configuration: &model.Configuration{}, }, } @@ -141,7 +141,7 @@ func TestMakeValues(t *testing.T) { t.Parallel() settings := ExportSettings{ OutputDir: outDir, - RoleManifest: &model.RoleManifest{Roles: model.Roles{}, + RoleManifest: &model.RoleManifest{InstanceGroups: model.InstanceGroups{}, Configuration: &model.Configuration{}, }, } diff --git a/model/mustache.go b/model/mustache.go index 5c4e6e1a..ee7def78 100644 --- a/model/mustache.go +++ b/model/mustache.go @@ -26,7 +26,7 @@ func MakeMapOfVariables(roleManifest *RoleManifest) CVMap { // GetVariablesForRole returns all the environment variables required for // calculating all the templates for the role -func (r *Role) GetVariablesForRole() (ConfigurationVariableSlice, error) { +func (r *InstanceGroup) GetVariablesForRole() (ConfigurationVariableSlice, error) { configsDictionary := MakeMapOfVariables(r.roleManifest) @@ -34,8 +34,8 @@ func (r *Role) GetVariablesForRole() (ConfigurationVariableSlice, error) { // First, render all referenced variables of type user. 
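	// Illustrative example (not in the original patch): a job property
	// named "nats.user" (hypothetical) is looked up under the template
	// key "properties.nats.user" produced by the fmt.Sprintf below.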
- for _, roleJob := range r.RoleJobs { - for _, property := range roleJob.Properties { + for _, jobReference := range r.JobReferences { + for _, property := range jobReference.Properties { propertyName := fmt.Sprintf("properties.%s", property.Name) for templatePropName, template := range r.Configuration.Templates { diff --git a/model/mustache_test.go b/model/mustache_test.go index 736247d5..29ef5186 100644 --- a/model/mustache_test.go +++ b/model/mustache_test.go @@ -47,7 +47,7 @@ func TestRoleVariables(t *testing.T) { assert.NoError(t, err) require.NotNil(t, roleManifest) - vars, err := roleManifest.Roles[0].GetVariablesForRole() + vars, err := roleManifest.InstanceGroups[0].GetVariablesForRole() assert.NoError(t, err) assert.NotNil(t, vars) diff --git a/model/roles.go b/model/roles.go index fd8965e8..45d77b80 100644 --- a/model/roles.go +++ b/model/roles.go @@ -56,14 +56,14 @@ const ( // RoleManifest represents a collection of roles type RoleManifest struct { - Roles Roles `yaml:"roles"` - Configuration *Configuration `yaml:"configuration"` + InstanceGroups InstanceGroups `yaml:"instance_groups"` + Configuration *Configuration `yaml:"configuration"` manifestFilePath string } -// RoleJob represents a job in the context of a role -type RoleJob struct { +// JobReference represents a job in the context of a role +type JobReference struct { *Job `yaml:"-"` // The resolved job Name string `yaml:"name"` // The name of the job ReleaseName string `yaml:"release_name"` // The release the job comes from @@ -82,19 +82,19 @@ const ( RoleTagActivePassive = RoleTag("active-passive") ) -// Role represents a collection of jobs that are colocated on a container -type Role struct { - Name string `yaml:"name"` - Description string `yaml:"description"` - EnvironScripts []string `yaml:"environment_scripts"` - Scripts []string `yaml:"scripts"` - PostConfigScripts []string `yaml:"post_config_scripts"` - Type RoleType `yaml:"type,omitempty"` - RoleJobs []*RoleJob `yaml:"jobs"` - Configuration *Configuration `yaml:"configuration"` - Run *RoleRun `yaml:"run"` - Tags []RoleTag `yaml:"tags"` - ColocatedContainers []string `yaml:"colocated_containers,omitempty"` +// InstanceGroup represents a collection of jobs that are colocated on a container +type InstanceGroup struct { + Name string `yaml:"name"` + Description string `yaml:"description"` + EnvironScripts []string `yaml:"environment_scripts"` + Scripts []string `yaml:"scripts"` + PostConfigScripts []string `yaml:"post_config_scripts"` + Type RoleType `yaml:"type,omitempty"` + JobReferences []*JobReference `yaml:"jobs"` + Configuration *Configuration `yaml:"configuration"` + Run *RoleRun `yaml:"run"` + Tags []RoleTag `yaml:"tags"` + ColocatedContainers []string `yaml:"colocated_containers,omitempty"` roleManifest *RoleManifest } @@ -191,8 +191,8 @@ type HealthProbe struct { FailureThreshold int `yaml:"failure_threshold,omitempty"` // Failure threshold in seconds, default 3, minimum 1 } -// Roles is an array of Role* -type Roles []*Role +// InstanceGroups is an array of InstanceGroup* +type InstanceGroups []*InstanceGroup // GeneratorType describes the type of generator used for the configuration value type GeneratorType string @@ -330,19 +330,19 @@ type ConfigurationVariableGenerator struct { ValueType string `yaml:"value_type"` } -// Len is the number of roles in the slice -func (roles Roles) Len() int { - return len(roles) +// Len is the number of instance groups in the slice +func (igs InstanceGroups) Len() int { + return len(igs) } // Less reports whether role at index 
 // Less reports whether the role at index i sorts before the role at index j
-func (roles Roles) Less(i, j int) bool {
-	return strings.Compare(roles[i].Name, roles[j].Name) < 0
+func (igs InstanceGroups) Less(i, j int) bool {
+	return strings.Compare(igs[i].Name, igs[j].Name) < 0
 }
 
 // Swap exchanges roles at index i and index j
-func (roles Roles) Swap(i, j int) {
-	roles[i], roles[j] = roles[j], roles[i]
+func (igs InstanceGroups) Swap(i, j int) {
+	igs[i], igs[j] = igs[j], igs[i]
 }
 
 // LoadRoleManifest loads a yaml manifest that details how jobs get grouped into roles
@@ -395,92 +395,92 @@ func (m *RoleManifest) resolveRoleManifest(releases []*Release, grapher util.Mod
 
 	allErrs := validation.ErrorList{}
 
-	for i := len(m.Roles) - 1; i >= 0; i-- {
-		role := m.Roles[i]
+	for i := len(m.InstanceGroups) - 1; i >= 0; i-- {
+		instanceGroup := m.InstanceGroups[i]
 
-		// Remove all roles that are not of the "bosh" or "bosh-task" type
+		// Remove all instance groups of the "docker" type; all other known types are kept.
 		// Default type is considered to be "bosh".
-		// The kept roles are validated.
-		switch role.Type {
+		// The kept instance groups are validated.
+		switch instanceGroup.Type {
 		case "":
-			role.Type = RoleTypeBosh
+			instanceGroup.Type = RoleTypeBosh
 		case RoleTypeBosh, RoleTypeBoshTask, RoleTypeColocatedContainer:
 			// Nothing to do.
 		case RoleTypeDocker:
-			m.Roles = append(m.Roles[:i], m.Roles[i+1:]...)
+			m.InstanceGroups = append(m.InstanceGroups[:i], m.InstanceGroups[i+1:]...)
 		default:
 			allErrs = append(allErrs, validation.Invalid(
-				fmt.Sprintf("roles[%s].type", role.Name),
-				role.Type, "Expected one of bosh, bosh-task, docker, or colocated-container"))
+				fmt.Sprintf("instance_groups[%s].type", instanceGroup.Name),
+				instanceGroup.Type, "Expected one of bosh, bosh-task, docker, or colocated-container"))
 		}
 
-		allErrs = append(allErrs, validateRoleTags(role)...)
-		allErrs = append(allErrs, validateRoleRun(role, m, declaredConfigs)...)
+		allErrs = append(allErrs, validateRoleTags(instanceGroup)...)
+		allErrs = append(allErrs, validateRoleRun(instanceGroup, m, declaredConfigs)...)
} - for _, role := range m.Roles { - role.roleManifest = m + for _, instanceGroup := range m.InstanceGroups { + instanceGroup.roleManifest = m - if role.Run != nil && role.Run.ActivePassiveProbe != "" { - if !role.HasTag(RoleTagActivePassive) { + if instanceGroup.Run != nil && instanceGroup.Run.ActivePassiveProbe != "" { + if !instanceGroup.HasTag(RoleTagActivePassive) { allErrs = append(allErrs, validation.Invalid( - fmt.Sprintf("roles[%s].run.active-passive-probe", role.Name), - role.Run.ActivePassiveProbe, - "Active/passive probes are only valid on roles with active-passive tag")) + fmt.Sprintf("instance_groups[%s].run.active-passive-probe", instanceGroup.Name), + instanceGroup.Run.ActivePassiveProbe, + "Active/passive probes are only valid on instance groups with active-passive tag")) } } - for _, roleJob := range role.RoleJobs { - release, ok := mappedReleases[roleJob.ReleaseName] + for _, jobReference := range instanceGroup.JobReferences { + release, ok := mappedReleases[jobReference.ReleaseName] if !ok { allErrs = append(allErrs, validation.Invalid( - fmt.Sprintf("roles[%s].jobs[%s]", role.Name, roleJob.Name), - roleJob.ReleaseName, + fmt.Sprintf("instance_groups[%s].jobs[%s]", instanceGroup.Name, jobReference.Name), + jobReference.ReleaseName, "Referenced release is not loaded")) continue } - job, err := release.LookupJob(roleJob.Name) + job, err := release.LookupJob(jobReference.Name) if err != nil { allErrs = append(allErrs, validation.Invalid( - fmt.Sprintf("roles[%s].jobs[%s]", role.Name, roleJob.Name), - roleJob.ReleaseName, err.Error())) + fmt.Sprintf("instance_groups[%s].jobs[%s]", instanceGroup.Name, jobReference.Name), + jobReference.ReleaseName, err.Error())) continue } - roleJob.Job = job + jobReference.Job = job if grapher != nil { _ = grapher.GraphNode(job.Fingerprint, map[string]string{"label": "job/" + job.Name}) } - if roleJob.ResolvedConsumers == nil { + if jobReference.ResolvedConsumers == nil { // No explicitly specified consumers - roleJob.ResolvedConsumers = make(map[string]jobConsumesInfo) + jobReference.ResolvedConsumers = make(map[string]jobConsumesInfo) } - for name, info := range roleJob.ResolvedConsumers { + for name, info := range jobReference.ResolvedConsumers { info.Name = name - roleJob.ResolvedConsumers[name] = info + jobReference.ResolvedConsumers[name] = info } } - role.calculateRoleConfigurationTemplates() + instanceGroup.calculateRoleConfigurationTemplates() // Validate that specified colocated containers are configured and of the // correct type - for idx, roleName := range role.ColocatedContainers { - if lookupRole := m.LookupRole(roleName); lookupRole == nil { + for idx, roleName := range instanceGroup.ColocatedContainers { + if lookupRole := m.LookupInstanceGroup(roleName); lookupRole == nil { allErrs = append(allErrs, validation.Invalid( - fmt.Sprintf("roles[%s].colocated_containers[%d]", role.Name, idx), + fmt.Sprintf("instance_groups[%s].colocated_containers[%d]", instanceGroup.Name, idx), roleName, - "There is no such role defined")) + "There is no such instance group defined")) } else if lookupRole.Type != RoleTypeColocatedContainer { allErrs = append(allErrs, validation.Invalid( - fmt.Sprintf("roles[%s].colocated_containers[%d]", role.Name, idx), + fmt.Sprintf("instance_groups[%s].colocated_containers[%d]", instanceGroup.Name, idx), roleName, - "The role is not of required type colocated-container")) + "The instance group is not of required type colocated-container")) } } } @@ -508,11 +508,11 @@ func (m *RoleManifest) 
resolveRoleManifest(releases []*Release, grapher util.Mod return nil } -// LookupRole will find the given role in the role manifest -func (m *RoleManifest) LookupRole(roleName string) *Role { - for _, role := range m.Roles { - if role.Name == roleName { - return role +// LookupInstanceGroup will find the given instance group in the role manifest +func (m *RoleManifest) LookupInstanceGroup(name string) *InstanceGroup { + for _, instanceGroup := range m.InstanceGroups { + if instanceGroup.Name == name { + return instanceGroup } } return nil @@ -527,28 +527,28 @@ func (m *RoleManifest) resolveLinks() validation.ErrorList { // involved here are the aliases, where appropriate. providersByName := make(map[string]jobProvidesInfo) providersByType := make(map[string][]jobProvidesInfo) - for _, role := range m.Roles { - for _, roleJob := range role.RoleJobs { + for _, instanceGroup := range m.InstanceGroups { + for _, jobReference := range instanceGroup.JobReferences { var availableProviders []string - for availableName, availableProvider := range roleJob.Job.AvailableProviders { + for availableName, availableProvider := range jobReference.Job.AvailableProviders { availableProviders = append(availableProviders, availableName) if availableProvider.Type != "" { providersByType[availableProvider.Type] = append(providersByType[availableProvider.Type], jobProvidesInfo{ jobLinkInfo: jobLinkInfo{ Name: availableProvider.Name, Type: availableProvider.Type, - RoleName: role.Name, - JobName: roleJob.Name, + RoleName: instanceGroup.Name, + JobName: jobReference.Name, }, Properties: availableProvider.Properties, }) } } - for name, provider := range roleJob.ExportedProviders { - info, ok := roleJob.Job.AvailableProviders[name] + for name, provider := range jobReference.ExportedProviders { + info, ok := jobReference.Job.AvailableProviders[name] if !ok { errors = append(errors, validation.NotFound( - fmt.Sprintf("roles[%s].jobs[%s].provides[%s]", role.Name, roleJob.Name, name), + fmt.Sprintf("instance_groups[%s].jobs[%s].provides[%s]", instanceGroup.Name, jobReference.Name, name), fmt.Sprintf("Provider not found; available providers: %v", availableProviders))) continue } @@ -559,8 +559,8 @@ func (m *RoleManifest) resolveLinks() validation.ErrorList { jobLinkInfo: jobLinkInfo{ Name: info.Name, Type: info.Type, - RoleName: role.Name, - JobName: roleJob.Name, + RoleName: instanceGroup.Name, + JobName: jobReference.Name, }, Properties: info.Properties, } @@ -569,12 +569,12 @@ func (m *RoleManifest) resolveLinks() validation.ErrorList { } // Resolve the consumers - for _, role := range m.Roles { - for _, roleJob := range role.RoleJobs { - expectedConsumers := make([]jobConsumesInfo, len(roleJob.Job.DesiredConsumers)) - copy(expectedConsumers, roleJob.Job.DesiredConsumers) + for _, instanceGroup := range m.InstanceGroups { + for _, jobReference := range instanceGroup.JobReferences { + expectedConsumers := make([]jobConsumesInfo, len(jobReference.Job.DesiredConsumers)) + copy(expectedConsumers, jobReference.Job.DesiredConsumers) // Deal with any explicitly marked consumers in the role manifest - for consumerName, consumerInfo := range roleJob.ResolvedConsumers { + for consumerName, consumerInfo := range jobReference.ResolvedConsumers { consumerAlias := consumerName if consumerInfo.Alias != "" { consumerAlias = consumerInfo.Alias @@ -582,7 +582,7 @@ func (m *RoleManifest) resolveLinks() validation.ErrorList { if consumerAlias == "" { // There was a consumer with an explicitly empty name errors = append(errors, 
validation.Invalid( - fmt.Sprintf(`role[%s].job[%s]`, role.Name, roleJob.Name), + fmt.Sprintf(`instance_group[%s].job[%s]`, instanceGroup.Name, jobReference.Name), "name", fmt.Sprintf("consumer has no name"))) continue @@ -590,11 +590,11 @@ func (m *RoleManifest) resolveLinks() validation.ErrorList { provider, ok := providersByName[consumerAlias] if !ok { errors = append(errors, validation.NotFound( - fmt.Sprintf(`role[%s].job[%s].consumes[%s]`, role.Name, roleJob.Name, consumerName), + fmt.Sprintf(`instance_group[%s].job[%s].consumes[%s]`, instanceGroup.Name, jobReference.Name, consumerName), fmt.Sprintf(`consumer %s not found`, consumerAlias))) continue } - roleJob.ResolvedConsumers[consumerName] = jobConsumesInfo{ + jobReference.ResolvedConsumers[consumerName] = jobConsumesInfo{ jobLinkInfo: provider.jobLinkInfo, } @@ -624,15 +624,15 @@ func (m *RoleManifest) resolveLinks() validation.ErrorList { if name == "" { name = provider.Name } - info := roleJob.ResolvedConsumers[name] + info := jobReference.ResolvedConsumers[name] info.Name = provider.Name info.Type = provider.Type info.RoleName = provider.RoleName info.JobName = provider.JobName - roleJob.ResolvedConsumers[name] = info + jobReference.ResolvedConsumers[name] = info } else if !consumerInfo.Optional { errors = append(errors, validation.Required( - fmt.Sprintf(`role[%s].job[%s].consumes[%s]`, role.Name, roleJob.Name, consumerInfo.Name), + fmt.Sprintf(`instance_group[%s].job[%s].consumes[%s]`, instanceGroup.Name, jobReference.Name, consumerInfo.Name), fmt.Sprintf(`failed to resolve provider %s (type %s)`, consumerInfo.Name, consumerInfo.Type))) } } @@ -642,44 +642,44 @@ func (m *RoleManifest) resolveLinks() validation.ErrorList { return errors } -// SelectRoles will find only the given roles in the role manifest -func (m *RoleManifest) SelectRoles(roleNames []string) (Roles, error) { +// SelectInstanceGroups will find only the given instance groups in the role manifest +func (m *RoleManifest) SelectInstanceGroups(roleNames []string) (InstanceGroups, error) { if len(roleNames) == 0 { - // No role names specified, assume all roles - return m.Roles, nil + // No names specified, assume all instance groups + return m.InstanceGroups, nil } - var results Roles + var results InstanceGroups var missingRoles []string for _, roleName := range roleNames { - if role := m.LookupRole(roleName); role != nil { - results = append(results, role) + if instanceGroup := m.LookupInstanceGroup(roleName); instanceGroup != nil { + results = append(results, instanceGroup) } else { missingRoles = append(missingRoles, roleName) } } if len(missingRoles) > 0 { - return nil, fmt.Errorf("Some roles are unknown: %v", missingRoles) + return nil, fmt.Errorf("Some instance groups are unknown: %v", missingRoles) } return results, nil } -// GetLongDescription returns the description of the role plus a list of all included jobs -func (r *Role) GetLongDescription() string { - desc := r.Description +// GetLongDescription returns the description of the instance group plus a list of all included jobs +func (g *InstanceGroup) GetLongDescription() string { + desc := g.Description if len(desc) > 0 { desc += "\n\n" } - desc += fmt.Sprintf("The %s role contains the following jobs:", r.Name) + desc += fmt.Sprintf("The %s instance group contains the following jobs:", g.Name) var noDesc []string also := "" - for _, roleJob := range r.RoleJobs { - if roleJob.Description == "" { - noDesc = append(noDesc, roleJob.Name) + for _, jobReference := range g.JobReferences { + if 
jobReference.Description == "" {
+			noDesc = append(noDesc, jobReference.Name)
 		} else {
-			desc += fmt.Sprintf("\n\n- %s: %s", roleJob.Name, roleJob.Description)
+			desc += fmt.Sprintf("\n\n- %s: %s", jobReference.Name, jobReference.Description)
 			also = "Also: "
 		}
 	}
@@ -689,17 +689,17 @@ func (r *Role) GetLongDescription() string {
 	return desc
 }
 
-// GetScriptPaths returns the paths to the startup / post configgin scripts for a role
-func (r *Role) GetScriptPaths() map[string]string {
+// GetScriptPaths returns the paths to the startup / post configgin scripts for an instance group
+func (g *InstanceGroup) GetScriptPaths() map[string]string {
 	result := map[string]string{}
 
-	for _, scriptList := range [][]string{r.EnvironScripts, r.Scripts, r.PostConfigScripts} {
+	for _, scriptList := range [][]string{g.EnvironScripts, g.Scripts, g.PostConfigScripts} {
 		for _, script := range scriptList {
 			if filepath.IsAbs(script) {
 				// Absolute paths _inside_ the container; there is nothing to copy
 				continue
 			}
-			result[script] = filepath.Join(filepath.Dir(r.roleManifest.manifestFilePath), script)
+			result[script] = filepath.Join(filepath.Dir(g.roleManifest.manifestFilePath), script)
 		}
 	}
 
@@ -708,10 +708,10 @@ func (r *Role) GetScriptPaths() map[string]string {
 }
 
 // GetScriptSignatures returns the SHA1 of all of the script file names and contents
-func (r *Role) GetScriptSignatures() (string, error) {
+func (g *InstanceGroup) GetScriptSignatures() (string, error) {
 	hasher := sha1.New()
 
-	paths := r.GetScriptPaths()
+	paths := g.GetScriptPaths()
 	scripts := make([]string, 0, len(paths))
 
 	for filename := range paths {
@@ -739,13 +739,13 @@
 }
 
 // GetTemplateSignatures returns the SHA1 of all of the templates and contents
-func (r *Role) GetTemplateSignatures() (string, error) {
+func (g *InstanceGroup) GetTemplateSignatures() (string, error) {
 	hasher := sha1.New()
 	i := 0
 
-	templates := make([]string, len(r.Configuration.Templates))
+	templates := make([]string, len(g.Configuration.Templates))
 
-	for k, v := range r.Configuration.Templates {
+	for k, v := range g.Configuration.Templates {
 		templates[i] = fmt.Sprintf("%s: %s", k, v)
 		i++
 	}
@@ -763,20 +763,19 @@
 // role dev version, and the aggregated spec and opinion
 // information. In this manner opinion changes cause a rebuild of the
 // associated role images.
-func (r *Role) GetRoleDevVersion(opinions *Opinions, tagExtra, fissileVersion string, grapher util.ModelGrapher) (string, error) {
+func (g *InstanceGroup) GetRoleDevVersion(opinions *Opinions, tagExtra, fissileVersion string, grapher util.ModelGrapher) (string, error) {
 	// Basic role version
-	jobPkgVersion, inputSigs, err := r.getRoleJobAndPackagesSignature(grapher)
+	jobPkgVersion, inputSigs, err := g.getRoleJobAndPackagesSignature(grapher)
 	if err != nil {
-		return "", fmt.Errorf("Error calculating checksum for role %s: %s", r.Name, err.Error())
+		return "", fmt.Errorf("Error calculating checksum for instance group %s: %s", g.Name, err.Error())
 	}
 
-	// Aggregate with the properties from the opinions, per each
-	// job in the role. This is similar to what NewDockerPopulator
-	// (and its subordinate WriteConfigs) do, with an important
-	// difference:
-	// - NDP/WC does not care about order. We do, as we need a
-	// stable hash for the configuration.
+	// Aggregate with the properties from the opinions, for each job in the
+	// instance group. 
This is similar to what NewDockerPopulator (and its + // subordinate WriteConfigs) do, with an important difference: + // - NDP/WC does not care about order. We do, as we need a stable hash for the + // configuration. signatures := []string{ jobPkgVersion, fissileVersion, @@ -791,9 +790,9 @@ func (r *Role) GetRoleDevVersion(opinions *Opinions, tagExtra, fissileVersion st // fix. Avoid sorting for now. Also note, if a property is // used multiple times, in different jobs, it will be added // that often. No deduplication across the jobs. - for _, roleJob := range r.RoleJobs { + for _, jobReference := range g.JobReferences { // Get properties ... - properties, err := roleJob.GetPropertiesForJob(opinions) + properties, err := jobReference.GetPropertiesForJob(opinions) if err != nil { return "", err } @@ -826,17 +825,17 @@ func (r *Role) GetRoleDevVersion(opinions *Opinions, tagExtra, fissileVersion st } if grapher != nil { extraGraphEdges = append(extraGraphEdges, []string{ - fmt.Sprintf("properties/%s:", roleJob.Name), + fmt.Sprintf("properties/%s:", jobReference.Name), hex.EncodeToString(propertyHasher.Sum(nil))}) } } devVersion := AggregateSignatures(signatures) if grapher != nil { - _ = grapher.GraphNode(devVersion, map[string]string{"label": "role/" + r.Name}) + _ = grapher.GraphNode(devVersion, map[string]string{"label": "role/" + g.Name}) for _, inputSig := range inputSigs { _ = grapher.GraphEdge(inputSig, jobPkgVersion, nil) } - _ = grapher.GraphNode(jobPkgVersion, map[string]string{"label": "role/jobpkg/" + r.Name}) + _ = grapher.GraphNode(jobPkgVersion, map[string]string{"label": "role/jobpkg/" + g.Name}) _ = grapher.GraphEdge(jobPkgVersion, devVersion, nil) for _, extraGraphEdgeParts := range extraGraphEdges { prefix := extraGraphEdgeParts[0] @@ -853,23 +852,23 @@ func (r *Role) GetRoleDevVersion(opinions *Opinions, tagExtra, fissileVersion st // getRoleJobAndPackagesSignature gets the aggregate signature of all jobs and packages // It also returns a list of all hashes involved in calculating the final result -func (r *Role) getRoleJobAndPackagesSignature(grapher util.ModelGrapher) (string, []string, error) { +func (g *InstanceGroup) getRoleJobAndPackagesSignature(grapher util.ModelGrapher) (string, []string, error) { roleSignature := "" var inputs []string var packages Packages // Jobs are *not* sorted because they are an array and the order may be // significant, in particular for bosh-task roles. - for _, roleJob := range r.RoleJobs { - roleSignature = fmt.Sprintf("%s\n%s", roleSignature, roleJob.SHA1) - packages = append(packages, roleJob.Packages...) - inputs = append(inputs, roleJob.Fingerprint) + for _, jobReference := range g.JobReferences { + roleSignature = fmt.Sprintf("%s\n%s", roleSignature, jobReference.SHA1) + packages = append(packages, jobReference.Packages...) 
+ inputs = append(inputs, jobReference.Fingerprint) if grapher != nil { - _ = grapher.GraphNode(roleJob.Fingerprint, - map[string]string{"label": fmt.Sprintf("job/%s/%s", roleJob.ReleaseName, roleJob.Name)}) - _ = grapher.GraphEdge("release/"+roleJob.ReleaseName, roleJob.Fingerprint, nil) - for _, pkg := range roleJob.Packages { - _ = grapher.GraphEdge("release/"+roleJob.ReleaseName, pkg.Fingerprint, nil) + _ = grapher.GraphNode(jobReference.Fingerprint, + map[string]string{"label": fmt.Sprintf("job/%s/%s", jobReference.ReleaseName, jobReference.Name)}) + _ = grapher.GraphEdge("release/"+jobReference.ReleaseName, jobReference.Fingerprint, nil) + for _, pkg := range jobReference.Packages { + _ = grapher.GraphEdge("release/"+jobReference.ReleaseName, pkg.Fingerprint, nil) } } } @@ -884,15 +883,15 @@ func (r *Role) getRoleJobAndPackagesSignature(grapher util.ModelGrapher) (string } // Collect signatures for various script sections - sig, err := r.GetScriptSignatures() + sig, err := g.GetScriptSignatures() if err != nil { return "", nil, err } roleSignature = fmt.Sprintf("%s\n%s", roleSignature, sig) // If there are templates, generate signature for them - if r.Configuration != nil && r.Configuration.Templates != nil { - sig, err = r.GetTemplateSignatures() + if g.Configuration != nil && g.Configuration.Templates != nil { + sig, err = g.GetTemplateSignatures() if err != nil { return "", nil, err } @@ -905,8 +904,8 @@ func (r *Role) getRoleJobAndPackagesSignature(grapher util.ModelGrapher) (string } // HasTag returns true if the role has a specific tag -func (r *Role) HasTag(tag RoleTag) bool { - for _, t := range r.Tags { +func (g *InstanceGroup) HasTag(tag RoleTag) bool { + for _, t := range g.Tags { if t == tag { return true } @@ -915,28 +914,28 @@ func (r *Role) HasTag(tag RoleTag) bool { return false } -func (r *Role) calculateRoleConfigurationTemplates() { - if r.Configuration == nil { - r.Configuration = &Configuration{} +func (g *InstanceGroup) calculateRoleConfigurationTemplates() { + if g.Configuration == nil { + g.Configuration = &Configuration{} } - if r.Configuration.Templates == nil { - r.Configuration.Templates = map[string]string{} + if g.Configuration.Templates == nil { + g.Configuration.Templates = map[string]string{} } roleConfigs := map[string]string{} - for k, v := range r.roleManifest.Configuration.Templates { + for k, v := range g.roleManifest.Configuration.Templates { roleConfigs[k] = v } - for k, v := range r.Configuration.Templates { + for k, v := range g.Configuration.Templates { roleConfigs[k] = v } - r.Configuration.Templates = roleConfigs + g.Configuration.Templates = roleConfigs } // WriteConfigs merges the job's spec with the opinions and returns the result as JSON. 
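+// A rough sketch of the resulting shape, assuming the json tags on the
+// anonymous struct below (illustrative, not normative):
+//
+//	{"job":{"name":...},"properties":{...},"consumes":{...},"exported_properties":[...]}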
-func (roleJob *RoleJob) WriteConfigs(role *Role, lightOpinionsPath, darkOpinionsPath string) ([]byte, error) { +func (j *JobReference) WriteConfigs(instanceGroup *InstanceGroup, lightOpinionsPath, darkOpinionsPath string) ([]byte, error) { var config struct { Job struct { Name string `json:"name"` @@ -956,9 +955,9 @@ func (roleJob *RoleJob) WriteConfigs(role *Role, lightOpinionsPath, darkOpinions config.ExportedProperties = make([]string, 0) config.Consumes = make(map[string]jobLinkInfo) - config.Job.Name = role.Name + config.Job.Name = instanceGroup.Name - for _, consumer := range roleJob.ResolvedConsumers { + for _, consumer := range j.ResolvedConsumers { config.Consumes[consumer.Name] = consumer.jobLinkInfo } @@ -966,13 +965,13 @@ func (roleJob *RoleJob) WriteConfigs(role *Role, lightOpinionsPath, darkOpinions if err != nil { return nil, err } - properties, err := roleJob.Job.GetPropertiesForJob(opinions) + properties, err := j.Job.GetPropertiesForJob(opinions) if err != nil { return nil, err } config.Properties = properties - for _, provider := range roleJob.Job.AvailableProviders { + for _, provider := range j.Job.AvailableProviders { config.ExportedProperties = append(config.ExportedProperties, provider.Properties...) } @@ -1075,9 +1074,9 @@ func validateVariableUsage(roleManifest *RoleManifest) validation.ErrorList { // variables. Remove each found from the set of unused // configs. - for _, role := range roleManifest.Roles { - for _, roleJob := range role.RoleJobs { - for _, property := range roleJob.Properties { + for _, role := range roleManifest.InstanceGroups { + for _, jobReference := range role.JobReferences { + for _, property := range jobReference.Properties { propertyName := fmt.Sprintf("properties.%s", property.Name) if template, ok := role.Configuration.Templates[propertyName]; ok { @@ -1147,21 +1146,21 @@ func validateTemplateUsage(roleManifest *RoleManifest) validation.ErrorList { // See also 'GetVariablesForRole' (mustache.go), and LoadRoleManifest (caller, this file) declaredConfigs := MakeMapOfVariables(roleManifest) - // Iterate over all roles, jobs, templates, extract the used + // Iterate over all instance groups, jobs, templates, extract the used // variables. Report all without a declaration. - for _, role := range roleManifest.Roles { + for _, instanceGroup := range roleManifest.InstanceGroups { // Note, we cannot use GetVariablesForRole here // because it will abort on bad templates. Here we // have to ignore them (no sensible variable // references) and continue to check everything else. - for _, roleJob := range role.RoleJobs { - for _, property := range roleJob.Properties { + for _, jobReference := range instanceGroup.JobReferences { + for _, property := range jobReference.Properties { propertyName := fmt.Sprintf("properties.%s", property.Name) - if template, ok := role.Configuration.Templates[propertyName]; ok { + if template, ok := instanceGroup.Configuration.Templates[propertyName]; ok { varsInTemplate, err := parseTemplate(template) if err != nil { continue @@ -1214,47 +1213,47 @@ func validateTemplateUsage(roleManifest *RoleManifest) validation.ErrorList { // validateRoleRun tests whether required fields in the RoleRun are // set. Note, some of the fields have type-dependent checks. Some // issues are fixed silently. 
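+// For example, Run.Scaling.HA silently defaults to Run.Scaling.Min when unset,
+// and the legacy PersistentVolumes/SharedVolumes lists are folded into
+// Run.Volumes, as the body below shows.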
-func validateRoleRun(role *Role, roleManifest *RoleManifest, declared CVMap) validation.ErrorList { +func validateRoleRun(instanceGroup *InstanceGroup, roleManifest *RoleManifest, declared CVMap) validation.ErrorList { allErrs := validation.ErrorList{} - if role.Run == nil { + if instanceGroup.Run == nil { return append(allErrs, validation.Required( - fmt.Sprintf("roles[%s].run", role.Name), "")) + fmt.Sprintf("instance_groups[%s].run", instanceGroup.Name), "")) } - if role.Run.Scaling != nil && role.Run.Scaling.HA == 0 { - role.Run.Scaling.HA = role.Run.Scaling.Min + if instanceGroup.Run.Scaling != nil && instanceGroup.Run.Scaling.HA == 0 { + instanceGroup.Run.Scaling.HA = instanceGroup.Run.Scaling.Min } - allErrs = append(allErrs, normalizeFlightStage(role)...) - allErrs = append(allErrs, validateHealthCheck(role)...) - allErrs = append(allErrs, validateRoleMemory(role)...) - allErrs = append(allErrs, validateRoleCPU(role)...) + allErrs = append(allErrs, normalizeFlightStage(instanceGroup)...) + allErrs = append(allErrs, validateHealthCheck(instanceGroup)...) + allErrs = append(allErrs, validateRoleMemory(instanceGroup)...) + allErrs = append(allErrs, validateRoleCPU(instanceGroup)...) - for _, exposedPort := range role.Run.ExposedPorts { - allErrs = append(allErrs, ValidateExposedPorts(role.Name, exposedPort)...) + for _, exposedPort := range instanceGroup.Run.ExposedPorts { + allErrs = append(allErrs, ValidateExposedPorts(instanceGroup.Name, exposedPort)...) } - if role.Run.ServiceAccount != "" { - accountName := role.Run.ServiceAccount + if instanceGroup.Run.ServiceAccount != "" { + accountName := instanceGroup.Run.ServiceAccount if _, ok := roleManifest.Configuration.Authorization.Accounts[accountName]; !ok { allErrs = append(allErrs, validation.NotFound( - fmt.Sprintf("roles[%s].run.service-account", role.Name), accountName)) + fmt.Sprintf("instance_groups[%s].run.service-account", instanceGroup.Name), accountName)) } } // Backwards compat: convert separate volume lists to the centralized one - for _, persistentVolume := range role.Run.PersistentVolumes { + for _, persistentVolume := range instanceGroup.Run.PersistentVolumes { persistentVolume.Type = VolumeTypePersistent - role.Run.Volumes = append(role.Run.Volumes, persistentVolume) + instanceGroup.Run.Volumes = append(instanceGroup.Run.Volumes, persistentVolume) } - for _, sharedVolume := range role.Run.SharedVolumes { + for _, sharedVolume := range instanceGroup.Run.SharedVolumes { sharedVolume.Type = VolumeTypeShared - role.Run.Volumes = append(role.Run.Volumes, sharedVolume) + instanceGroup.Run.Volumes = append(instanceGroup.Run.Volumes, sharedVolume) } - role.Run.PersistentVolumes = nil - role.Run.SharedVolumes = nil - for _, volume := range role.Run.Volumes { + instanceGroup.Run.PersistentVolumes = nil + instanceGroup.Run.SharedVolumes = nil + for _, volume := range instanceGroup.Run.Volumes { switch volume.Type { case VolumeTypePersistent: case VolumeTypeShared: @@ -1263,7 +1262,7 @@ func validateRoleRun(role *Role, roleManifest *RoleManifest, declared CVMap) val case VolumeTypeEmptyDir: default: allErrs = append(allErrs, validation.Invalid( - fmt.Sprintf("roles[%s].run.volumes[%s]", role.Name, volume.Tag), + fmt.Sprintf("instance_groups[%s].run.volumes[%s]", instanceGroup.Name, volume.Tag), volume.Type, fmt.Sprintf("Invalid volume type '%s'", volume.Type))) } @@ -1271,34 +1270,34 @@ func validateRoleRun(role *Role, roleManifest *RoleManifest, declared CVMap) val // Normalize capabilities to upper case, if any. 
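+	// For example, a declared capability "net_admin" is stored as "NET_ADMIN";
+	// IsPrivileged later checks this normalized list for "ALL".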
var capabilities []string - for _, cap := range role.Run.Capabilities { + for _, cap := range instanceGroup.Run.Capabilities { capabilities = append(capabilities, strings.ToUpper(cap)) } - role.Run.Capabilities = capabilities + instanceGroup.Run.Capabilities = capabilities - if len(role.Run.Environment) == 0 { + if len(instanceGroup.Run.Environment) == 0 { return allErrs } - if role.Type == RoleTypeDocker { + if instanceGroup.Type == RoleTypeDocker { // The environment variables used by docker roles must // all be declared, report those which are not. - for _, envVar := range role.Run.Environment { + for _, envVar := range instanceGroup.Run.Environment { if _, ok := declared[envVar]; ok { continue } allErrs = append(allErrs, validation.NotFound( - fmt.Sprintf("roles[%s].run.env", role.Name), + fmt.Sprintf("instance_groups[%s].run.env", instanceGroup.Name), fmt.Sprintf("No variable declaration of '%s'", envVar))) } } else { - // Bosh roles must not provide environment variables. + // Bosh instance groups must not provide environment variables. allErrs = append(allErrs, validation.Forbidden( - fmt.Sprintf("roles[%s].run.env", role.Name), - "Non-docker role declares bogus parameters")) + fmt.Sprintf("instance_groups[%s].run.env", instanceGroup.Name), + "Non-docker instance group declares bogus parameters")) } return allErrs @@ -1309,7 +1308,7 @@ func validateRoleRun(role *Role, roleManifest *RoleManifest, declared CVMap) val func ValidateExposedPorts(name string, exposedPorts *RoleRunExposedPort) validation.ErrorList { allErrs := validation.ErrorList{} - fieldName := fmt.Sprintf("roles[%s].run.exposed-ports[%s]", name, exposedPorts.Name) + fieldName := fmt.Sprintf("instance_groups[%s].run.exposed-ports[%s]", name, exposedPorts.Name) // Validate Name if exposedPorts.Name == "" { @@ -1377,34 +1376,34 @@ func ValidateExposedPorts(name string, exposedPorts *RoleRunExposedPort) validat // validateRoleMemory validates memory requests and limits, and // converts the old key (`memory`, run.MemRequest), to the new // form. Afterward only run.Memory is valid. -func validateRoleMemory(role *Role) validation.ErrorList { +func validateRoleMemory(instanceGroup *InstanceGroup) validation.ErrorList { allErrs := validation.ErrorList{} - if role.Run.Memory == nil { - if role.Run.MemRequest != nil { - allErrs = append(allErrs, validation.ValidateNonnegativeField(*role.Run.MemRequest, - fmt.Sprintf("roles[%s].run.memory", role.Name))...) + if instanceGroup.Run.Memory == nil { + if instanceGroup.Run.MemRequest != nil { + allErrs = append(allErrs, validation.ValidateNonnegativeField(*instanceGroup.Run.MemRequest, + fmt.Sprintf("instance_groups[%s].run.memory", instanceGroup.Name))...) } - role.Run.Memory = &RoleRunMemory{Request: role.Run.MemRequest} + instanceGroup.Run.Memory = &RoleRunMemory{Request: instanceGroup.Run.MemRequest} return allErrs } // assert: role.Run.Memory != nil - if role.Run.Memory.Request == nil { - if role.Run.MemRequest != nil { - allErrs = append(allErrs, validation.ValidateNonnegativeField(*role.Run.MemRequest, - fmt.Sprintf("roles[%s].run.memory", role.Name))...) + if instanceGroup.Run.Memory.Request == nil { + if instanceGroup.Run.MemRequest != nil { + allErrs = append(allErrs, validation.ValidateNonnegativeField(*instanceGroup.Run.MemRequest, + fmt.Sprintf("instance_groups[%s].run.memory", instanceGroup.Name))...) 
} - role.Run.Memory.Request = role.Run.MemRequest + instanceGroup.Run.Memory.Request = instanceGroup.Run.MemRequest } else { - allErrs = append(allErrs, validation.ValidateNonnegativeField(*role.Run.Memory.Request, - fmt.Sprintf("roles[%s].run.mem.request", role.Name))...) + allErrs = append(allErrs, validation.ValidateNonnegativeField(*instanceGroup.Run.Memory.Request, + fmt.Sprintf("instance_groups[%s].run.mem.request", instanceGroup.Name))...) } - if role.Run.Memory.Limit != nil { - allErrs = append(allErrs, validation.ValidateNonnegativeField(*role.Run.Memory.Limit, - fmt.Sprintf("roles[%s].run.mem.limit", role.Name))...) + if instanceGroup.Run.Memory.Limit != nil { + allErrs = append(allErrs, validation.ValidateNonnegativeField(*instanceGroup.Run.Memory.Limit, + fmt.Sprintf("instance_groups[%s].run.mem.limit", instanceGroup.Name))...) } return allErrs @@ -1413,67 +1412,67 @@ func validateRoleMemory(role *Role) validation.ErrorList { // validateRoleCPU validates cpu requests and limits, and converts the // old key (`virtual-cpus`, run.VirtualCPUs), to the new // form. Afterward only run.CPU is valid. -func validateRoleCPU(role *Role) validation.ErrorList { +func validateRoleCPU(instanceGroup *InstanceGroup) validation.ErrorList { allErrs := validation.ErrorList{} - if role.Run.CPU == nil { - if role.Run.VirtualCPUs != nil { - allErrs = append(allErrs, validation.ValidateNonnegativeFieldFloat(*role.Run.VirtualCPUs, - fmt.Sprintf("roles[%s].run.virtual-cpus", role.Name))...) + if instanceGroup.Run.CPU == nil { + if instanceGroup.Run.VirtualCPUs != nil { + allErrs = append(allErrs, validation.ValidateNonnegativeFieldFloat(*instanceGroup.Run.VirtualCPUs, + fmt.Sprintf("instance_groups[%s].run.virtual-cpus", instanceGroup.Name))...) } - role.Run.CPU = &RoleRunCPU{Request: role.Run.VirtualCPUs} + instanceGroup.Run.CPU = &RoleRunCPU{Request: instanceGroup.Run.VirtualCPUs} return allErrs } // assert: role.Run.CPU != nil - if role.Run.CPU.Request == nil { - if role.Run.VirtualCPUs != nil { - allErrs = append(allErrs, validation.ValidateNonnegativeFieldFloat(*role.Run.VirtualCPUs, - fmt.Sprintf("roles[%s].run.virtual-cpus", role.Name))...) + if instanceGroup.Run.CPU.Request == nil { + if instanceGroup.Run.VirtualCPUs != nil { + allErrs = append(allErrs, validation.ValidateNonnegativeFieldFloat(*instanceGroup.Run.VirtualCPUs, + fmt.Sprintf("instance_groups[%s].run.virtual-cpus", instanceGroup.Name))...) } - role.Run.CPU.Request = role.Run.VirtualCPUs + instanceGroup.Run.CPU.Request = instanceGroup.Run.VirtualCPUs } else { - allErrs = append(allErrs, validation.ValidateNonnegativeFieldFloat(*role.Run.CPU.Request, - fmt.Sprintf("roles[%s].run.cpu.request", role.Name))...) + allErrs = append(allErrs, validation.ValidateNonnegativeFieldFloat(*instanceGroup.Run.CPU.Request, + fmt.Sprintf("instance_groups[%s].run.cpu.request", instanceGroup.Name))...) } - if role.Run.CPU.Limit != nil { - allErrs = append(allErrs, validation.ValidateNonnegativeFieldFloat(*role.Run.CPU.Limit, - fmt.Sprintf("roles[%s].run.cpu.limit", role.Name))...) + if instanceGroup.Run.CPU.Limit != nil { + allErrs = append(allErrs, validation.ValidateNonnegativeFieldFloat(*instanceGroup.Run.CPU.Limit, + fmt.Sprintf("instance_groups[%s].run.cpu.limit", instanceGroup.Name))...) 
 	}
 
 	return allErrs
 }
 
-// validateHealthCheck reports a role with conflicting health
-// checks in its probes
-func validateHealthCheck(role *Role) validation.ErrorList {
+// validateHealthCheck reports an instance group with conflicting health checks
+// in its probes
+func validateHealthCheck(instanceGroup *InstanceGroup) validation.ErrorList {
 	allErrs := validation.ErrorList{}
 
-	if role.Run.HealthCheck == nil {
+	if instanceGroup.Run.HealthCheck == nil {
 		// No health checks, nothing to validate
 		return allErrs
 	}
 
 	// Ensure that we don't have conflicting health checks
-	if role.Run.HealthCheck.Readiness != nil {
+	if instanceGroup.Run.HealthCheck.Readiness != nil {
 		allErrs = append(allErrs,
-			validateHealthProbe(role, "readiness",
-				role.Run.HealthCheck.Readiness)...)
+			validateHealthProbe(instanceGroup, "readiness",
+				instanceGroup.Run.HealthCheck.Readiness)...)
 	}
-	if role.Run.HealthCheck.Liveness != nil {
+	if instanceGroup.Run.HealthCheck.Liveness != nil {
 		allErrs = append(allErrs,
-			validateHealthProbe(role, "liveness",
-				role.Run.HealthCheck.Liveness)...)
+			validateHealthProbe(instanceGroup, "liveness",
+				instanceGroup.Run.HealthCheck.Liveness)...)
 	}
 
 	return allErrs
 }
 
-// validateHealthProbe reports a role with conflicting health checks
+// validateHealthProbe reports an instance group with conflicting health checks
 // in the specified probe.
-func validateHealthProbe(role *Role, probeName string, probe *HealthProbe) validation.ErrorList {
+func validateHealthProbe(instanceGroup *InstanceGroup, probeName string, probe *HealthProbe) validation.ErrorList {
 	allErrs := validation.ErrorList{}
 
 	checks := make([]string, 0, 3)
@@ -1488,66 +1487,66 @@
 	}
 	if len(checks) > 1 {
 		allErrs = append(allErrs, validation.Invalid(
-			fmt.Sprintf("roles[%s].run.healthcheck.%s", role.Name, probeName),
+			fmt.Sprintf("instance_groups[%s].run.healthcheck.%s", instanceGroup.Name, probeName),
 			checks, "Expected at most one of url, command, or port"))
 	}
 
-	switch role.Type {
+	switch instanceGroup.Type {
 	case RoleTypeBosh:
 		if len(checks) == 0 {
 			allErrs = append(allErrs, validation.Required(
-				fmt.Sprintf("roles[%s].run.healthcheck.%s.command", role.Name, probeName),
+				fmt.Sprintf("instance_groups[%s].run.healthcheck.%s.command", instanceGroup.Name, probeName),
 				"Health check requires a command"))
 		} else if checks[0] != "command" {
 			allErrs = append(allErrs, validation.Invalid(
-				fmt.Sprintf("roles[%s].run.healthcheck.%s", role.Name, probeName),
-				checks, "Only command health checks are supported for BOSH roles"))
+				fmt.Sprintf("instance_groups[%s].run.healthcheck.%s", instanceGroup.Name, probeName),
+				checks, "Only command health checks are supported for BOSH instance groups"))
 		} else if probeName != "readiness" && len(probe.Command) > 1 {
 			allErrs = append(allErrs, validation.Invalid(
-				fmt.Sprintf("roles[%s].run.healthcheck.%s.command", role.Name, probeName),
+				fmt.Sprintf("instance_groups[%s].run.healthcheck.%s.command", instanceGroup.Name, probeName),
 				probe.Command, fmt.Sprintf("%s check can only have one command", probeName)))
 		}
 
 	case RoleTypeBoshTask:
 		if len(checks) > 0 {
 			allErrs = append(allErrs, validation.Forbidden(
-				fmt.Sprintf("roles[%s].run.healthcheck.%s", role.Name, probeName),
-				"bosh-task roles cannot have health checks"))
+				fmt.Sprintf("instance_groups[%s].run.healthcheck.%s", instanceGroup.Name, probeName),
+				"bosh-task instance groups cannot have health checks"))
 		}
 
 	case RoleTypeDocker:
 		if len(probe.Command) > 1 {
			allErrs = 
append(allErrs, validation.Forbidden( - fmt.Sprintf("roles[%s].run.healthcheck.%s", role.Name, probeName), - "docker roles do not support multiple commands")) + fmt.Sprintf("instance_groups[%s].run.healthcheck.%s", instanceGroup.Name, probeName), + "docker instance groups do not support multiple commands")) } default: // We should have caught the invalid role type when loading the role manifest - panic("Unexpected role type " + string(role.Type) + " in role " + role.Name) + panic("Unexpected role type " + string(instanceGroup.Type) + " in instance group " + instanceGroup.Name) } return allErrs } -// normalizeFlightStage reports roles with a bad flightstage, and -// fixes all roles without a flight stage to use the default +// normalizeFlightStage reports instance groups with a bad flightstage, and +// fixes all instance groups without a flight stage to use the default // ('flight'). -func normalizeFlightStage(role *Role) validation.ErrorList { +func normalizeFlightStage(instanceGroup *InstanceGroup) validation.ErrorList { allErrs := validation.ErrorList{} // Normalize flight stage - switch role.Run.FlightStage { + switch instanceGroup.Run.FlightStage { case "": - role.Run.FlightStage = FlightStageFlight + instanceGroup.Run.FlightStage = FlightStageFlight case FlightStagePreFlight: case FlightStageFlight: case FlightStagePostFlight: case FlightStageManual: default: allErrs = append(allErrs, validation.Invalid( - fmt.Sprintf("roles[%s].run.flight-stage", role.Name), - role.Run.FlightStage, + fmt.Sprintf("instance_groups[%s].run.flight-stage", instanceGroup.Name), + instanceGroup.Run.FlightStage, "Expected one of flight, manual, post-flight, or pre-flight")) } @@ -1597,16 +1596,16 @@ func validateServiceAccounts(roleManifest *RoleManifest) validation.ErrorList { func validateUnusedColocatedContainerRoles(RoleManifest *RoleManifest) validation.ErrorList { counterMap := map[string]int{} - for _, role := range RoleManifest.Roles { - // Initialise any role of type colocated container in the counter map - if role.Type == RoleTypeColocatedContainer { - if _, ok := counterMap[role.Name]; !ok { - counterMap[role.Name] = 0 + for _, instanceGroup := range RoleManifest.InstanceGroups { + // Initialise any instance group of type colocated container in the counter map + if instanceGroup.Type == RoleTypeColocatedContainer { + if _, ok := counterMap[instanceGroup.Name]; !ok { + counterMap[instanceGroup.Name] = 0 } } // Increase counter of configured colocated container names - for _, roleName := range role.ColocatedContainers { + for _, roleName := range instanceGroup.ColocatedContainers { if _, ok := counterMap[roleName]; !ok { counterMap[roleName] = 0 } @@ -1619,8 +1618,8 @@ func validateUnusedColocatedContainerRoles(RoleManifest *RoleManifest) validatio for roleName, usageCount := range counterMap { if usageCount == 0 { allErrs = append(allErrs, validation.NotFound( - fmt.Sprintf("role[%s]", roleName), - "role is of type colocated container, but is not used by any other role as such")) + fmt.Sprintf("instance_group[%s]", roleName), + "instance group is of type colocated container, but is not used by any other instance group as such")) } } @@ -1630,14 +1629,14 @@ func validateUnusedColocatedContainerRoles(RoleManifest *RoleManifest) validatio func validateColocatedContainerPortCollisions(RoleManifest *RoleManifest) validation.ErrorList { allErrs := validation.ErrorList{} - for _, role := range RoleManifest.Roles { - if len(role.ColocatedContainers) > 0 { + for _, instanceGroup := range 
RoleManifest.InstanceGroups { + if len(instanceGroup.ColocatedContainers) > 0 { lookupMap := map[string][]string{} - // Iterate over this role and all colocated container roles and store the - // names of the roles for each protocol/port (there should be no list with + // Iterate over this instance group and all colocated container instance groups and store the + // names of the groups for each protocol/port (there should be no list with // more than one entry) - for _, toBeChecked := range append([]*Role{role}, role.GetColocatedRoles()...) { + for _, toBeChecked := range append([]*InstanceGroup{instanceGroup}, instanceGroup.GetColocatedRoles()...) { for _, exposedPort := range toBeChecked.Run.ExposedPorts { for i := 0; i < exposedPort.Count; i++ { protocolPortTuple := fmt.Sprintf("%s/%d", exposedPort.Protocol, exposedPort.ExternalPort+i) @@ -1663,7 +1662,7 @@ func validateColocatedContainerPortCollisions(RoleManifest *RoleManifest) valida if len(names) > 1 { allErrs = append(allErrs, validation.Invalid( - fmt.Sprintf("role[%s]", role.Name), + fmt.Sprintf("instance_group[%s]", instanceGroup.Name), protocolPortTuple, fmt.Sprintf("port collision, the same protocol/port is used by: %s", strings.Join(names, ", ")))) } @@ -1674,7 +1673,7 @@ func validateColocatedContainerPortCollisions(RoleManifest *RoleManifest) valida return allErrs } -func validateRoleTags(role *Role) validation.ErrorList { +func validateRoleTags(instanceGroup *InstanceGroup) validation.ErrorList { var allErrs validation.ErrorList acceptableRoleTypes := map[RoleTag][]RoleType{ @@ -1684,41 +1683,41 @@ func validateRoleTags(role *Role) validation.ErrorList { RoleTagStopOnFailure: []RoleType{RoleTypeBoshTask}, } - for tagNum, tag := range role.Tags { + for tagNum, tag := range instanceGroup.Tags { switch tag { case RoleTagStopOnFailure: case RoleTagSequentialStartup: case RoleTagHeadless: case RoleTagActivePassive: - if role.Run == nil || role.Run.ActivePassiveProbe == "" { + if instanceGroup.Run == nil || instanceGroup.Run.ActivePassiveProbe == "" { allErrs = append(allErrs, validation.Required( - fmt.Sprintf("roles[%s].run.active-passive-probe", role.Name), - "active-passive roles must specify the correct probe")) + fmt.Sprintf("instance_groups[%s].run.active-passive-probe", instanceGroup.Name), + "active-passive instance groups must specify the correct probe")) } - if role.HasTag(RoleTagHeadless) { + if instanceGroup.HasTag(RoleTagHeadless) { allErrs = append(allErrs, validation.Invalid( - fmt.Sprintf("roles[%s].tags[%d]", role.Name, tagNum), + fmt.Sprintf("instance_groups[%s].tags[%d]", instanceGroup.Name, tagNum), tag, - "headless roles may not be active-passive")) + "headless instance groups may not be active-passive")) } default: allErrs = append(allErrs, validation.Invalid( - fmt.Sprintf("roles[%s].tags[%d]", role.Name, tagNum), + fmt.Sprintf("instance_groups[%s].tags[%d]", instanceGroup.Name, tagNum), string(tag), "Unknown tag")) continue } if _, ok := acceptableRoleTypes[tag]; !ok { allErrs = append(allErrs, validation.InternalError( - fmt.Sprintf("roles[%s].tags[%d]", role.Name, tagNum), + fmt.Sprintf("instance_groups[%s].tags[%d]", instanceGroup.Name, tagNum), fmt.Errorf("Tag %s has no acceptable role list", tag))) continue } validTypeForTag := false for _, roleType := range acceptableRoleTypes[tag] { - if roleType == role.Type { + if roleType == instanceGroup.Type { validTypeForTag = true break } @@ -1729,9 +1728,9 @@ func validateRoleTags(role *Role) validation.ErrorList { roleNames = append(roleNames, 
string(roleType))
 			}
 			allErrs = append(allErrs, validation.Invalid(
-				fmt.Sprintf("roles[%s].tags[%d]", role.Name, tagNum),
+				fmt.Sprintf("instance_groups[%s].tags[%d]", instanceGroup.Name, tagNum),
 				tag,
-				fmt.Sprintf("%s tag is only supported in [%s] roles, not %s", tag, strings.Join(roleNames, ", "), role.Type)))
+				fmt.Sprintf("%s tag is only supported in [%s] instance groups, not %s", tag, strings.Join(roleNames, ", "), instanceGroup.Type)))
 		}
 	}
 
@@ -1741,16 +1740,16 @@
 func validateColocatedContainerVolumeShares(RoleManifest *RoleManifest) validation.ErrorList {
 	allErrs := validation.ErrorList{}
 
-	for _, role := range RoleManifest.Roles {
-		numberOfColocatedContainers := len(role.ColocatedContainers)
+	for _, instanceGroup := range RoleManifest.InstanceGroups {
+		numberOfColocatedContainers := len(instanceGroup.ColocatedContainers)
 
 		if numberOfColocatedContainers > 0 {
 			emptyDirVolumesTags := []string{}
 			emptyDirVolumesPath := map[string]string{}
 			emptyDirVolumesCount := map[string]int{}
 
-			// Compile a map of all emptyDir volumes with tag -> path of the main role
-			for _, volume := range role.Run.Volumes {
+			// Compile a map of all emptyDir volumes with tag -> path of the main instance group
+			for _, volume := range instanceGroup.Run.Volumes {
 				if volume.Type == VolumeTypeEmptyDir {
 					emptyDirVolumesTags = append(emptyDirVolumesTags, volume.Tag)
 					emptyDirVolumesPath[volume.Tag] = volume.Path
@@ -1758,7 +1757,7 @@
 				}
 			}
 
-			for _, colocatedRole := range role.GetColocatedRoles() {
+			for _, colocatedRole := range instanceGroup.GetColocatedRoles() {
 				for _, volume := range colocatedRole.Run.Volumes {
 					if volume.Type == VolumeTypeEmptyDir {
 						if _, ok := emptyDirVolumesCount[volume.Tag]; !ok {
@@ -1771,9 +1770,9 @@
 						if path != volume.Path {
 							// Same tag, but different paths
 							allErrs = append(allErrs, validation.Invalid(
-								fmt.Sprintf("role[%s]", colocatedRole.Name),
+								fmt.Sprintf("instance_group[%s]", colocatedRole.Name),
 								volume.Path,
-								fmt.Sprintf("colocated role specifies a shared volume with tag %s, which path does not match the path of the main role shared volume with the same tag", volume.Tag)))
+								fmt.Sprintf("colocated instance group specifies a shared volume with tag %s, whose path does not match the path of the main instance group shared volume with the same tag", volume.Tag)))
 						}
 					}
 				}
@@ -1786,8 +1785,8 @@
 				count := emptyDirVolumesCount[tag]
 				if count != numberOfColocatedContainers {
 					allErrs = append(allErrs, validation.Required(
-						fmt.Sprintf("role[%s]", role.Name),
-						fmt.Sprintf("container must use shared volumes of the main role: %s", tag)))
+						fmt.Sprintf("instance_group[%s]", instanceGroup.Name),
+						fmt.Sprintf("container must use shared volumes of the main instance group: %s", tag)))
 				}
 			}
 		}
@@ -1797,19 +1796,19 @@
 }
 
 // LookupJob will find the given job in this role, or nil if not found
-func (r *Role) LookupJob(name string) *RoleJob {
-	for _, roleJob := range r.RoleJobs {
-		if roleJob.Job.Name == name {
-			return roleJob
+func (g *InstanceGroup) LookupJob(name string) *JobReference {
+	for _, jobReference := range g.JobReferences {
+		if jobReference.Job.Name == name {
+			return jobReference
 		}
 	}
 	return nil
 }
 
-// IsPrivileged tests if the role capabilities enable fully privileged
if the role capabilities enable fully privileged +// IsPrivileged tests if the instance group capabilities enable fully privileged // mode. -func (r *Role) IsPrivileged() bool { - for _, cap := range r.Run.Capabilities { +func (g *InstanceGroup) IsPrivileged() bool { + for _, cap := range g.Run.Capabilities { if cap == "ALL" { return true } @@ -1817,17 +1816,17 @@ func (r *Role) IsPrivileged() bool { return false } -// IsColocatedContainerRole tests if the role is of type ColocatedContainer, or +// IsColocated tests if the role is of type ColocatedContainer, or // not. It returns true if this role is of that type, or false otherwise. -func (r *Role) IsColocatedContainerRole() bool { - return r.Type == RoleTypeColocatedContainer +func (g *InstanceGroup) IsColocated() bool { + return g.Type == RoleTypeColocatedContainer } -// GetColocatedRoles lists all colocation roles references by this role -func (r *Role) GetColocatedRoles() []*Role { - result := make([]*Role, len(r.ColocatedContainers)) - for i, name := range r.ColocatedContainers { - if role := r.roleManifest.LookupRole(name); role != nil { +// GetColocatedRoles lists all colocation roles references by this instance group +func (g *InstanceGroup) GetColocatedRoles() []*InstanceGroup { + result := make([]*InstanceGroup, len(g.ColocatedContainers)) + for i, name := range g.ColocatedContainers { + if role := g.roleManifest.LookupInstanceGroup(name); role != nil { result[i] = role } } diff --git a/model/roles_test.go b/model/roles_test.go index 6dfebd35..4b37dc6a 100644 --- a/model/roles_test.go +++ b/model/roles_test.go @@ -29,16 +29,16 @@ func TestLoadRoleManifestOK(t *testing.T) { require.NotNil(t, roleManifest) assert.Equal(t, roleManifestPath, roleManifest.manifestFilePath) - assert.Len(t, roleManifest.Roles, 2) + assert.Len(t, roleManifest.InstanceGroups, 2) - myrole := roleManifest.Roles[0] + myrole := roleManifest.InstanceGroups[0] assert.Equal(t, []string{ "myrole.sh", "/script/with/absolute/path.sh", }, myrole.Scripts) - foorole := roleManifest.Roles[1] - torjob := foorole.RoleJobs[0] + foorole := roleManifest.InstanceGroups[1] + torjob := foorole.JobReferences[0] assert.Equal(t, "tor", torjob.Name) assert.NotNil(t, torjob.Release) assert.Equal(t, "tor", torjob.Release.Name) @@ -58,7 +58,7 @@ func TestGetScriptPaths(t *testing.T) { assert.NoError(t, err) require.NotNil(t, roleManifest) - fullScripts := roleManifest.Roles[0].GetScriptPaths() + fullScripts := roleManifest.InstanceGroups[0].GetScriptPaths() assert.Len(t, fullScripts, 3) for _, leafName := range []string{"environ.sh", "myrole.sh", "post_config_script.sh"} { assert.Equal(t, filepath.Join(workDir, "../test-assets/role-manifests/model", leafName), fullScripts[leafName]) @@ -118,14 +118,14 @@ func TestLoadRoleManifestMultipleReleasesOK(t *testing.T) { require.NotNil(t, roleManifest) assert.Equal(t, roleManifestPath, roleManifest.manifestFilePath) - assert.Len(t, roleManifest.Roles, 2) + assert.Len(t, roleManifest.InstanceGroups, 2) - myrole := roleManifest.Roles[0] + myrole := roleManifest.InstanceGroups[0] assert.Len(t, myrole.Scripts, 1) assert.Equal(t, "myrole.sh", myrole.Scripts[0]) - foorole := roleManifest.Roles[1] - torjob := foorole.RoleJobs[0] + foorole := roleManifest.InstanceGroups[1] + torjob := foorole.JobReferences[0] assert.Equal(t, "tor", torjob.Name) if assert.NotNil(t, torjob.Release) { assert.Equal(t, "tor", torjob.Release.Name) @@ -151,7 +151,7 @@ func TestLoadRoleManifestMultipleReleasesNotOk(t *testing.T) { if assert.Error(t, err) { 
assert.Contains(t, err.Error(), - `roles[foorole].jobs[ntpd]: Invalid value: "foo": Referenced release is not loaded`) + `instance_groups[foorole].jobs[ntpd]: Invalid value: "foo": Referenced release is not loaded`) } } @@ -187,12 +187,12 @@ func TestRoleManifestTagList(t *testing.T) { err := yaml.Unmarshal(manifestContents, roleManifest) require.NoError(t, err, "Error unmarshalling role manifest") roleManifest.Configuration = &Configuration{Templates: map[string]string{}} - require.NotEmpty(t, roleManifest.Roles, "No roles loaded") - roleManifest.Roles[0].Type = roleType - roleManifest.Roles[0].Tags = []RoleTag{RoleTag(tag)} + require.NotEmpty(t, roleManifest.InstanceGroups, "No instance groups loaded") + roleManifest.InstanceGroups[0].Type = roleType + roleManifest.InstanceGroups[0].Tags = []RoleTag{RoleTag(tag)} if RoleTag(tag) == RoleTagActivePassive { // An active/passive probe is required when tagged as active/passive - roleManifest.Roles[0].Run.ActivePassiveProbe = "hello" + roleManifest.InstanceGroups[0].Run.ActivePassiveProbe = "hello" } err = roleManifest.resolveRoleManifest([]*Release{release}, nil) acceptable := false @@ -210,12 +210,12 @@ func TestRoleManifestTagList(t *testing.T) { for _, acceptableRoleType := range acceptableRoleTypes { roleNames = append(roleNames, string(acceptableRoleType)) } - message = fmt.Sprintf("%s tag is only supported in [%s] roles, not %s", + message = fmt.Sprintf("%s tag is only supported in [%s] instance groups, not %s", tag, strings.Join(roleNames, ", "), roleType) } - fullMessage := fmt.Sprintf(`roles[myrole].tags[0]: Invalid value: "%s": %s`, tag, message) + fullMessage := fmt.Sprintf(`instance_groups[myrole].tags[0]: Invalid value: "%s": %s`, tag, message) assert.EqualError(t, err, fullMessage) } }) @@ -239,35 +239,35 @@ func TestNonBoshRolesAreIgnoredOK(t *testing.T) { require.NotNil(t, roleManifest) assert.Equal(t, roleManifestPath, roleManifest.manifestFilePath) - assert.Len(t, roleManifest.Roles, 2) + assert.Len(t, roleManifest.InstanceGroups, 2) } func TestRolesSort(t *testing.T) { assert := assert.New(t) - roles := Roles{ + instanceGroups := InstanceGroups{ {Name: "aaa"}, {Name: "bbb"}, } - sort.Sort(roles) - assert.Equal(roles[0].Name, "aaa") - assert.Equal(roles[1].Name, "bbb") + sort.Sort(instanceGroups) + assert.Equal(instanceGroups[0].Name, "aaa") + assert.Equal(instanceGroups[1].Name, "bbb") - roles = Roles{ + instanceGroups = InstanceGroups{ {Name: "ddd"}, {Name: "ccc"}, } - sort.Sort(roles) - assert.Equal(roles[0].Name, "ccc") - assert.Equal(roles[1].Name, "ddd") + sort.Sort(instanceGroups) + assert.Equal(instanceGroups[0].Name, "ccc") + assert.Equal(instanceGroups[1].Name, "ddd") } func TestGetScriptSignatures(t *testing.T) { assert := assert.New(t) - refRole := &Role{ + refRole := &InstanceGroup{ Name: "bbb", - RoleJobs: []*RoleJob{ + JobReferences: []*JobReference{ { Job: &Job{ SHA1: "Role 2 Job 1", @@ -300,10 +300,10 @@ func TestGetScriptSignatures(t *testing.T) { err = ioutil.WriteFile(scriptPath, []byte("true\n"), 0644) assert.NoError(err) - differentPatch := &Role{ - Name: refRole.Name, - RoleJobs: []*RoleJob{refRole.RoleJobs[0], refRole.RoleJobs[1]}, - Scripts: []string{scriptName}, + differentPatch := &InstanceGroup{ + Name: refRole.Name, + JobReferences: []*JobReference{refRole.JobReferences[0], refRole.JobReferences[1]}, + Scripts: []string{scriptName}, roleManifest: &RoleManifest{ manifestFilePath: releasePath, }, @@ -322,17 +322,17 @@ func TestGetScriptSignatures(t *testing.T) { func TestGetTemplateSignatures(t 
*testing.T) { assert := assert.New(t) - differentTemplate1 := &Role{ - Name: "aaa", - RoleJobs: []*RoleJob{}, + differentTemplate1 := &InstanceGroup{ + Name: "aaa", + JobReferences: []*JobReference{}, Configuration: &Configuration{ Templates: map[string]string{"foo": "bar"}, }, } - differentTemplate2 := &Role{ - Name: "aaa", - RoleJobs: []*RoleJob{}, + differentTemplate2 := &InstanceGroup{ + Name: "aaa", + JobReferences: []*JobReference{}, Configuration: &Configuration{ Templates: map[string]string{"bat": "baz"}, }, @@ -474,7 +474,7 @@ func TestLoadRoleManifestRunEnvDocker(t *testing.T) { roleManifestPath := filepath.Join(workDir, "../test-assets/role-manifests/model/docker-run-env.yml") roleManifest, err := LoadRoleManifest(roleManifestPath, []*Release{release}, nil) - assert.EqualError(t, err, `roles[dockerrole].run.env: Not found: "No variable declaration of 'UNKNOWN'"`) + assert.EqualError(t, err, `instance_groups[dockerrole].run.env: Not found: "No variable declaration of 'UNKNOWN'"`) assert.Nil(t, roleManifest) } @@ -489,7 +489,7 @@ func TestLoadRoleManifestMissingRBACAccount(t *testing.T) { roleManifestPath := filepath.Join(workDir, "../test-assets/role-manifests/model/rbac-missing-account.yml") roleManifest, err := LoadRoleManifest(roleManifestPath, []*Release{release}, nil) - assert.EqualError(t, err, `roles[myrole].run.service-account: Not found: "missing-account"`) + assert.EqualError(t, err, `instance_groups[myrole].run.service-account: Not found: "missing-account"`) assert.Nil(t, roleManifest) } @@ -527,40 +527,40 @@ func TestLoadRoleManifestRunGeneral(t *testing.T) { tests := []testCase{ { "bosh-run-missing.yml", []string{ - `roles[myrole].run: Required value`, + `instance_groups[myrole].run: Required value`, }, }, { "bosh-run-bad-proto.yml", []string{ - `roles[myrole].run.exposed-ports[https].protocol: Unsupported value: "AA": supported values: TCP, UDP`, + `instance_groups[myrole].run.exposed-ports[https].protocol: Unsupported value: "AA": supported values: TCP, UDP`, }, }, { "bosh-run-bad-port-names.yml", []string{ - `roles[myrole].run.exposed-ports[a--b].name: Invalid value: "a--b": port names must be lowercase words separated by hyphens`, - `roles[myrole].run.exposed-ports[abcd-efgh-ijkl-x].name: Invalid value: "abcd-efgh-ijkl-x": port name must be no more than 15 characters`, - `roles[myrole].run.exposed-ports[abcdefghij].name: Invalid value: "abcdefghij": user configurable port name must be no more than 9 characters`, + `instance_groups[myrole].run.exposed-ports[a--b].name: Invalid value: "a--b": port names must be lowercase words separated by hyphens`, + `instance_groups[myrole].run.exposed-ports[abcd-efgh-ijkl-x].name: Invalid value: "abcd-efgh-ijkl-x": port name must be no more than 15 characters`, + `instance_groups[myrole].run.exposed-ports[abcdefghij].name: Invalid value: "abcdefghij": user configurable port name must be no more than 9 characters`, }, }, { "bosh-run-bad-port-count.yml", []string{ - `roles[myrole].run.exposed-ports[http].count: Invalid value: 2: count doesn't match port range 80-82`, + `instance_groups[myrole].run.exposed-ports[http].count: Invalid value: 2: count doesn't match port range 80-82`, }, }, { "bosh-run-bad-ports.yml", []string{ - `roles[myrole].run.exposed-ports[https].internal: Invalid value: "-1": invalid syntax`, - `roles[myrole].run.exposed-ports[https].external: Invalid value: 0: must be between 1 and 65535, inclusive`, + `instance_groups[myrole].run.exposed-ports[https].internal: Invalid value: "-1": invalid syntax`, + 
`instance_groups[myrole].run.exposed-ports[https].external: Invalid value: 0: must be between 1 and 65535, inclusive`, }, }, { "bosh-run-missing-portrange.yml", []string{ - `roles[myrole].run.exposed-ports[https].internal: Invalid value: "": invalid syntax`, + `instance_groups[myrole].run.exposed-ports[https].internal: Invalid value: "": invalid syntax`, }, }, { "bosh-run-reverse-portrange.yml", []string{ - `roles[myrole].run.exposed-ports[https].internal: Invalid value: "5678-123": last port can't be lower than first port`, + `instance_groups[myrole].run.exposed-ports[https].internal: Invalid value: "5678-123": last port can't be lower than first port`, }, }, { @@ -569,23 +569,23 @@ func TestLoadRoleManifestRunGeneral(t *testing.T) { }, { "bosh-run-bad-parse.yml", []string{ - `roles[myrole].run.exposed-ports[https].internal: Invalid value: "qq": invalid syntax`, - `roles[myrole].run.exposed-ports[https].external: Invalid value: "aa": invalid syntax`, + `instance_groups[myrole].run.exposed-ports[https].internal: Invalid value: "qq": invalid syntax`, + `instance_groups[myrole].run.exposed-ports[https].external: Invalid value: "aa": invalid syntax`, }, }, { "bosh-run-bad-memory.yml", []string{ - `roles[myrole].run.memory: Invalid value: -10: must be greater than or equal to 0`, + `instance_groups[myrole].run.memory: Invalid value: -10: must be greater than or equal to 0`, }, }, { "bosh-run-bad-cpu.yml", []string{ - `roles[myrole].run.virtual-cpus: Invalid value: -2: must be greater than or equal to 0`, + `instance_groups[myrole].run.virtual-cpus: Invalid value: -2: must be greater than or equal to 0`, }, }, { "bosh-run-env.yml", []string{ - `roles[xrole].run.env: Forbidden: Non-docker role declares bogus parameters`, + `instance_groups[xrole].run.env: Forbidden: Non-docker instance group declares bogus parameters`, }, }, { @@ -645,7 +645,7 @@ func TestLoadRoleManifestHealthChecks(t *testing.T) { }, }, err: []string{ - `roles[myrole].run.healthcheck.readiness: Invalid value: ["url","command","port"]: Expected at most one of url, command, or port`, + `instance_groups[myrole].run.healthcheck.readiness: Invalid value: ["url","command","port"]: Expected at most one of url, command, or port`, }, }, { @@ -657,7 +657,7 @@ func TestLoadRoleManifestHealthChecks(t *testing.T) { }, }, err: []string{ - `roles[myrole].run.healthcheck.readiness: Forbidden: docker roles do not support multiple commands`, + `instance_groups[myrole].run.healthcheck.readiness: Forbidden: docker instance groups do not support multiple commands`, }, }, { @@ -669,7 +669,7 @@ func TestLoadRoleManifestHealthChecks(t *testing.T) { }, }, err: []string{ - `roles[myrole].run.healthcheck.readiness: Forbidden: bosh-task roles cannot have health checks`, + `instance_groups[myrole].run.healthcheck.readiness: Forbidden: bosh-task instance groups cannot have health checks`, }, }, { @@ -690,7 +690,7 @@ func TestLoadRoleManifestHealthChecks(t *testing.T) { }, }, err: []string{ - `roles[myrole].run.healthcheck.readiness: Invalid value: ["url"]: Only command health checks are supported for BOSH roles`, + `instance_groups[myrole].run.healthcheck.readiness: Invalid value: ["url"]: Only command health checks are supported for BOSH instance groups`, }, }, { @@ -702,7 +702,7 @@ func TestLoadRoleManifestHealthChecks(t *testing.T) { }, }, err: []string{ - `roles[myrole].run.healthcheck.liveness.command: Invalid value: ["hello","world"]: liveness check can only have one command`, + `instance_groups[myrole].run.healthcheck.liveness.command: Invalid 
value: ["hello","world"]: liveness check can only have one command`, }, }, } { @@ -713,11 +713,11 @@ func TestLoadRoleManifestHealthChecks(t *testing.T) { err := yaml.Unmarshal(manifestContents, roleManifest) require.NoError(t, err, "Error unmarshalling role manifest") roleManifest.Configuration = &Configuration{Templates: map[string]string{}} - require.NotEmpty(t, roleManifest.Roles, "No roles loaded") + require.NotEmpty(t, roleManifest.InstanceGroups, "No instance groups loaded") if sample.roleType != RoleType("") { - roleManifest.Roles[0].Type = sample.roleType + roleManifest.InstanceGroups[0].Type = sample.roleType } - roleManifest.Roles[0].Run = &RoleRun{ + roleManifest.InstanceGroups[0].Run = &RoleRun{ HealthCheck: &sample.healthCheck, } err = roleManifest.resolveRoleManifest([]*Release{release}, nil) @@ -736,16 +736,16 @@ func TestLoadRoleManifestHealthChecks(t *testing.T) { err := yaml.Unmarshal(manifestContents, roleManifest) require.NoError(t, err, "Error unmarshalling role manifest") roleManifest.Configuration = &Configuration{Templates: map[string]string{}} - require.NotEmpty(t, roleManifest.Roles, "No roles loaded") + require.NotEmpty(t, roleManifest.InstanceGroups, "No instance groups loaded") - roleManifest.Roles[0].Type = RoleTypeBosh - roleManifest.Roles[0].Tags = []RoleTag{} - roleManifest.Roles[0].Run = &RoleRun{ + roleManifest.InstanceGroups[0].Type = RoleTypeBosh + roleManifest.InstanceGroups[0].Tags = []RoleTag{} + roleManifest.InstanceGroups[0].Run = &RoleRun{ ActivePassiveProbe: "/bin/true", } err = roleManifest.resolveRoleManifest([]*Release{release}, nil) assert.EqualError(t, err, - `roles[myrole].run.active-passive-probe: Invalid value: "/bin/true": Active/passive probes are only valid on roles with active-passive tag`) + `instance_groups[myrole].run.active-passive-probe: Invalid value: "/bin/true": Active/passive probes are only valid on instance groups with active-passive tag`) }) t.Run("active/passive bosh role without a probe", func(t *testing.T) { @@ -754,14 +754,14 @@ func TestLoadRoleManifestHealthChecks(t *testing.T) { err := yaml.Unmarshal(manifestContents, roleManifest) require.NoError(t, err, "Error unmarshalling role manifest") roleManifest.Configuration = &Configuration{Templates: map[string]string{}} - require.NotEmpty(t, roleManifest.Roles, "No roles loaded") + require.NotEmpty(t, roleManifest.InstanceGroups, "No instance groups loaded") - roleManifest.Roles[0].Type = RoleTypeBosh - roleManifest.Roles[0].Tags = []RoleTag{RoleTagActivePassive} - roleManifest.Roles[0].Run = &RoleRun{} + roleManifest.InstanceGroups[0].Type = RoleTypeBosh + roleManifest.InstanceGroups[0].Tags = []RoleTag{RoleTagActivePassive} + roleManifest.InstanceGroups[0].Run = &RoleRun{} err = roleManifest.resolveRoleManifest([]*Release{release}, nil) assert.EqualError(t, err, - `roles[myrole].run.active-passive-probe: Required value: active-passive roles must specify the correct probe`) + `instance_groups[myrole].run.active-passive-probe: Required value: active-passive instance groups must specify the correct probe`) }) t.Run("bosh task tagged as active/passive", func(t *testing.T) { @@ -770,14 +770,14 @@ func TestLoadRoleManifestHealthChecks(t *testing.T) { err := yaml.Unmarshal(manifestContents, roleManifest) require.NoError(t, err, "Error unmarshalling role manifest") roleManifest.Configuration = &Configuration{Templates: map[string]string{}} - require.NotEmpty(t, roleManifest.Roles, "No roles loaded") + require.NotEmpty(t, roleManifest.InstanceGroups, "No instance groups 
loaded") - roleManifest.Roles[0].Type = RoleTypeBoshTask - roleManifest.Roles[0].Tags = []RoleTag{RoleTagActivePassive} - roleManifest.Roles[0].Run = &RoleRun{ActivePassiveProbe: "/bin/false"} + roleManifest.InstanceGroups[0].Type = RoleTypeBoshTask + roleManifest.InstanceGroups[0].Tags = []RoleTag{RoleTagActivePassive} + roleManifest.InstanceGroups[0].Run = &RoleRun{ActivePassiveProbe: "/bin/false"} err = roleManifest.resolveRoleManifest([]*Release{release}, nil) assert.EqualError(t, err, - `roles[myrole].tags[0]: Invalid value: "active-passive": active-passive tag is only supported in [bosh] roles, not bosh-task`) + `instance_groups[myrole].tags[0]: Invalid value: "active-passive": active-passive tag is only supported in [bosh] instance groups, not bosh-task`) }) t.Run("headless active/passive role", func(t *testing.T) { @@ -786,14 +786,14 @@ func TestLoadRoleManifestHealthChecks(t *testing.T) { err := yaml.Unmarshal(manifestContents, roleManifest) require.NoError(t, err, "Error unmarshalling role manifest") roleManifest.Configuration = &Configuration{Templates: map[string]string{}} - require.NotEmpty(t, roleManifest.Roles, "No roles loaded") + require.NotEmpty(t, roleManifest.InstanceGroups, "No instance groups loaded") - roleManifest.Roles[0].Type = RoleTypeBosh - roleManifest.Roles[0].Tags = []RoleTag{RoleTagHeadless, RoleTagActivePassive} - roleManifest.Roles[0].Run = &RoleRun{ActivePassiveProbe: "/bin/false"} + roleManifest.InstanceGroups[0].Type = RoleTypeBosh + roleManifest.InstanceGroups[0].Tags = []RoleTag{RoleTagHeadless, RoleTagActivePassive} + roleManifest.InstanceGroups[0].Run = &RoleRun{ActivePassiveProbe: "/bin/false"} err = roleManifest.resolveRoleManifest([]*Release{release}, nil) assert.EqualError(t, err, - `roles[myrole].tags[1]: Invalid value: "active-passive": headless roles may not be active-passive`) + `instance_groups[myrole].tags[1]: Invalid value: "active-passive": headless instance groups may not be active-passive`) }) } @@ -820,7 +820,7 @@ func TestResolveLinks(t *testing.T) { // LoadRoleManifest implicitly runs resolveLinks() - role := roleManifest.LookupRole("myrole") + role := roleManifest.LookupInstanceGroup("myrole") job := role.LookupJob("ntpd") if !assert.NotNil(t, job) { return @@ -959,10 +959,10 @@ func TestRoleResolveLinksMultipleProvider(t *testing.T) { } roleManifest := &RoleManifest{ - Roles: Roles{ - &Role{ + InstanceGroups: InstanceGroups{ + &InstanceGroup{ Name: "role-1", - RoleJobs: []*RoleJob{ + JobReferences: []*JobReference{ { Job: job1, ExportedProviders: map[string]jobProvidesInfo{ @@ -974,9 +974,9 @@ func TestRoleResolveLinksMultipleProvider(t *testing.T) { {Job: job2}, }, }, - &Role{ + &InstanceGroup{ Name: "role-2", - RoleJobs: []*RoleJob{ + JobReferences: []*JobReference{ {Job: job2}, { Job: job3, @@ -992,24 +992,24 @@ func TestRoleResolveLinksMultipleProvider(t *testing.T) { }, }, }, - &Role{ + &InstanceGroup{ Name: "role-3", // This does _not_ have an explicitly exported provider - RoleJobs: []*RoleJob{{Job: job2}, {Job: job3}}, + JobReferences: []*JobReference{{Job: job2}, {Job: job3}}, }, }, } - for _, r := range roleManifest.Roles { - for _, roleJob := range r.RoleJobs { - roleJob.Name = roleJob.Job.Name - if roleJob.ResolvedConsumers == nil { - roleJob.ResolvedConsumers = make(map[string]jobConsumesInfo) + for _, r := range roleManifest.InstanceGroups { + for _, jobReference := range r.JobReferences { + jobReference.Name = jobReference.Job.Name + if jobReference.ResolvedConsumers == nil { + jobReference.ResolvedConsumers = 
make(map[string]jobConsumesInfo) } } } errors := roleManifest.resolveLinks() assert.Empty(errors) - role := roleManifest.LookupRole("role-2") + role := roleManifest.LookupInstanceGroup("role-2") require.NotNil(role, "Failed to find role") job := role.LookupJob("job-3") require.NotNil(job, "Failed to find job") @@ -1083,9 +1083,9 @@ func TestWriteConfigs(t *testing.T) { }, } - role := &Role{ + role := &InstanceGroup{ Name: "dummy role", - RoleJobs: []*RoleJob{ + JobReferences: []*JobReference{ { Job: job, Name: "silly job", @@ -1114,7 +1114,7 @@ func TestWriteConfigs(t *testing.T) { assert.NoError(err) assert.NoError(tempFile.Close()) - json, err := role.RoleJobs[0].WriteConfigs(role, tempFile.Name(), tempFile.Name()) + json, err := role.JobReferences[0].WriteConfigs(role, tempFile.Name(), tempFile.Name()) assert.NoError(err) assert.JSONEq(` @@ -1163,13 +1163,13 @@ func TestLoadRoleManifestColocatedContainers(t *testing.T) { assert.NoError(err) assert.NotNil(roleManifest) - assert.Len(roleManifest.Roles, 2) - assert.EqualValues(RoleTypeBosh, roleManifest.LookupRole("main-role").Type) - assert.EqualValues(RoleTypeColocatedContainer, roleManifest.LookupRole("to-be-colocated").Type) - assert.Len(roleManifest.LookupRole("main-role").ColocatedContainers, 1) + assert.Len(roleManifest.InstanceGroups, 2) + assert.EqualValues(RoleTypeBosh, roleManifest.LookupInstanceGroup("main-role").Type) + assert.EqualValues(RoleTypeColocatedContainer, roleManifest.LookupInstanceGroup("to-be-colocated").Type) + assert.Len(roleManifest.LookupInstanceGroup("main-role").ColocatedContainers, 1) for _, roleName := range []string{"main-role", "to-be-colocated"} { - assert.EqualValues([]*RoleRunVolume{&RoleRunVolume{Path: "/var/vcap/store", Type: "emptyDir", Tag: "shared-data"}}, roleManifest.LookupRole(roleName).Run.Volumes) + assert.EqualValues([]*RoleRunVolume{&RoleRunVolume{Path: "/var/vcap/store", Type: "emptyDir", Tag: "shared-data"}}, roleManifest.LookupInstanceGroup(roleName).Run.Volumes) } } @@ -1190,7 +1190,7 @@ func TestLoadRoleManifestColocatedContainersValidationMissingRole(t *testing.T) roleManifestPath := filepath.Join(workDir, "../test-assets/role-manifests/model/colocated-containers-with-missing-role.yml") roleManifest, err := LoadRoleManifest(roleManifestPath, []*Release{torRelease, ntpRelease}, nil) assert.Nil(roleManifest) - assert.EqualError(err, `roles[main-role].colocated_containers[0]: Invalid value: "to-be-colocated-typo": There is no such role defined`) + assert.EqualError(err, `instance_groups[main-role].colocated_containers[0]: Invalid value: "to-be-colocated-typo": There is no such instance group defined`) } func TestLoadRoleManifestColocatedContainersValidationUsusedRole(t *testing.T) { @@ -1210,9 +1210,9 @@ func TestLoadRoleManifestColocatedContainersValidationUsusedRole(t *testing.T) { roleManifestPath := filepath.Join(workDir, "../test-assets/role-manifests/model/colocated-containers-with-unused-role.yml") roleManifest, err := LoadRoleManifest(roleManifestPath, []*Release{torRelease, ntpRelease}, nil) assert.Nil(roleManifest) - assert.EqualError(err, "role[to-be-colocated].job[ntpd].consumes[ntp-server]: Required value: failed to resolve provider ntp-server (type ntpd)\n"+ - "role[orphaned].job[ntpd].consumes[ntp-server]: Required value: failed to resolve provider ntp-server (type ntpd)\n"+ - "role[orphaned]: Not found: \"role is of type colocated container, but is not used by any other role as such\"") + assert.EqualError(err, 
"instance_group[to-be-colocated].job[ntpd].consumes[ntp-server]: Required value: failed to resolve provider ntp-server (type ntpd)\n"+ + "instance_group[orphaned].job[ntpd].consumes[ntp-server]: Required value: failed to resolve provider ntp-server (type ntpd)\n"+ + "instance_group[orphaned]: Not found: \"instance group is of type colocated container, but is not used by any other instance group as such\"") } func TestLoadRoleManifestColocatedContainersValidationPortCollisions(t *testing.T) { @@ -1232,8 +1232,8 @@ func TestLoadRoleManifestColocatedContainersValidationPortCollisions(t *testing. roleManifestPath := filepath.Join(workDir, "../test-assets/role-manifests/model/colocated-containers-with-port-collision.yml") roleManifest, err := LoadRoleManifest(roleManifestPath, []*Release{torRelease, ntpRelease}, nil) assert.Nil(roleManifest) - assert.EqualError(err, "role[main-role]: Invalid value: \"TCP/10443\": port collision, the same protocol/port is used by: main-role, to-be-colocated"+"\n"+ - "role[main-role]: Invalid value: \"TCP/80\": port collision, the same protocol/port is used by: main-role, to-be-colocated") + assert.EqualError(err, "instance_group[main-role]: Invalid value: \"TCP/10443\": port collision, the same protocol/port is used by: main-role, to-be-colocated"+"\n"+ + "instance_group[main-role]: Invalid value: \"TCP/80\": port collision, the same protocol/port is used by: main-role, to-be-colocated") } func TestLoadRoleManifestColocatedContainersValidationPortCollisionsWithProtocols(t *testing.T) { @@ -1273,7 +1273,7 @@ func TestLoadRoleManifestColocatedContainersValidationInvalidTags(t *testing.T) roleManifestPath := filepath.Join(workDir, "../test-assets/role-manifests/model/colocated-containers-with-clustered-tag.yml") roleManifest, err := LoadRoleManifest(roleManifestPath, []*Release{torRelease, ntpRelease}, nil) assert.Nil(roleManifest) - assert.EqualError(err, `roles[to-be-colocated].tags[0]: Invalid value: "headless": headless tag is only supported in [bosh, docker] roles, not colocated-container`) + assert.EqualError(err, `instance_groups[to-be-colocated].tags[0]: Invalid value: "headless": headless tag is only supported in [bosh, docker] instance groups, not colocated-container`) } func TestLoadRoleManifestColocatedContainersValidationOfSharedVolumes(t *testing.T) { @@ -1293,7 +1293,7 @@ func TestLoadRoleManifestColocatedContainersValidationOfSharedVolumes(t *testing roleManifestPath := filepath.Join(workDir, "../test-assets/role-manifests/model/colocated-containers-with-volume-share-issues.yml") roleManifest, err := LoadRoleManifest(roleManifestPath, []*Release{torRelease, ntpRelease}, nil) assert.Nil(roleManifest) - assert.EqualError(err, "role[to-be-colocated]: Invalid value: \"/mnt/foobAr\": colocated role specifies a shared volume with tag mount-share, which path does not match the path of the main role shared volume with the same tag\n"+ - "role[main-role]: Required value: container must use shared volumes of the main role: vcap-logs\n"+ - "role[main-role]: Required value: container must use shared volumes of the main role: vcap-store") + assert.EqualError(err, "instance_group[to-be-colocated]: Invalid value: \"/mnt/foobAr\": colocated instance group specifies a shared volume with tag mount-share, which path does not match the path of the main instance group shared volume with the same tag\n"+ + "instance_group[main-role]: Required value: container must use shared volumes of the main instance group: vcap-logs\n"+ + "instance_group[main-role]: Required value: 
container must use shared volumes of the main instance group: vcap-store") } diff --git a/scripts/dockerfiles/Dockerfile-role b/scripts/dockerfiles/Dockerfile-role index 44bfb5c3..815caec5 100644 --- a/scripts/dockerfiles/Dockerfile-role +++ b/scripts/dockerfiles/Dockerfile-role @@ -4,7 +4,7 @@ FROM {{ index . "base_image" }} MAINTAINER cloudfoundry@suse.example {{ end }} -LABEL "role"="{{ .role.Name }}" +LABEL "instance_group"="{{ .instance_group.Name }}" ADD root / diff --git a/scripts/dockerfiles/pre-stop.sh b/scripts/dockerfiles/pre-stop.sh index c8250bac..fe3d6d49 100755 --- a/scripts/dockerfiles/pre-stop.sh +++ b/scripts/dockerfiles/pre-stop.sh @@ -44,7 +44,7 @@ fi set -o errexit echo "Running pre-stop script..." -{{ if ne .role.Type "bosh-task" }} +{{ if ne .instance_group.Type "bosh-task" }} processes=($(/var/vcap/bosh/bin/monit summary | awk '$1 == "Process" { print $2 }' | tr -d "'")) # Lifecycle: Stop: 1. `monit unmonitor` is called for each process @@ -52,7 +52,7 @@ echo "Running pre-stop script..." # Lifecycle: Stop: 2. Drain scripts # We exec ourselves via xargs to run things in parallel and collect exit status - echo {{ range .role.RoleJobs }} {{ .Name }} {{ end }} | xargs --max-args=1 --max-procs=0 "${0}" + echo {{ range .instance_group.JobReferences }} {{ .Name }} {{ end }} | xargs --max-args=1 --max-procs=0 "${0}" # Lifecycle: Stop: 3. `monit stop` is called for each process echo "${processes[@]}" | xargs --max-args=1 /var/vcap/bosh/bin/monit stop diff --git a/scripts/dockerfiles/run.sh b/scripts/dockerfiles/run.sh index 5732ca09..ed1c609c 100755 --- a/scripts/dockerfiles/run.sh +++ b/scripts/dockerfiles/run.sh @@ -99,15 +99,15 @@ fi # Write a couple of identification files for the stemcell mkdir -p /var/vcap/instance -echo {{ .role.Name }} > /var/vcap/instance/name +echo {{ .instance_group.Name }} > /var/vcap/instance/name echo "${KUBE_COMPONENT_INDEX}" > /var/vcap/instance/id # Run custom environment scripts (that are sourced) -{{ range $script := .role.EnvironScripts }} +{{ range $script := .instance_group.EnvironScripts }} source {{ script_path $script }} {{ end }} # Run custom role scripts -{{ range $script := .role.Scripts}} +{{ range $script := .instance_group.Scripts}} bash {{ script_path $script }} {{ end }} @@ -132,7 +132,7 @@ then chmod 1730 /var/spool/cron/tabs/ fi -{{ if eq .role.Type "bosh-task" }} +{{ if eq .instance_group.Type "bosh-task" }} # Start rsyslog and cron /usr/sbin/rsyslogd cron @@ -142,7 +142,7 @@ fi # Run custom post config role scripts # Run any custom scripts other than pre-start -{{ range $script := .role.PostConfigScripts}} +{{ range $script := .instance_group.PostConfigScripts}} echo bash {{ script_path $script }} bash {{ script_path $script }} {{ end }} @@ -170,9 +170,9 @@ for fname in $(sorted-pre-start-paths) ; do done # Run -{{ if eq .role.Type "bosh-task" }} +{{ if eq .instance_group.Type "bosh-task" }} idx=0 - {{ range $job := .role.RoleJobs}} + {{ range $job := .instance_group.JobReferences}} if [ -x /var/vcap/jobs/{{ $job.Name }}/bin/run ] ; then /var/vcap/jobs/{{ $job.Name }}/bin/run idx=$((idx + 1)) diff --git a/test-assets/role-manifests/app/hashmat.yml b/test-assets/role-manifests/app/hashmat.yml index ef9f38b9..1b7f01fd 100644 --- a/test-assets/role-manifests/app/hashmat.yml +++ b/test-assets/role-manifests/app/hashmat.yml @@ -1,6 +1,6 @@ # This role manifest is used to test validation --- -roles: +instance_groups: - name: myrole run: foo: x diff --git a/test-assets/role-manifests/app/roles-to-build.yml 
b/test-assets/role-manifests/app/roles-to-build.yml index fb449c4d..48deddb4 100644 --- a/test-assets/role-manifests/app/roles-to-build.yml +++ b/test-assets/role-manifests/app/roles-to-build.yml @@ -1,6 +1,6 @@ # This role manifest is used to test selecting which roles to build --- -roles: +instance_groups: - name: myrole run: foo: x diff --git a/test-assets/role-manifests/app/tor-validation-issues.yml b/test-assets/role-manifests/app/tor-validation-issues.yml index c0f63840..bf305282 100644 --- a/test-assets/role-manifests/app/tor-validation-issues.yml +++ b/test-assets/role-manifests/app/tor-validation-issues.yml @@ -1,6 +1,6 @@ # This role manifest is used to check that various validation issues are found --- -roles: +instance_groups: - name: myrole run: foo: x diff --git a/test-assets/role-manifests/app/tor-validation-ok.yml b/test-assets/role-manifests/app/tor-validation-ok.yml index de842908..a9dc5002 100644 --- a/test-assets/role-manifests/app/tor-validation-ok.yml +++ b/test-assets/role-manifests/app/tor-validation-ok.yml @@ -1,6 +1,6 @@ # This role manifest should pass validation with no issues --- -roles: +instance_groups: - name: myrole environment_scripts: - environ.sh diff --git a/test-assets/role-manifests/app/two-roles.yml b/test-assets/role-manifests/app/two-roles.yml index 84254703..83840d41 100644 --- a/test-assets/role-manifests/app/two-roles.yml +++ b/test-assets/role-manifests/app/two-roles.yml @@ -1,6 +1,6 @@ # This role manifest contains two roles --- -roles: +instance_groups: - name: myrole-deployment jobs: [] run: diff --git a/test-assets/role-manifests/builder/tor-good.yml b/test-assets/role-manifests/builder/tor-good.yml index 077e6d74..bd7b8704 100644 --- a/test-assets/role-manifests/builder/tor-good.yml +++ b/test-assets/role-manifests/builder/tor-good.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole environment_scripts: - environ.sh diff --git a/test-assets/role-manifests/compilator/tor-good.yml b/test-assets/role-manifests/compilator/tor-good.yml index 8d86d5f8..f1903e03 100644 --- a/test-assets/role-manifests/compilator/tor-good.yml +++ b/test-assets/role-manifests/compilator/tor-good.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole run: foo: x diff --git a/test-assets/role-manifests/kube/bosh-run-count-configurable.yml b/test-assets/role-manifests/kube/bosh-run-count-configurable.yml index a1a681d6..758ea0d6 100644 --- a/test-assets/role-manifests/kube/bosh-run-count-configurable.yml +++ b/test-assets/role-manifests/kube/bosh-run-count-configurable.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git a/test-assets/role-manifests/kube/colocated-containers-with-deployment-and-empty-dir.yml b/test-assets/role-manifests/kube/colocated-containers-with-deployment-and-empty-dir.yml index 463711c0..c7d42566 100644 --- a/test-assets/role-manifests/kube/colocated-containers-with-deployment-and-empty-dir.yml +++ b/test-assets/role-manifests/kube/colocated-containers-with-deployment-and-empty-dir.yml @@ -1,6 +1,6 @@ --- -roles: -- name: role +instance_groups: +- name: some-group run: memory: 128 scaling: diff --git a/test-assets/role-manifests/kube/colocated-containers-with-stateful-set-and-empty-dir.yml b/test-assets/role-manifests/kube/colocated-containers-with-stateful-set-and-empty-dir.yml index f90e2c05..ae056310 100644 --- a/test-assets/role-manifests/kube/colocated-containers-with-stateful-set-and-empty-dir.yml +++ 
b/test-assets/role-manifests/kube/colocated-containers-with-stateful-set-and-empty-dir.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: - name: tor diff --git a/test-assets/role-manifests/kube/colocated-containers.yml b/test-assets/role-manifests/kube/colocated-containers.yml index 6b3bd082..d09fb1e1 100644 --- a/test-assets/role-manifests/kube/colocated-containers.yml +++ b/test-assets/role-manifests/kube/colocated-containers.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: main-role scripts: ["myrole.sh"] run: diff --git a/test-assets/role-manifests/kube/exposed-ports.yml b/test-assets/role-manifests/kube/exposed-ports.yml index b0f97495..572ffc65 100644 --- a/test-assets/role-manifests/kube/exposed-ports.yml +++ b/test-assets/role-manifests/kube/exposed-ports.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git a/test-assets/role-manifests/kube/job-with-annotation.yml b/test-assets/role-manifests/kube/job-with-annotation.yml index 088e41ff..72790a20 100644 --- a/test-assets/role-manifests/kube/job-with-annotation.yml +++ b/test-assets/role-manifests/kube/job-with-annotation.yml @@ -1,6 +1,6 @@ --- -roles: -- name: role +instance_groups: +- name: some-group type: bosh-task jobs: - name: new_hostname diff --git a/test-assets/role-manifests/kube/jobs.yml b/test-assets/role-manifests/kube/jobs.yml index 9ca55797..fbd8fc82 100644 --- a/test-assets/role-manifests/kube/jobs.yml +++ b/test-assets/role-manifests/kube/jobs.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: pre-role type: bosh-task jobs: diff --git a/test-assets/role-manifests/kube/pod-with-invalid-node-affinity.yml b/test-assets/role-manifests/kube/pod-with-invalid-node-affinity.yml index c779e556..978a7b42 100644 --- a/test-assets/role-manifests/kube/pod-with-invalid-node-affinity.yml +++ b/test-assets/role-manifests/kube/pod-with-invalid-node-affinity.yml @@ -1,6 +1,6 @@ --- -roles: -- name: role +instance_groups: +- name: some-group run: memory: 128 affinity: diff --git a/test-assets/role-manifests/kube/pod-with-invalid-pod-affinity.yml b/test-assets/role-manifests/kube/pod-with-invalid-pod-affinity.yml index 1e7fe3df..67329007 100644 --- a/test-assets/role-manifests/kube/pod-with-invalid-pod-affinity.yml +++ b/test-assets/role-manifests/kube/pod-with-invalid-pod-affinity.yml @@ -1,6 +1,6 @@ --- -roles: -- name: role +instance_groups: +- name: some-group run: memory: 128 affinity: diff --git a/test-assets/role-manifests/kube/pod-with-no-pod-anti-affinity.yml b/test-assets/role-manifests/kube/pod-with-no-pod-anti-affinity.yml index 95830c76..94448265 100644 --- a/test-assets/role-manifests/kube/pod-with-no-pod-anti-affinity.yml +++ b/test-assets/role-manifests/kube/pod-with-no-pod-anti-affinity.yml @@ -1,6 +1,6 @@ --- -roles: -- name: role +instance_groups: +- name: some-group run: memory: 128 affinity: ~ diff --git a/test-assets/role-manifests/kube/pod-with-valid-pod-anti-affinity.yml b/test-assets/role-manifests/kube/pod-with-valid-pod-anti-affinity.yml index df67af82..d0cb1c70 100644 --- a/test-assets/role-manifests/kube/pod-with-valid-pod-anti-affinity.yml +++ b/test-assets/role-manifests/kube/pod-with-valid-pod-anti-affinity.yml @@ -1,6 +1,6 @@ --- -roles: -- name: role +instance_groups: +- name: some-group run: memory: 128 scaling: @@ -16,5 +16,5 @@ roles: - key: "skiff-role-name" operator: In values: - - role + - some-group topologyKey: "beta.kubernetes.io/os" diff --git a/test-assets/role-manifests/kube/pods.yml 
b/test-assets/role-manifests/kube/pods.yml index 63670561..d32c5994 100644 --- a/test-assets/role-manifests/kube/pods.yml +++ b/test-assets/role-manifests/kube/pods.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: pre-role type: bosh-task tags: [stop-on-failure] diff --git a/test-assets/role-manifests/kube/service-headed.yml b/test-assets/role-manifests/kube/service-headed.yml index 11ebcdfa..7ed8d4b2 100644 --- a/test-assets/role-manifests/kube/service-headed.yml +++ b/test-assets/role-manifests/kube/service-headed.yml @@ -1,6 +1,6 @@ # This is a role with exposed ports, and without the headless tag --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git a/test-assets/role-manifests/kube/service-headless.yml b/test-assets/role-manifests/kube/service-headless.yml index 91665179..4a42c701 100644 --- a/test-assets/role-manifests/kube/service-headless.yml +++ b/test-assets/role-manifests/kube/service-headless.yml @@ -1,6 +1,6 @@ # This is a role with exposed ports, and _with_ the headless tag --- -roles: +instance_groups: - name: myrole jobs: [] tags: diff --git a/test-assets/role-manifests/kube/volumes-with-annotation.yml b/test-assets/role-manifests/kube/volumes-with-annotation.yml index 6d1e5c52..8ec3d68c 100644 --- a/test-assets/role-manifests/kube/volumes-with-annotation.yml +++ b/test-assets/role-manifests/kube/volumes-with-annotation.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: - name: tor diff --git a/test-assets/role-manifests/kube/volumes.yml b/test-assets/role-manifests/kube/volumes.yml index ef010c9c..9f6af3db 100644 --- a/test-assets/role-manifests/kube/volumes.yml +++ b/test-assets/role-manifests/kube/volumes.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: - name: tor diff --git a/test-assets/role-manifests/model/bad-cv-type-internal.yml b/test-assets/role-manifests/model/bad-cv-type-internal.yml index a4960974..174a1183 100644 --- a/test-assets/role-manifests/model/bad-cv-type-internal.yml +++ b/test-assets/role-manifests/model/bad-cv-type-internal.yml @@ -1,6 +1,6 @@ # This role manifest checks for an internal environment variable (which is invalid) --- -roles: +instance_groups: - name: myrole run: foo: x diff --git a/test-assets/role-manifests/model/bad-cv-type.yml b/test-assets/role-manifests/model/bad-cv-type.yml index 8323a458..6f347a5f 100644 --- a/test-assets/role-manifests/model/bad-cv-type.yml +++ b/test-assets/role-manifests/model/bad-cv-type.yml @@ -1,6 +1,6 @@ # This role manifest checks for an invalid variable type --- -roles: +instance_groups: - name: myrole environment_scripts: - environ.sh diff --git a/test-assets/role-manifests/model/bosh-run-bad-cpu.yml b/test-assets/role-manifests/model/bosh-run-bad-cpu.yml index 31c63b10..17100a76 100644 --- a/test-assets/role-manifests/model/bosh-run-bad-cpu.yml +++ b/test-assets/role-manifests/model/bosh-run-bad-cpu.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git a/test-assets/role-manifests/model/bosh-run-bad-memory.yml b/test-assets/role-manifests/model/bosh-run-bad-memory.yml index 2fba976c..ebeb7dc5 100644 --- a/test-assets/role-manifests/model/bosh-run-bad-memory.yml +++ b/test-assets/role-manifests/model/bosh-run-bad-memory.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git a/test-assets/role-manifests/model/bosh-run-bad-parse.yml b/test-assets/role-manifests/model/bosh-run-bad-parse.yml index 4afe8b9f..5ac42100 100644 --- 
a/test-assets/role-manifests/model/bosh-run-bad-parse.yml +++ b/test-assets/role-manifests/model/bosh-run-bad-parse.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git a/test-assets/role-manifests/model/bosh-run-bad-port-count.yml b/test-assets/role-manifests/model/bosh-run-bad-port-count.yml index 6383da65..87b06404 100644 --- a/test-assets/role-manifests/model/bosh-run-bad-port-count.yml +++ b/test-assets/role-manifests/model/bosh-run-bad-port-count.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git a/test-assets/role-manifests/model/bosh-run-bad-port-names.yml b/test-assets/role-manifests/model/bosh-run-bad-port-names.yml index 1f7039f5..15226e77 100644 --- a/test-assets/role-manifests/model/bosh-run-bad-port-names.yml +++ b/test-assets/role-manifests/model/bosh-run-bad-port-names.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git a/test-assets/role-manifests/model/bosh-run-bad-ports.yml b/test-assets/role-manifests/model/bosh-run-bad-ports.yml index ba8d1ba6..c134fead 100644 --- a/test-assets/role-manifests/model/bosh-run-bad-ports.yml +++ b/test-assets/role-manifests/model/bosh-run-bad-ports.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git a/test-assets/role-manifests/model/bosh-run-bad-proto.yml b/test-assets/role-manifests/model/bosh-run-bad-proto.yml index 5770ae06..5d209e05 100644 --- a/test-assets/role-manifests/model/bosh-run-bad-proto.yml +++ b/test-assets/role-manifests/model/bosh-run-bad-proto.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git a/test-assets/role-manifests/model/bosh-run-env.yml b/test-assets/role-manifests/model/bosh-run-env.yml index 5cd57071..2aaedf68 100644 --- a/test-assets/role-manifests/model/bosh-run-env.yml +++ b/test-assets/role-manifests/model/bosh-run-env.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: xrole scripts: ["myrole.sh"] run: diff --git a/test-assets/role-manifests/model/bosh-run-headless-public-port.yml b/test-assets/role-manifests/model/bosh-run-headless-public-port.yml index 641accc5..87fa1c2b 100644 --- a/test-assets/role-manifests/model/bosh-run-headless-public-port.yml +++ b/test-assets/role-manifests/model/bosh-run-headless-public-port.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] tags: [ headless ] diff --git a/test-assets/role-manifests/model/bosh-run-missing-portrange.yml b/test-assets/role-manifests/model/bosh-run-missing-portrange.yml index e39d86b5..ea44e05c 100644 --- a/test-assets/role-manifests/model/bosh-run-missing-portrange.yml +++ b/test-assets/role-manifests/model/bosh-run-missing-portrange.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git a/test-assets/role-manifests/model/bosh-run-missing.yml b/test-assets/role-manifests/model/bosh-run-missing.yml index d9567807..776c9250 100644 --- a/test-assets/role-manifests/model/bosh-run-missing.yml +++ b/test-assets/role-manifests/model/bosh-run-missing.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: - name: new_hostname diff --git a/test-assets/role-manifests/model/bosh-run-ok.yml b/test-assets/role-manifests/model/bosh-run-ok.yml index b0f97495..572ffc65 100644 --- a/test-assets/role-manifests/model/bosh-run-ok.yml +++ b/test-assets/role-manifests/model/bosh-run-ok.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git 
a/test-assets/role-manifests/model/bosh-run-reverse-portrange.yml b/test-assets/role-manifests/model/bosh-run-reverse-portrange.yml index 78e75ce9..2edcf33a 100644 --- a/test-assets/role-manifests/model/bosh-run-reverse-portrange.yml +++ b/test-assets/role-manifests/model/bosh-run-reverse-portrange.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole jobs: [] run: diff --git a/test-assets/role-manifests/model/colocated-containers-with-clustered-tag.yml b/test-assets/role-manifests/model/colocated-containers-with-clustered-tag.yml index c341c7ab..a55ef8af 100644 --- a/test-assets/role-manifests/model/colocated-containers-with-clustered-tag.yml +++ b/test-assets/role-manifests/model/colocated-containers-with-clustered-tag.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: main-role scripts: ["myrole.sh"] run: diff --git a/test-assets/role-manifests/model/colocated-containers-with-missing-role.yml b/test-assets/role-manifests/model/colocated-containers-with-missing-role.yml index 1fae0442..e3ef52a2 100644 --- a/test-assets/role-manifests/model/colocated-containers-with-missing-role.yml +++ b/test-assets/role-manifests/model/colocated-containers-with-missing-role.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: main-role scripts: ["myrole.sh"] run: diff --git a/test-assets/role-manifests/model/colocated-containers-with-no-port-collision.yml b/test-assets/role-manifests/model/colocated-containers-with-no-port-collision.yml index 841a0803..3588523d 100644 --- a/test-assets/role-manifests/model/colocated-containers-with-no-port-collision.yml +++ b/test-assets/role-manifests/model/colocated-containers-with-no-port-collision.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: main-role scripts: ["myrole.sh"] run: diff --git a/test-assets/role-manifests/model/colocated-containers-with-port-collision.yml b/test-assets/role-manifests/model/colocated-containers-with-port-collision.yml index 6a334cb1..3c4f5fc2 100644 --- a/test-assets/role-manifests/model/colocated-containers-with-port-collision.yml +++ b/test-assets/role-manifests/model/colocated-containers-with-port-collision.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: main-role scripts: ["myrole.sh"] run: diff --git a/test-assets/role-manifests/model/colocated-containers-with-unused-role.yml b/test-assets/role-manifests/model/colocated-containers-with-unused-role.yml index ad5dfa3e..39e7900a 100644 --- a/test-assets/role-manifests/model/colocated-containers-with-unused-role.yml +++ b/test-assets/role-manifests/model/colocated-containers-with-unused-role.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: main-role scripts: ["myrole.sh"] run: diff --git a/test-assets/role-manifests/model/colocated-containers-with-volume-share-issues.yml b/test-assets/role-manifests/model/colocated-containers-with-volume-share-issues.yml index ba1206b2..c0ab57be 100644 --- a/test-assets/role-manifests/model/colocated-containers-with-volume-share-issues.yml +++ b/test-assets/role-manifests/model/colocated-containers-with-volume-share-issues.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: main-role scripts: ["myrole.sh"] run: diff --git a/test-assets/role-manifests/model/colocated-containers.yml b/test-assets/role-manifests/model/colocated-containers.yml index 6b3bd082..d09fb1e1 100644 --- a/test-assets/role-manifests/model/colocated-containers.yml +++ b/test-assets/role-manifests/model/colocated-containers.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: main-role scripts: ["myrole.sh"] run: diff 
--git a/test-assets/role-manifests/model/docker-run-env.yml b/test-assets/role-manifests/model/docker-run-env.yml index d9e443b2..d29e2637 100644 --- a/test-assets/role-manifests/model/docker-run-env.yml +++ b/test-assets/role-manifests/model/docker-run-env.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: dockerrole type: docker run: diff --git a/test-assets/role-manifests/model/multiple-bad.yml b/test-assets/role-manifests/model/multiple-bad.yml index b5955e77..09ae0635 100644 --- a/test-assets/role-manifests/model/multiple-bad.yml +++ b/test-assets/role-manifests/model/multiple-bad.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole scripts: ["myrole.sh"] run: diff --git a/test-assets/role-manifests/model/multiple-good.yml b/test-assets/role-manifests/model/multiple-good.yml index 471bf6fa..1e1a7c3a 100644 --- a/test-assets/role-manifests/model/multiple-good.yml +++ b/test-assets/role-manifests/model/multiple-good.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole scripts: ["myrole.sh"] run: diff --git a/test-assets/role-manifests/model/non-bosh-roles.yml b/test-assets/role-manifests/model/non-bosh-roles.yml index 92a5f5ed..5bad5542 100644 --- a/test-assets/role-manifests/model/non-bosh-roles.yml +++ b/test-assets/role-manifests/model/non-bosh-roles.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole scripts: ["myrole.sh"] run: diff --git a/test-assets/role-manifests/model/rbac-missing-account.yml b/test-assets/role-manifests/model/rbac-missing-account.yml index de6f0006..38289053 100644 --- a/test-assets/role-manifests/model/rbac-missing-account.yml +++ b/test-assets/role-manifests/model/rbac-missing-account.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole run: service-account: missing-account diff --git a/test-assets/role-manifests/model/templates-non.yml b/test-assets/role-manifests/model/templates-non.yml index 03382280..dd2b1992 100644 --- a/test-assets/role-manifests/model/templates-non.yml +++ b/test-assets/role-manifests/model/templates-non.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole environment_scripts: - environ.sh diff --git a/test-assets/role-manifests/model/tor-bad.yml b/test-assets/role-manifests/model/tor-bad.yml index 890a274a..1b2e57cc 100644 --- a/test-assets/role-manifests/model/tor-bad.yml +++ b/test-assets/role-manifests/model/tor-bad.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole run: memory: 1 diff --git a/test-assets/role-manifests/model/tor-good.yml b/test-assets/role-manifests/model/tor-good.yml index 59e293f0..65fdcca1 100644 --- a/test-assets/role-manifests/model/tor-good.yml +++ b/test-assets/role-manifests/model/tor-good.yml @@ -1,5 +1,5 @@ --- -roles: +instance_groups: - name: myrole environment_scripts: - environ.sh diff --git a/test-assets/role-manifests/model/variable-expansion.yml b/test-assets/role-manifests/model/variable-expansion.yml index 15a32e7a..cc3bf6c1 100644 --- a/test-assets/role-manifests/model/variable-expansion.yml +++ b/test-assets/role-manifests/model/variable-expansion.yml @@ -1,6 +1,6 @@ # This role manifest is used to test mustache template expansion --- -roles: +instance_groups: - name: foorole type: bosh-task run: diff --git a/test-assets/role-manifests/model/variables-without-decl.yml b/test-assets/role-manifests/model/variables-without-decl.yml index e6b4ab52..cb1f7f70 100644 --- a/test-assets/role-manifests/model/variables-without-decl.yml +++ b/test-assets/role-manifests/model/variables-without-decl.yml @@ -1,6 +1,6 @@ # This 
role manifest tests that an undeclared variable is an error --- -roles: +instance_groups: - name: myrole run: foo: x diff --git a/test-assets/role-manifests/model/variables-without-usage.yml b/test-assets/role-manifests/model/variables-without-usage.yml index 96e4630e..7451bdc4 100644 --- a/test-assets/role-manifests/model/variables-without-usage.yml +++ b/test-assets/role-manifests/model/variables-without-usage.yml @@ -1,6 +1,6 @@ # This role manifest tests that unused variables are an error --- -roles: +instance_groups: - name: myrole jobs: - name: tor From 08e569637c0fc2e31f4cdcaa5378804cfa9b5838 Mon Sep 17 00:00:00 2001 From: Mario Manno Date: Wed, 1 Aug 2018 11:45:35 +0200 Subject: [PATCH 2/4] Rename role in docs/ --- docs/capabilities.md | 33 +++++++++---------- docs/configuration.md | 36 ++++++++++----------- docs/generated/fissile.md | 2 +- docs/generated/fissile_build.md | 2 +- docs/generated/fissile_build_cleancache.md | 2 +- docs/generated/fissile_build_helm.md | 2 +- docs/generated/fissile_build_images.md | 14 ++++---- docs/generated/fissile_build_kube.md | 2 +- docs/generated/fissile_build_packages.md | 4 +-- docs/generated/fissile_diff.md | 2 +- docs/generated/fissile_docs.md | 2 +- docs/generated/fissile_docs_autocomplete.md | 2 +- docs/generated/fissile_docs_man.md | 2 +- docs/generated/fissile_docs_markdown.md | 2 +- docs/generated/fissile_show.md | 4 +-- docs/generated/fissile_show_image.md | 4 +-- docs/generated/fissile_show_properties.md | 2 +- docs/generated/fissile_show_release.md | 2 +- docs/generated/fissile_version.md | 2 +- docs/kubernetes.md | 32 +++++++++--------- docs/validator-description.md | 4 +-- 21 files changed, 78 insertions(+), 79 deletions(-) diff --git a/docs/capabilities.md b/docs/capabilities.md index 1515503d..17c1ad78 100644 --- a/docs/capabilities.md +++ b/docs/capabilities.md @@ -14,26 +14,25 @@ https://github.com/SUSE/fissile/wiki/Operator-overrides-of-role-capabilities # Interface The generated `values.yaml` provides operators with keys of the form -`sizing..capabilities`, one key per role. +`sizing..capabilities`, one key per instance group. -The exception are roles which are naturally at maximum privilege, -i.e. as specified by the role manifest. These have no keys, as +The exceptions are instance groups which are naturally at maximum privilege, +i.e. as specified by the instance group manifest. These have no keys, as overriding them to add more capabilities makes no sense. -The keys are of type list, taking the operator-defined set of -additional capabilities for a role, or `ALL`. The supplied default is -the empty list (`[]`), i.e. no privileges to be added to the role. +The keys are of type list, taking the operator-defined set of additional +capabilities for an instance group, or `ALL`. The supplied default is the empty +list (`[]`), i.e. no privileges to be added to the instance group. -Presence of the special value `ALL` in the list of a role triggers -elevation of that role to maximum privilege. Presence of all the other -capabilites just causes them to be added to the role. When `ALL` is -present none of any other specified capabilities matter to the -modified role. +Presence of the special value `ALL` in the list of an instance group triggers +elevation of that instance group to maximum privilege. Presence of all the +other capabilities just causes them to be added to the instance group. When +`ALL` is present, none of the other specified capabilities matter to the +modified instance group.
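(For illustration of the semantics documented above: the following Go sketch shows one way the capability-list rules could be implemented. The function name `normalizeCapabilities` and its exact shape are assumptions for this example, not fissile's actual code.)

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeCapabilities is a hypothetical sketch of the documented rules:
// an exact, all-uppercase "ALL" elevates the instance group to maximum
// privilege and overrides every other entry, while any other name is
// upper-cased for the chart ("audit_read" and "AuDiT_ReAd" are the same).
func normalizeCapabilities(requested []string) []string {
	normalized := make([]string, 0, len(requested))
	for _, name := range requested {
		if name == "ALL" {
			// Only the all-uppercase spelling is the special value;
			// once present, the other capabilities no longer matter.
			return []string{"ALL"}
		}
		normalized = append(normalized, strings.ToUpper(name))
	}
	return normalized
}

func main() {
	fmt.Println(normalizeCapabilities([]string{"AuDiT_ReAd", "net_admin"})) // [AUDIT_READ NET_ADMIN]
	fmt.Println(normalizeCapabilities([]string{"audit_read", "ALL"}))      // [ALL]
}
```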
-Operators may write the names of capabilities in any mixed-case, and -the chart converts as necessary. In other words to chart `AUDIT_READ`, -`audit_read`, `AuDiT_ReAd`, etc. are all the same. +Operators may write the names of capabilities in any mixed-case, and the chart +converts as necessary. In other words, to the chart `AUDIT_READ`, `audit_read`, +`AuDiT_ReAd`, etc. are all the same. -The exception to this is the special value `ALL`, which must be -written in all-uppercase to be recognized. None of `all`, `All`, -etc. will be recognized. +The exception to this is the special value `ALL`, which must be written in +all-uppercase to be recognized. None of `all`, `All`, etc. will be recognized. diff --git a/docs/configuration.md b/docs/configuration.md index 79da2e8b..6a9ab5b9 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1,7 +1,7 @@ # Fissile Configuration Fissile requires some configuration to work with BOSH releases. It is necessary to describe what docker images should be created; in general, each docker image -(termed "role") will have one or more BOSH jobs. +(termed "instance group") will have one or more BOSH jobs. There are three main files for configuration: the [role manifest], [opinions], and [dark opinions]. A set of [environment files] will also be required. @@ -22,7 +22,7 @@ fissile configuration to place `nats` in a docker image. ## Role Manifest The role manifest is the main configuration file for fissile. It must contain a -list of roles (analogous to BOSH VMs); each role will result in one docker +list of instance groups (analogous to BOSH VMs); each group will result in one docker image. It can also have a configurable variables section to describe the tunable inputs, and a configuration templates section to map those variables to BOSH properties and related BOSHisms. We will be working with the example role @@ -30,12 +30,12 @@ manifest for NATS: ```yaml instance_groups: -- name: nats # The name of the role - jobs: # BOSH jobs this role will have +- name: nats # The name of the instance group + jobs: # BOSH jobs this group will have - name: nats release_name: nats # The name of the BOSH release this is from tags: - - indexed # Mark this role as indexed (load-balanced) => StatefulSet + - indexed # Mark this group as indexed (load-balanced) => StatefulSet run: # Runtime configuration scaling: # Auto-scaling limits min: 1 @@ -79,14 +79,14 @@ Name | Description -- | -- `DNS_RECORD_NAME` | Hostname of the container `IP_ADDRESS` | Primary IP address of the container -`KUBE_COMPONENT_INDEX` | Numeric index for roles with multiple replicas +`KUBE_COMPONENT_INDEX` | Numeric index for instance groups with multiple replicas `KUBERNETES_CLUSTER_DOMAIN` | Kubernetes cluster domain, `cluster.local` by default [run.sh]: https://github.com/SUSE/fissile/blob/master/scripts/dockerfiles/run.sh There are also some fields not shown above (as the are not needed for NATS): -For the role: +For the instance group: Name | Description -- | -- @@ -100,8 +100,8 @@ For the `run` section: Name | Description -- | -- `capabilities` | additional capabilities to grant the container (see `man 7 capabilities`); drop the `CAP_` prefix (e.g.
use `NET_ADMIN`) -`persistent-volumes` | volumes to attach to the role +`persistent-volumes` | volumes to attach to the instance group -`shared-volumes` | volumes shared across all containers of the role +`shared-volumes` | volumes shared across all containers of the instance group `healthcheck` | optional healthchecking parameters, see below `env` | list of environment variables, as `FOO=bar` `flight-stage` | one of `pre-flight`, `post-flight`, `manual`, or `flight` (default). The first three are for jobs. @@ -134,25 +134,25 @@ headers (for example, to set the `Accept:` header to request JSON responses). ## Tagging -The NATS role above was tagged as `indexed`, causing fissile to emit +The NATS instance group above was tagged as `indexed`, causing fissile to emit it as a [StatefulSet]. -The second way of causing fissile to do that is to tag a role as +The second way of causing fissile to do that is to tag an instance group as `clustered`. The main difference between `clustered` and `indexed` -roles is that fissile creates a public service (of type +groups is that fissile creates a public service (of type `LoadBalancer`) for the latter, providing a single point of access to -the pods for the role. +the pods for the group. -Note that both `clustered` and `indexed` roles can take advantage of +Note that both `clustered` and `indexed` instance groups can take advantage of volume claim templates for local storage. -Therefore the user should index roles which require load balancing and +Therefore the user should index instance groups which require load balancing and need a 0-based, incremented index, and mark them as clustered otherwise. -An example of a clustered role is the MYSQL database of CF. See the +An example of a clustered instance group is the MYSQL database of CF. See the example below. While mysql actually needs a load balancer for access -this role is made explicit in CF through role `mysql-proxy`. +this instance group is made explicit in CF through instance group `mysql-proxy`. ```yaml instance_groups: @@ -204,7 +204,7 @@ the upstream defaults, they can be stored in an opinions file which will be embedded within the docker image. An additional dark opinions file is used to ensure that we block out anything that must be different per-cluster (for example, passwords). A third file is used for the variables found in last -section of the role manifest. For the NATS role, we can use the following files: +section of the role manifest. For the NATS instance group, we can use the following files: - opinions.yml ```yaml diff --git a/docs/generated/fissile.md b/docs/generated/fissile.md index 41956ad2..a4d8f78f 100644 --- a/docs/generated/fissile.md +++ b/docs/generated/fissile.md @@ -29,7 +29,7 @@ agent. -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory.
(default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/generated/fissile_build.md b/docs/generated/fissile_build.md index a23fb1d9..7144ec38 100644 --- a/docs/generated/fissile_build.md +++ b/docs/generated/fissile_build.md @@ -35,7 +35,7 @@ language file for troubleshooting purposes. -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/generated/fissile_build_cleancache.md b/docs/generated/fissile_build_cleancache.md index 62a2fdef..f2887788 100644 --- a/docs/generated/fissile_build_cleancache.md +++ b/docs/generated/fissile_build_cleancache.md @@ -31,7 +31,7 @@ fissile build cleancache -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/generated/fissile_build_helm.md b/docs/generated/fissile_build_helm.md index 349237d5..8edb5878 100644 --- a/docs/generated/fissile_build_helm.md +++ b/docs/generated/fissile_build_helm.md @@ -41,7 +41,7 @@ fissile build helm -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. 
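(The `fissile build images` docs below mention that image tags end in a SIGNATURE derived from the hashes of all jobs and packages in the image. As a rough illustration of that idea — the function, hash choice, and fingerprint format here are assumptions, not fissile's actual implementation — such a signature could be computed like this:)

```go
package main

import (
	"crypto/sha1"
	"fmt"
	"sort"
)

// imageSignature sketches a content-based tag: hash the sorted fingerprints
// of every job and package baked into the image, so the tag changes exactly
// when the image contents change.
func imageSignature(jobHashes, packageHashes []string) string {
	all := append(append([]string{}, jobHashes...), packageHashes...)
	sort.Strings(all) // make the signature independent of input order
	h := sha1.New()
	for _, fingerprint := range all {
		h.Write([]byte(fingerprint))
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	sig := imageSignature(
		[]string{"job-ntpd-abc123"}, // hypothetical job fingerprint
		[]string{"pkg-tor-def456"},  // hypothetical package fingerprint
	)
	fmt.Printf("fissile-myrole:%s\n", sig)
}
```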
diff --git a/docs/generated/fissile_build_images.md b/docs/generated/fissile_build_images.md index c2e15722..ce362fa2 100644 --- a/docs/generated/fissile_build_images.md +++ b/docs/generated/fissile_build_images.md @@ -6,24 +6,24 @@ Builds Docker images from your BOSH releases. -This command goes through all the role definitions in the role manifest creating a +This command goes through all the instance group definitions in the role manifest, creating a Dockerfile for each of them and building it. -Each role gets a directory `/dockerfiles`. In each directory one can find +Each instance group gets a directory `/dockerfiles`. In each directory one can find a Dockerfile and a directory structure that gets ADDed to the docker image. The directory structure contains jobs, packages and all other necessary scripts and templates. -The images will have a 'role' label useful for filtering. +The images will have an '`instance_group`' label useful for filtering. The entrypoint for each image is `/opt/fissile/run.sh`. -The images will be tagged: `-:`. +The images will be tagged: `-:`. The SIGNATURE is based on the hashes of all jobs and packages that are included in the image. The `--patch-properties-release` flag is used to distinguish the patchProperties release/job spec from other specs. At most one is allowed. - + ``` fissile build images @@ -37,7 +37,7 @@ fissile build images -N, --no-build If specified, the Dockerfile and assets will be created, but the image won't be built. -O, --output-directory string Output the result as tar files in the given directory rather than building with docker -P, --patch-properties-release string Used to designate a "patch-properties" psuedo-job in a particular release. Format: RELEASE/JOB. - --roles string Build only images with the given role name; comma separated. + --roles string Build only images with the given instance group name; comma separated. -s, --stemcell string The source stemcell --stemcell-id string Docker image ID for the stemcell (intended for CI) --tag-extra string Additional information to use in computing the image tags @@ -61,7 +61,7 @@ fissile build images -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory.
diff --git a/docs/generated/fissile_build_kube.md b/docs/generated/fissile_build_kube.md index b046369a..5b4f1496 100644 --- a/docs/generated/fissile_build_kube.md +++ b/docs/generated/fissile_build_kube.md @@ -39,7 +39,7 @@ fissile build kube -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/generated/fissile_build_packages.md b/docs/generated/fissile_build_packages.md index 8e208509..34ec9360 100644 --- a/docs/generated/fissile_build_packages.md +++ b/docs/generated/fissile_build_packages.md @@ -30,7 +30,7 @@ fissile build packages ``` --docker-network-mode string Specify network mode to be used when building with docker. e.g. "--docker-network-mode host" is equivalent to "docker run --network=host" --only-releases string Build only packages for the given release names; comma separated. - --roles string Build only packages for the given role names; comma separated. + --roles string Build only packages for the given instance group names; comma separated. -s, --stemcell string The source stemcell --without-docker Build without docker; this may adversely affect your system. Only supported on Linux, and requires CAP_SYS_ADMIN. ``` @@ -53,7 +53,7 @@ fissile build packages -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/generated/fissile_diff.md b/docs/generated/fissile_diff.md index 7b42779e..3de88f79 100644 --- a/docs/generated/fissile_diff.md +++ b/docs/generated/fissile_diff.md @@ -32,7 +32,7 @@ fissile diff -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. 
+ -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/generated/fissile_docs.md b/docs/generated/fissile_docs.md index f3a78b4e..d5862d32 100644 --- a/docs/generated/fissile_docs.md +++ b/docs/generated/fissile_docs.md @@ -24,7 +24,7 @@ Has subcommands to create documentation for fissile. -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/generated/fissile_docs_autocomplete.md b/docs/generated/fissile_docs_autocomplete.md index 8b275527..cf81fe99 100644 --- a/docs/generated/fissile_docs_autocomplete.md +++ b/docs/generated/fissile_docs_autocomplete.md @@ -36,7 +36,7 @@ fissile docs autocomplete -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/generated/fissile_docs_man.md b/docs/generated/fissile_docs_man.md index 12b50dc5..d8893a00 100644 --- a/docs/generated/fissile_docs_man.md +++ b/docs/generated/fissile_docs_man.md @@ -34,7 +34,7 @@ fissile docs man -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. 
(default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/generated/fissile_docs_markdown.md b/docs/generated/fissile_docs_markdown.md index ef1f2dbd..c616a963 100644 --- a/docs/generated/fissile_docs_markdown.md +++ b/docs/generated/fissile_docs_markdown.md @@ -34,7 +34,7 @@ fissile docs markdown -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/generated/fissile_show.md b/docs/generated/fissile_show.md index d730146b..72796e05 100644 --- a/docs/generated/fissile_show.md +++ b/docs/generated/fissile_show.md @@ -24,7 +24,7 @@ Has subcommands that display information about build artifacts. -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. @@ -32,7 +32,7 @@ Has subcommands that display information about build artifacts. ### SEE ALSO * [fissile](fissile.md) - The BOSH disintegrator -* [fissile show image](fissile_show_image.md) - Displays information about role images. +* [fissile show image](fissile_show_image.md) - Displays information about instance group images. * [fissile show properties](fissile_show_properties.md) - Displays information about BOSH properties, per jobs. * [fissile show release](fissile_show_release.md) - Displays information about BOSH releases. diff --git a/docs/generated/fissile_show_image.md b/docs/generated/fissile_show_image.md index a1562fa7..14d04e4f 100644 --- a/docs/generated/fissile_show_image.md +++ b/docs/generated/fissile_show_image.md @@ -6,7 +6,7 @@ Displays information about role images. -This command lists all the final docker image names for all the roles defined in +This command lists all the final docker image names for all the instance groups defined in your role manifest. This command is useful in conjunction with docker (e.g. `docker rmi $(fissile show image)`). 
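Building on the `docker rmi $(fissile show image)` example above, a small hedged sketch of another use of the same output — it assumes `fissile show image` prints one image reference per line, which that example implies: ``` # Check which of the manifest's images are already present locally. # Assumes `fissile show image` emits one image name per line. for image in $(fissile show image); do if docker image inspect "$image" >/dev/null 2>&1; then echo "present: $image" else echo "missing: $image" fi done ```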
@@ -41,7 +41,7 @@ fissile show image -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/generated/fissile_show_properties.md b/docs/generated/fissile_show_properties.md index b9f9e0e0..3eddce17 100644 --- a/docs/generated/fissile_show_properties.md +++ b/docs/generated/fissile_show_properties.md @@ -31,7 +31,7 @@ fissile show properties -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/generated/fissile_show_release.md b/docs/generated/fissile_show_release.md index ac86c670..4ad6808f 100644 --- a/docs/generated/fissile_show_release.md +++ b/docs/generated/fissile_show_release.md @@ -31,7 +31,7 @@ fissile show release -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. 
diff --git a/docs/generated/fissile_version.md b/docs/generated/fissile_version.md index ab0d1695..7781d1fc 100644 --- a/docs/generated/fissile_version.md +++ b/docs/generated/fissile_version.md @@ -28,7 +28,7 @@ fissile version -n, --release-name string Name of a dev BOSH release; if empty, default configured dev release name will be used; Final release always use the name in release.MF -v, --release-version string Version of a dev BOSH release; if empty, the latest dev release will be used; Final release always use the version in release.MF -p, --repository string Repository name prefix used to create image names. (default "fissile") - -m, --role-manifest string Path to a yaml file that details which jobs are used for each role. + -m, --role-manifest string Path to a yaml file that details which jobs are used for each instance group. -V, --verbose Enable verbose output. -w, --work-dir string Path to the location of the work directory. (default "/var/fissile") -W, --workers int Number of workers to use; zero means determine based on CPU count. diff --git a/docs/kubernetes.md b/docs/kubernetes.md index e6a80696..72dc03f4 100644 --- a/docs/kubernetes.md +++ b/docs/kubernetes.md @@ -2,7 +2,7 @@ Fissile has experimental support for generating [Kubernetes] resource definitions. It will create one workload (deployment, stateful set, or job) per -defined role, and any associated services (and volume claims) required. +defined instance group, and any associated services (and volume claims) required. [Kubernetes]: https://kubernetes.io/ @@ -38,38 +38,38 @@ cluster that is needed for other purposes. Fissile emits a [StatefulSet] under two circumstances. -Any self-clustering roles (i.e. any role with the `clustered` tag) +Any self-clustering instance groups (i.e. any group with the `clustered` tag) will be a StatefulSet, in order for each pod to be addressable (so -that they can talk to each other). For example, a `doppler` role would +that they can talk to each other). For example, a `doppler` group would fall under this category. -Secondly, any roles tagged as `indexed`. An example of such would be -the CF role `nats`. These are roles which require load balancing and +Secondly, any instance groups tagged as `indexed`. An example of such would be +the CF instance group `nats`. These are groups which require load balancing and need a 0-based, incremented index. To support this fissile creates a -public service (of type `LoadBalancer`) for indexed roles, providing a -single point of access to the pods for the role. +public service (of type `LoadBalancer`) for indexed groups, providing a +single point of access to the pods for the group. -Note that both `clustered` and `indexed` roles can take advantage of +Note that both `clustered` and `indexed` instance groups can take advantage of volume claim templates for local storage. -__Attention__: The automatic emission of StatefulSet for roles which -have volume specifications has been removed. All roles now have to be +__Attention__: The automatic emission of StatefulSet for instance groups which +have volume specifications has been removed. All instance groups now have to be explicitly tagged as described above. [StatefulSet]: https://kubernetes.io/docs/resources-reference/v1.6/#statefulset-v1beta1-apps ### Deployment -All roles without the above constraints will be generated as deployments. +All instance groups without the above constraints will be generated as deployments. ## Services -Each role may have attached services generated as necessary. 
There are three +general conditions: - Each StatefulSet will have a headless service (e.g. `nats-set`); this is used to manage the StatefulSet (a Kubernetes requirement), and to allow discovery - of pods within a role via DNS. -- A role may have a service for its public ports, if any port is public. -- A role may have a service for its private ports, if any ports are defined. - Public ports will also be listed to ease communication across roles (not + of pods within an instance group via DNS. +- An instance group may have a service for its public ports, if any port is public. +- An instance group may have a service for its private ports, if any ports are defined. + Public ports will also be listed to ease communication across instance groups (not having to use different names depending on whether a port is public). diff --git a/docs/validator-description.md b/docs/validator-description.md index f62be0d2..4f627731 100644 --- a/docs/validator-description.md +++ b/docs/validator-description.md @@ -45,8 +45,8 @@ First an overview. The details of each check follow in subsections. 1. The role manifest must not contain any constants in the global section 1. All of the scripts must be used 1. Check clustering - 1. The run.env references of docker roles must use only declared parameters - 1. No non-docker role may declare 'run.env' + 1. The run.env references of docker instance groups must use only declared parameters + 1. No non-docker instance group may declare 'run.env' ### All dark opinions must be configured as templates From b0413636d821670b94554040676a4d465fe10617 Mon Sep 17 00:00:00 2001 From: Mario Manno Date: Wed, 1 Aug 2018 11:51:19 +0200 Subject: [PATCH 3/4] Rename create-role-image tag --- app/fissile.go | 4 ++-- builder/role_image.go | 2 +- builder/role_image_test.go | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/app/fissile.go b/app/fissile.go index 92abe27f..d8990f04 100644 --- a/app/fissile.go +++ b/app/fissile.go @@ -528,8 +528,8 @@ func (f *Fissile) GenerateRoleImages(targetPath, registry, organization, reposit } if metricsPath != "" { - stampy.Stamp(metricsPath, "fissile", "create-role-images", "start") - defer stampy.Stamp(metricsPath, "fissile", "create-role-images", "done") + stampy.Stamp(metricsPath, "fissile", "create-images", "start") + defer stampy.Stamp(metricsPath, "fissile", "create-images", "done") } roleManifest, err := model.LoadRoleManifest(roleManifestPath, f.releases, f) diff --git a/builder/role_image.go b/builder/role_image.go index 95075faa..974b007b 100644 --- a/builder/role_image.go +++ b/builder/role_image.go @@ -426,7 +426,7 @@ func (j roleBuildJob) Run() { } if j.builder.metricsPath != "" { - seriesName := fmt.Sprintf("create-role-images::%s", roleImageName) + seriesName := fmt.Sprintf("create-images::%s", roleImageName) stampy.Stamp(j.builder.metricsPath, "fissile", seriesName, "start") defer stampy.Stamp(j.builder.metricsPath, "fissile", seriesName, "done") diff --git a/builder/role_image_test.go b/builder/role_image_test.go index 2fab9c6b..10fdf0fb 100644 --- a/builder/role_image_test.go +++ b/builder/role_image_test.go @@ -596,10 +596,10 @@ func TestBuildRoleImages(t *testing.T) { ) assert.NoError(err) - expected := `.*,fissile,create-role-images::test-registry.com:9000/test-organization/test-repository-myrole:[a-z0-9]{40},start -.*,fissile,create-role-images::test-registry.com:9000/test-organization/test-repository-myrole:[a-z0-9]{40},done
-.*,fissile,create-role-images::test-registry.com:9000/test-organization/test-repository-foorole:[a-z0-9]{40},start -.*,fissile,create-role-images::test-registry.com:9000/test-organization/test-repository-foorole:[a-z0-9]{40},done` + expected := `.*,fissile,create-images::test-registry.com:9000/test-organization/test-repository-myrole:[a-z0-9]{40},start +.*,fissile,create-images::test-registry.com:9000/test-organization/test-repository-myrole:[a-z0-9]{40},done +.*,fissile,create-images::test-registry.com:9000/test-organization/test-repository-foorole:[a-z0-9]{40},start +.*,fissile,create-images::test-registry.com:9000/test-organization/test-repository-foorole:[a-z0-9]{40},done` contents, err := ioutil.ReadFile(metrics) assert.NoError(err) From c834d98bb714d506bedf3f72919135c363479d98 Mon Sep 17 00:00:00 2001 From: Mario Manno Date: Fri, 3 Aug 2018 12:07:04 +0200 Subject: [PATCH 4/4] Rename release_name to release --- docs/configuration.md | 4 ++-- model/roles.go | 4 ++-- test-assets/role-manifests/app/hashmat.yml | 2 +- test-assets/role-manifests/app/roles-to-build.yml | 6 +++--- test-assets/role-manifests/app/tor-validation-issues.yml | 6 +++--- test-assets/role-manifests/app/tor-validation-ok.yml | 6 +++--- test-assets/role-manifests/builder/tor-good.yml | 6 +++--- test-assets/role-manifests/compilator/tor-good.yml | 6 +++--- ...located-containers-with-stateful-set-and-empty-dir.yml | 4 ++-- test-assets/role-manifests/kube/colocated-containers.yml | 6 +++--- test-assets/role-manifests/kube/job-with-annotation.yml | 2 +- test-assets/role-manifests/kube/jobs.yml | 4 ++-- test-assets/role-manifests/kube/pods.yml | 4 ++-- .../role-manifests/kube/volumes-with-annotation.yml | 2 +- test-assets/role-manifests/kube/volumes.yml | 2 +- test-assets/role-manifests/model/bad-cv-type-internal.yml | 2 +- test-assets/role-manifests/model/bad-cv-type.yml | 6 +++--- test-assets/role-manifests/model/bosh-run-env.yml | 4 ++-- test-assets/role-manifests/model/bosh-run-missing.yml | 6 +++--- .../model/colocated-containers-with-clustered-tag.yml | 6 +++--- .../model/colocated-containers-with-missing-role.yml | 4 ++-- .../model/colocated-containers-with-no-port-collision.yml | 6 +++--- .../model/colocated-containers-with-port-collision.yml | 6 +++--- .../model/colocated-containers-with-unused-role.yml | 8 ++++---- .../colocated-containers-with-volume-share-issues.yml | 6 +++--- test-assets/role-manifests/model/colocated-containers.yml | 6 +++--- test-assets/role-manifests/model/multiple-bad.yml | 8 ++++---- test-assets/role-manifests/model/multiple-good.yml | 8 ++++---- test-assets/role-manifests/model/non-bosh-roles.yml | 6 +++--- test-assets/role-manifests/model/templates-non.yml | 6 +++--- test-assets/role-manifests/model/tor-bad.yml | 6 +++--- test-assets/role-manifests/model/tor-good.yml | 6 +++--- test-assets/role-manifests/model/variable-expansion.yml | 2 +- .../role-manifests/model/variables-without-decl.yml | 2 +- .../role-manifests/model/variables-without-usage.yml | 2 +- 35 files changed, 85 insertions(+), 85 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index 6a9ab5b9..da114083 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -33,7 +33,7 @@ instance_groups: - name: nats # The name of the instance group jobs: # BOSH jobs this group will have - name: nats - release_name: nats # The name of the BOSH release this is from + release: nats # The name of the BOSH release this is from tags: - indexed # Mark this group as indexed (load-balanced) => StatefulSet run: # 
Runtime configuration @@ -159,7 +159,7 @@ instance_groups: - name: mysql jobs: - name: mysql - release_name: cf-mysql + release: cf-mysql provides: mysql: {} processes: diff --git a/model/roles.go b/model/roles.go index 45d77b80..ba09f20a 100644 --- a/model/roles.go +++ b/model/roles.go @@ -65,8 +65,8 @@ type RoleManifest struct { // JobReference represents a job in the context of a role type JobReference struct { *Job `yaml:"-"` // The resolved job - Name string `yaml:"name"` // The name of the job - ReleaseName string `yaml:"release_name"` // The release the job comes from + Name string `yaml:"name"` // The name of the job + ReleaseName string `yaml:"release"` // The release the job comes from ExportedProviders map[string]jobProvidesInfo `yaml:"provides"` ResolvedConsumers map[string]jobConsumesInfo `yaml:"consumes"` } diff --git a/test-assets/role-manifests/app/hashmat.yml b/test-assets/role-manifests/app/hashmat.yml index 1b7f01fd..65fb7b2c 100644 --- a/test-assets/role-manifests/app/hashmat.yml +++ b/test-assets/role-manifests/app/hashmat.yml @@ -6,7 +6,7 @@ instance_groups: foo: x jobs: - name: hashmat - release_name: tor + release: tor configuration: variables: - name: FOO diff --git a/test-assets/role-manifests/app/roles-to-build.yml b/test-assets/role-manifests/app/roles-to-build.yml index 48deddb4..f5ead823 100644 --- a/test-assets/role-manifests/app/roles-to-build.yml +++ b/test-assets/role-manifests/app/roles-to-build.yml @@ -6,13 +6,13 @@ instance_groups: foo: x jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor - name: foorole type: bosh-task run: foo: x jobs: - name: tor - release_name: tor + release: tor diff --git a/test-assets/role-manifests/app/tor-validation-issues.yml b/test-assets/role-manifests/app/tor-validation-issues.yml index bf305282..165ccf87 100644 --- a/test-assets/role-manifests/app/tor-validation-issues.yml +++ b/test-assets/role-manifests/app/tor-validation-issues.yml @@ -6,9 +6,9 @@ instance_groups: foo: x jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor configuration: templates: properties.tor.bogus: BOGUS @@ -18,7 +18,7 @@ instance_groups: foo: x jobs: - name: tor - release_name: tor + release: tor configuration: variables: - name: BAR diff --git a/test-assets/role-manifests/app/tor-validation-ok.yml b/test-assets/role-manifests/app/tor-validation-ok.yml index a9dc5002..9ff6c535 100644 --- a/test-assets/role-manifests/app/tor-validation-ok.yml +++ b/test-assets/role-manifests/app/tor-validation-ok.yml @@ -15,16 +15,16 @@ instance_groups: foo: x jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor - name: foorole type: bosh-task run: foo: x jobs: - name: tor - release_name: tor + release: tor configuration: variables: - name: BAR diff --git a/test-assets/role-manifests/builder/tor-good.yml b/test-assets/role-manifests/builder/tor-good.yml index bd7b8704..5ea1e6e3 100644 --- a/test-assets/role-manifests/builder/tor-good.yml +++ b/test-assets/role-manifests/builder/tor-good.yml @@ -14,16 +14,16 @@ instance_groups: foo: x jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor - name: foorole type: bosh-task run: foo: x jobs: - name: tor - release_name: tor + release: tor configuration: variables: - name: BAR diff --git a/test-assets/role-manifests/compilator/tor-good.yml b/test-assets/role-manifests/compilator/tor-good.yml index 
f1903e03..f64b1d60 100644 --- a/test-assets/role-manifests/compilator/tor-good.yml +++ b/test-assets/role-manifests/compilator/tor-good.yml @@ -5,13 +5,13 @@ instance_groups: foo: x jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor - name: foorole type: bosh-task run: foo: x jobs: - name: tor - release_name: tor + release: tor diff --git a/test-assets/role-manifests/kube/colocated-containers-with-stateful-set-and-empty-dir.yml b/test-assets/role-manifests/kube/colocated-containers-with-stateful-set-and-empty-dir.yml index ae056310..d69ba0dc 100644 --- a/test-assets/role-manifests/kube/colocated-containers-with-stateful-set-and-empty-dir.yml +++ b/test-assets/role-manifests/kube/colocated-containers-with-stateful-set-and-empty-dir.yml @@ -3,7 +3,7 @@ instance_groups: - name: myrole jobs: - name: tor - release_name: tor + release: tor run: capabilities: - something @@ -28,7 +28,7 @@ instance_groups: type: colocated-container jobs: - name: tor - release_name: tor + release: tor run: capabilities: - something diff --git a/test-assets/role-manifests/kube/colocated-containers.yml b/test-assets/role-manifests/kube/colocated-containers.yml index d09fb1e1..78bc43ab 100644 --- a/test-assets/role-manifests/kube/colocated-containers.yml +++ b/test-assets/role-manifests/kube/colocated-containers.yml @@ -10,9 +10,9 @@ instance_groups: tag: shared-data jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor colocated_containers: - to-be-colocated @@ -20,7 +20,7 @@ instance_groups: type: colocated-container jobs: - name: ntpd - release_name: ntp + release: ntp run: memory: 1 volumes: diff --git a/test-assets/role-manifests/kube/job-with-annotation.yml b/test-assets/role-manifests/kube/job-with-annotation.yml index 72790a20..c99e5448 100644 --- a/test-assets/role-manifests/kube/job-with-annotation.yml +++ b/test-assets/role-manifests/kube/job-with-annotation.yml @@ -4,7 +4,7 @@ instance_groups: type: bosh-task jobs: - name: new_hostname - release_name: tor + release: tor run: flight-stage: post-flight memory: 128 diff --git a/test-assets/role-manifests/kube/jobs.yml b/test-assets/role-manifests/kube/jobs.yml index fbd8fc82..9017f76a 100644 --- a/test-assets/role-manifests/kube/jobs.yml +++ b/test-assets/role-manifests/kube/jobs.yml @@ -4,7 +4,7 @@ instance_groups: type: bosh-task jobs: - name: new_hostname - release_name: tor + release: tor run: flight-stage: pre-flight memory: 128 @@ -12,7 +12,7 @@ instance_groups: type: bosh-task jobs: - name: tor - release_name: tor + release: tor run: flight-stage: post-flight memory: 256 diff --git a/test-assets/role-manifests/kube/pods.yml b/test-assets/role-manifests/kube/pods.yml index d32c5994..34cf1119 100644 --- a/test-assets/role-manifests/kube/pods.yml +++ b/test-assets/role-manifests/kube/pods.yml @@ -5,7 +5,7 @@ instance_groups: tags: [stop-on-failure] jobs: - name: new_hostname - release_name: tor + release: tor run: flight-stage: pre-flight memory: 128 @@ -19,7 +19,7 @@ instance_groups: tags: [stop-on-failure] jobs: - name: tor - release_name: tor + release: tor run: flight-stage: post-flight memory: 256 diff --git a/test-assets/role-manifests/kube/volumes-with-annotation.yml b/test-assets/role-manifests/kube/volumes-with-annotation.yml index 8ec3d68c..9d1157ad 100644 --- a/test-assets/role-manifests/kube/volumes-with-annotation.yml +++ b/test-assets/role-manifests/kube/volumes-with-annotation.yml @@ -3,7 +3,7 @@ instance_groups: - name: myrole 
jobs: - name: tor - release_name: tor + release: tor run: capabilities: - something diff --git a/test-assets/role-manifests/kube/volumes.yml b/test-assets/role-manifests/kube/volumes.yml index 9f6af3db..6243b083 100644 --- a/test-assets/role-manifests/kube/volumes.yml +++ b/test-assets/role-manifests/kube/volumes.yml @@ -3,7 +3,7 @@ instance_groups: - name: myrole jobs: - name: tor - release_name: tor + release: tor run: capabilities: - something diff --git a/test-assets/role-manifests/model/bad-cv-type-internal.yml b/test-assets/role-manifests/model/bad-cv-type-internal.yml index 174a1183..8f124444 100644 --- a/test-assets/role-manifests/model/bad-cv-type-internal.yml +++ b/test-assets/role-manifests/model/bad-cv-type-internal.yml @@ -6,7 +6,7 @@ instance_groups: foo: x jobs: - name: new_hostname - release_name: tor + release: tor configuration: variables: - name: BAR diff --git a/test-assets/role-manifests/model/bad-cv-type.yml b/test-assets/role-manifests/model/bad-cv-type.yml index 6f347a5f..82ef16c1 100644 --- a/test-assets/role-manifests/model/bad-cv-type.yml +++ b/test-assets/role-manifests/model/bad-cv-type.yml @@ -15,16 +15,16 @@ instance_groups: foo: x jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor - name: foorole type: bosh-task run: foo: x jobs: - name: tor - release_name: tor + release: tor configuration: variables: - name: BAR diff --git a/test-assets/role-manifests/model/bosh-run-env.yml b/test-assets/role-manifests/model/bosh-run-env.yml index 2aaedf68..25acf4b0 100644 --- a/test-assets/role-manifests/model/bosh-run-env.yml +++ b/test-assets/role-manifests/model/bosh-run-env.yml @@ -9,6 +9,6 @@ instance_groups: - FOO jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor diff --git a/test-assets/role-manifests/model/bosh-run-missing.yml b/test-assets/role-manifests/model/bosh-run-missing.yml index 776c9250..ec2bdef7 100644 --- a/test-assets/role-manifests/model/bosh-run-missing.yml +++ b/test-assets/role-manifests/model/bosh-run-missing.yml @@ -3,16 +3,16 @@ instance_groups: - name: myrole jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor - name: foorole type: bosh-task run: foo: x jobs: - name: tor - release_name: tor + release: tor configuration: variables: - name: BAR diff --git a/test-assets/role-manifests/model/colocated-containers-with-clustered-tag.yml b/test-assets/role-manifests/model/colocated-containers-with-clustered-tag.yml index a55ef8af..1fb99f38 100644 --- a/test-assets/role-manifests/model/colocated-containers-with-clustered-tag.yml +++ b/test-assets/role-manifests/model/colocated-containers-with-clustered-tag.yml @@ -10,9 +10,9 @@ instance_groups: tag: shared-data jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor colocated_containers: - to-be-colocated @@ -21,7 +21,7 @@ instance_groups: tags: [headless] jobs: - name: ntpd - release_name: ntp + release: ntp run: memory: 1 volumes: diff --git a/test-assets/role-manifests/model/colocated-containers-with-missing-role.yml b/test-assets/role-manifests/model/colocated-containers-with-missing-role.yml index e3ef52a2..a339313b 100644 --- a/test-assets/role-manifests/model/colocated-containers-with-missing-role.yml +++ b/test-assets/role-manifests/model/colocated-containers-with-missing-role.yml @@ -6,8 +6,8 @@ instance_groups: memory: 1 jobs: - name: new_hostname - release_name: tor + 
release: tor - name: tor - release_name: tor + release: tor colocated_containers: - to-be-colocated-typo diff --git a/test-assets/role-manifests/model/colocated-containers-with-no-port-collision.yml b/test-assets/role-manifests/model/colocated-containers-with-no-port-collision.yml index 3588523d..89da8264 100644 --- a/test-assets/role-manifests/model/colocated-containers-with-no-port-collision.yml +++ b/test-assets/role-manifests/model/colocated-containers-with-no-port-collision.yml @@ -17,9 +17,9 @@ instance_groups: - headless jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor colocated_containers: - to-be-colocated @@ -27,7 +27,7 @@ instance_groups: type: colocated-container jobs: - name: ntpd - release_name: ntp + release: ntp run: memory: 1 exposed-ports: diff --git a/test-assets/role-manifests/model/colocated-containers-with-port-collision.yml b/test-assets/role-manifests/model/colocated-containers-with-port-collision.yml index 3c4f5fc2..f8c5b10e 100644 --- a/test-assets/role-manifests/model/colocated-containers-with-port-collision.yml +++ b/test-assets/role-manifests/model/colocated-containers-with-port-collision.yml @@ -20,9 +20,9 @@ instance_groups: - headless jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor colocated_containers: - to-be-colocated @@ -30,7 +30,7 @@ instance_groups: type: colocated-container jobs: - name: ntpd - release_name: ntp + release: ntp run: memory: 1 exposed-ports: diff --git a/test-assets/role-manifests/model/colocated-containers-with-unused-role.yml b/test-assets/role-manifests/model/colocated-containers-with-unused-role.yml index 39e7900a..9b98cf9c 100644 --- a/test-assets/role-manifests/model/colocated-containers-with-unused-role.yml +++ b/test-assets/role-manifests/model/colocated-containers-with-unused-role.yml @@ -6,9 +6,9 @@ instance_groups: memory: 1 jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor colocated_containers: - to-be-colocated @@ -16,7 +16,7 @@ instance_groups: type: colocated-container jobs: - name: ntpd - release_name: ntp + release: ntp run: memory: 1 @@ -24,6 +24,6 @@ instance_groups: type: colocated-container jobs: - name: ntpd - release_name: ntp + release: ntp run: memory: 1 diff --git a/test-assets/role-manifests/model/colocated-containers-with-volume-share-issues.yml b/test-assets/role-manifests/model/colocated-containers-with-volume-share-issues.yml index c0ab57be..50e4d593 100644 --- a/test-assets/role-manifests/model/colocated-containers-with-volume-share-issues.yml +++ b/test-assets/role-manifests/model/colocated-containers-with-volume-share-issues.yml @@ -19,9 +19,9 @@ instance_groups: tag: mount-share jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor colocated_containers: - to-be-colocated @@ -29,7 +29,7 @@ instance_groups: type: colocated-container jobs: - name: ntpd - release_name: ntp + release: ntp run: memory: 1 volumes: diff --git a/test-assets/role-manifests/model/colocated-containers.yml b/test-assets/role-manifests/model/colocated-containers.yml index d09fb1e1..78bc43ab 100644 --- a/test-assets/role-manifests/model/colocated-containers.yml +++ b/test-assets/role-manifests/model/colocated-containers.yml @@ -10,9 +10,9 @@ instance_groups: tag: shared-data jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor colocated_containers: 
- to-be-colocated @@ -20,7 +20,7 @@ instance_groups: type: colocated-container jobs: - name: ntpd - release_name: ntp + release: ntp run: memory: 1 volumes: diff --git a/test-assets/role-manifests/model/multiple-bad.yml b/test-assets/role-manifests/model/multiple-bad.yml index 09ae0635..f5611b57 100644 --- a/test-assets/role-manifests/model/multiple-bad.yml +++ b/test-assets/role-manifests/model/multiple-bad.yml @@ -6,13 +6,13 @@ instance_groups: memory: 1 jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor - name: ntpd - release_name: ntp + release: ntp - name: foorole type: bosh-task jobs: - name: ntpd - release_name: foo + release: foo diff --git a/test-assets/role-manifests/model/multiple-good.yml b/test-assets/role-manifests/model/multiple-good.yml index 1e1a7c3a..082b1a8b 100644 --- a/test-assets/role-manifests/model/multiple-good.yml +++ b/test-assets/role-manifests/model/multiple-good.yml @@ -6,15 +6,15 @@ instance_groups: memory: 1 jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor - name: ntpd - release_name: ntp + release: ntp - name: foorole type: bosh-task run: foo: x jobs: - name: tor - release_name: tor + release: tor diff --git a/test-assets/role-manifests/model/non-bosh-roles.yml b/test-assets/role-manifests/model/non-bosh-roles.yml index 5bad5542..84015ad2 100644 --- a/test-assets/role-manifests/model/non-bosh-roles.yml +++ b/test-assets/role-manifests/model/non-bosh-roles.yml @@ -6,16 +6,16 @@ instance_groups: memory: 1 jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor - name: foorole type: bosh-task run: foo: x jobs: - name: tor - release_name: tor + release: tor - name: dockerrole type: docker fookey: somevalue diff --git a/test-assets/role-manifests/model/templates-non.yml b/test-assets/role-manifests/model/templates-non.yml index dd2b1992..555ffa25 100644 --- a/test-assets/role-manifests/model/templates-non.yml +++ b/test-assets/role-manifests/model/templates-non.yml @@ -14,16 +14,16 @@ instance_groups: foo: x jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor - name: foorole type: bosh-task run: foo: x jobs: - name: tor - release_name: tor + release: tor configuration: variables: - name: BAR diff --git a/test-assets/role-manifests/model/tor-bad.yml b/test-assets/role-manifests/model/tor-bad.yml index 1b2e57cc..7ebb62a7 100644 --- a/test-assets/role-manifests/model/tor-bad.yml +++ b/test-assets/role-manifests/model/tor-bad.yml @@ -5,12 +5,12 @@ instance_groups: memory: 1 jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor - name: foorole run: memory: 1 jobs: - name: foo # This job should not exist - release_name: tor + release: tor diff --git a/test-assets/role-manifests/model/tor-good.yml b/test-assets/role-manifests/model/tor-good.yml index 65fdcca1..5d235a03 100644 --- a/test-assets/role-manifests/model/tor-good.yml +++ b/test-assets/role-manifests/model/tor-good.yml @@ -14,13 +14,13 @@ instance_groups: foo: x jobs: - name: new_hostname - release_name: tor + release: tor - name: tor - release_name: tor + release: tor - name: foorole type: bosh-task run: foo: x jobs: - name: tor - release_name: tor + release: tor diff --git a/test-assets/role-manifests/model/variable-expansion.yml b/test-assets/role-manifests/model/variable-expansion.yml index cc3bf6c1..ccbfc174 100644 --- 
a/test-assets/role-manifests/model/variable-expansion.yml +++ b/test-assets/role-manifests/model/variable-expansion.yml @@ -7,7 +7,7 @@ instance_groups: foo: x jobs: - name: tor - release_name: tor + release: tor configuration: variables: - name: BAR diff --git a/test-assets/role-manifests/model/variables-without-decl.yml b/test-assets/role-manifests/model/variables-without-decl.yml index cb1f7f70..0e006ac7 100644 --- a/test-assets/role-manifests/model/variables-without-decl.yml +++ b/test-assets/role-manifests/model/variables-without-decl.yml @@ -6,7 +6,7 @@ instance_groups: foo: x jobs: - name: tor - release_name: tor + release: tor configuration: variables: - name: BAR diff --git a/test-assets/role-manifests/model/variables-without-usage.yml b/test-assets/role-manifests/model/variables-without-usage.yml index 7451bdc4..067fff9e 100644 --- a/test-assets/role-manifests/model/variables-without-usage.yml +++ b/test-assets/role-manifests/model/variables-without-usage.yml @@ -4,7 +4,7 @@ instance_groups: - name: myrole jobs: - name: tor - release_name: tor + release: tor run: {} configuration: variables: