diff --git a/README.md b/README.md index d8b1f7f..1b3be7f 100644 --- a/README.md +++ b/README.md @@ -13,12 +13,30 @@ podman pull quay.io/konveyor/kantra:latest && podman run --name kantra-download ### MacOS -**Note:** On MacOS, in order to correctly mount volumes, your podman machine must contain options: +**Note:** There is a known [issue](https://github.com/containers/podman/issues/16106) +with limited number of open files in mounted volumes on MacOS, which may affect kantra performance. + +Prior to starting your podman machine, run: + +```sh +ulimit -n unlimited +``` + + - This must be run after each podman machine reboot. + +In order to correctly mount volumes, your podman machine must contain options: ```sh podman machine init -v $HOME:$HOME -v /private/tmp:/private/tmp -v /var/folders/:/var/folders/ ``` +Increase podman resources: + +```sh +podman machine set --cpus 4 --memory 4096 +``` + + Ensure that we use the connection to the VM `` we created earlier by default: ```sh @@ -75,6 +93,7 @@ Flags: Use "kantra [command] --help" for more information about a command. ``` + ### Analyze Analyze allows running source code and binary analysis using [analyzer-lsp](https://github.com/konveyor/analyzer-lsp) @@ -188,50 +207,7 @@ To run `transform rules` on application source code, run: kantra transform rules --input= --output= ``` -## Quick Demos - -Once you have kantra installed, these examples will help you run both an -analyze and a transform command. 
- -### Analyze - -- Get the example application to run analysis on -`git clone https://github.com/konveyor/example-applications` - -- List available target technologies -`kantra analyze --list-targets` - -- Run analysis with a specified target technology -`kantra analyze --input= --output= --target=cloud-readiness` - -- Several analysis reports will have been created in your specified output path: - -```sh -$ ls ./output/ -1 -analysis.log -dependencies.yaml -dependency.log -output.yaml -static-report -``` - -`output.yaml` is the file that contains issues report. -`static-report` contains the static HTML report. -`dependencies.yaml`contains a dependencies report. - -### Transform - -- Get the example application to transform source code -`git clone https://github.com/ivargrimstad/jakartaee-duke` - -- View available OpenRewrite recipes -`kantra transform openrewrite --list-targets` - -- Run a recipe on the example application -`kantra transform openrewrite --input= --target=jakarta-imports` - -- Inspect the `jakartaee-duke` application source code diff to see the transformation - +### analyze and transform [examples](./docs/example.md) ## Code of Conduct Refer to Konveyor's Code of Conduct [here](https://github.com/konveyor/community/blob/main/CODE_OF_CONDUCT.md). 
diff --git a/cmd/analyze.go b/cmd/analyze.go index ef3b696..c857f6f 100644 --- a/cmd/analyze.go +++ b/cmd/analyze.go @@ -8,7 +8,6 @@ import ( "fmt" "io" "io/fs" - "io/ioutil" "os" "path" @@ -29,6 +28,8 @@ import ( ) var ( + // TODO (pgaikwad): this assumes that the $USER in container is always root, it may not be the case in future + M2Dir = path.Join("/", "root", ".m2") // application source path inside the container SourceMountPath = path.Join(InputPath, "source") // analyzer config files @@ -48,6 +49,7 @@ type analyzeCommand struct { skipStaticReport bool analyzeKnownLibraries bool jsonOutput bool + overwrite bool mavenSettingsFile string sources []string targets []string @@ -56,18 +58,22 @@ type analyzeCommand struct { output string mode string rules []string + jaegerEndpoint string // tempDirs list of temporary dirs created, used for cleanup tempDirs []string log logr.Logger // isFileInput is set when input points to a file and not a dir isFileInput bool + logLevel *uint32 + cleanup bool } // analyzeCmd represents the analyze command func NewAnalyzeCmd(log logr.Logger) *cobra.Command { analyzeCmd := &analyzeCommand{ - log: log, + log: log, + cleanup: true, } analyzeCommand := &cobra.Command{ @@ -91,6 +97,12 @@ func NewAnalyzeCmd(log logr.Logger) *cobra.Command { return nil }, RunE: func(cmd *cobra.Command, args []string) error { + if val, err := cmd.Flags().GetUint32(logLevelFlag); err == nil { + analyzeCmd.logLevel = &val + } + if val, err := cmd.Flags().GetBool(noCleanupFlag); err == nil { + analyzeCmd.cleanup = !val + } if analyzeCmd.listSources || analyzeCmd.listTargets { err := analyzeCmd.ListLabels(cmd.Context()) if err != nil { @@ -122,9 +134,6 @@ func NewAnalyzeCmd(log logr.Logger) *cobra.Command { return nil }, PostRunE: func(cmd *cobra.Command, args []string) error { - if cmd.PersistentFlags().Changed(noCleanupFlag) { - return nil - } err := analyzeCmd.Clean(cmd.Context()) if err != nil { log.Error(err, "failed to clean temporary container 
resources") @@ -146,6 +155,8 @@ func NewAnalyzeCmd(log logr.Logger) *cobra.Command { analyzeCommand.Flags().StringVar(&analyzeCmd.mavenSettingsFile, "maven-settings", "", "path to a custom maven settings file to use") analyzeCommand.Flags().StringVarP(&analyzeCmd.mode, "mode", "m", string(provider.FullAnalysisMode), "analysis mode. Must be one of 'full' or 'source-only'") analyzeCommand.Flags().BoolVar(&analyzeCmd.jsonOutput, "json-output", false, "create analysis and dependency output as json") + analyzeCommand.Flags().BoolVar(&analyzeCmd.overwrite, "overwrite", false, "overwrite output directory") + analyzeCommand.Flags().StringVar(&analyzeCmd.jaegerEndpoint, "jaeger-endpoint", "", "jaeger endpoint to collect traces") return analyzeCommand } @@ -157,6 +168,10 @@ func (a *analyzeCommand) Validate() error { if a.labelSelector != "" && (len(a.sources) > 0 || len(a.targets) > 0) { return fmt.Errorf("must not specify label-selector and sources or targets") } + err := a.CheckOverwriteOutput() + if err != nil { + return err + } stat, err := os.Stat(a.output) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -206,6 +221,26 @@ func (a *analyzeCommand) Validate() error { return nil } +func (a *analyzeCommand) CheckOverwriteOutput() error { + // default overwrite to false so check for already existing output dir + stat, err := os.Stat(a.output) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return err + } + } + if !a.overwrite && stat != nil { + return fmt.Errorf("output dir %v already exists and --overwrite not set", a.output) + } + if a.overwrite && stat != nil { + err := os.RemoveAll(a.output) + if err != nil { + return err + } + } + return nil +} + func (a *analyzeCommand) ListLabels(ctx context.Context) error { // reserved labels sourceLabel := outputv1.SourceTechnologyLabel @@ -249,6 +284,7 @@ func (a *analyzeCommand) ListLabels(ctx context.Context) error { WithVolumes(volumes), WithEntrypointBin("/usr/local/bin/kantra"), WithEntrypointArgs(args...), 
+ WithCleanup(a.cleanup), ) if err != nil { a.log.Error(err, "failed listing labels") @@ -328,6 +364,8 @@ func listOptionsFromLabels(sl []string, label string) { fmt.Println("available target technologies:") } for _, tech := range newSl { + tech = strings.TrimSuffix(tech, "+") + tech = strings.TrimSuffix(tech, "-") fmt.Println(tech) } } @@ -372,6 +410,9 @@ func (a *analyzeCommand) getConfigVolumes() (map[string]string, error) { } javaConfig.InitConfig[0].ProviderSpecificConfig["mavenSettingsFile"] = fmt.Sprintf("%s/%s", ConfigMountPath, "settings.xml") } + if Settings.JvmMaxMem != "" { + javaConfig.InitConfig[0].ProviderSpecificConfig["jvmMaxMem"] = Settings.JvmMaxMem + } provConfig := []provider.Config{ { @@ -405,14 +446,28 @@ func (a *analyzeCommand) getConfigVolumes() (map[string]string, error) { a.log.V(1).Error(err, "failed to marshal provider config") return nil, err } - err = ioutil.WriteFile(filepath.Join(tempDir, "settings.json"), jsonData, os.ModePerm) + err = os.WriteFile(filepath.Join(tempDir, "settings.json"), jsonData, os.ModePerm) if err != nil { - a.log.V(1).Error(err, "failed to write provider config", "dir", tempDir, "file", "settings.json") + a.log.V(1).Error(err, + "failed to write provider config", "dir", tempDir, "file", "settings.json") return nil, err } - return map[string]string{ + + vols := map[string]string{ tempDir: ConfigMountPath, - }, nil + } + // attempt to create a .m2 directory we can use to speed things a bit + // this will be shared between analyze and dep command containers + m2Dir, err := os.MkdirTemp("", "m2-repo-") + if err != nil { + a.log.V(1).Error(err, "failed to create m2 repo", "dir", m2Dir) + } else { + vols[m2Dir] = M2Dir + a.log.V(1).Info("created directory for maven repo", "dir", m2Dir) + a.tempDirs = append(a.tempDirs, m2Dir) + } + + return vols, nil } func (a *analyzeCommand) getRulesVolumes() (map[string]string, error) { @@ -491,7 +546,7 @@ func createTempRuleSet(path string) error { if err != nil { return err } 
- err = ioutil.WriteFile(path, yamlData, os.ModePerm) + err = os.WriteFile(path, yamlData, os.ModePerm) if err != nil { return err } @@ -535,11 +590,18 @@ func (a *analyzeCommand) RunAnalysis(ctx context.Context, xmlOutputDir string) e fmt.Sprintf("--output-file=%s", AnalysisOutputMountPath), fmt.Sprintf("--context-lines=%d", 100), } + if a.jaegerEndpoint != "" { + args = append(args, "--enable-jaeger") + args = append(args, "--jaeger-endpoint") + args = append(args, a.jaegerEndpoint) + } if !a.analyzeKnownLibraries { args = append(args, fmt.Sprintf("--dep-label-selector=(!%s=open-source)", provider.DepSourceLabel)) } - + if a.logLevel != nil { + args = append(args, fmt.Sprintf("--verbose=%d", *a.logLevel)) + } labelSelector := a.getLabelSelector() if labelSelector != "" { args = append(args, fmt.Sprintf("--label-selector=%s", labelSelector)) @@ -561,14 +623,16 @@ func (a *analyzeCommand) RunAnalysis(ctx context.Context, xmlOutputDir string) e a.log.Info("running source code analysis", "log", analysisLogFilePath, "input", a.input, "output", a.output, "args", strings.Join(args, " "), "volumes", volumes) + a.log.Info("generating analysis log in file", "file", analysisLogFilePath) // TODO (pgaikwad): run analysis & deps in parallel err = NewContainer(a.log).Run( ctx, WithVolumes(volumes), - WithStdout(os.Stdout, analysisLog), - WithStderr(os.Stdout, analysisLog), + WithStdout(analysisLog), + WithStderr(analysisLog), WithEntrypointArgs(args...), WithEntrypointBin("/usr/bin/konveyor-analyzer"), + WithCleanup(a.cleanup), ) if err != nil { return err @@ -576,16 +640,18 @@ func (a *analyzeCommand) RunAnalysis(ctx context.Context, xmlOutputDir string) e a.log.Info("running dependency analysis", "log", depsLogFilePath, "input", a.input, "output", a.output, "args", strings.Join(args, " ")) + a.log.Info("generating dependency log in file", "file", depsLogFilePath) err = NewContainer(a.log).Run( ctx, - WithStdout(os.Stdout, dependencyLog), - WithStderr(os.Stderr, 
dependencyLog), + WithStdout(dependencyLog), + WithStderr(dependencyLog), WithVolumes(volumes), WithEntrypointBin("/usr/bin/konveyor-analyzer-dep"), WithEntrypointArgs( fmt.Sprintf("--output-file=%s", DepsOutputMountPath), fmt.Sprintf("--provider-settings=%s", ProviderSettingsMountPath), ), + WithCleanup(a.cleanup), ) if err != nil { return err @@ -601,7 +667,7 @@ func (a *analyzeCommand) CreateJSONOutput() error { outputPath := filepath.Join(a.output, "output.yaml") depPath := filepath.Join(a.output, "dependencies.yaml") - data, err := ioutil.ReadFile(outputPath) + data, err := os.ReadFile(outputPath) if err != nil { return err } @@ -617,13 +683,13 @@ func (a *analyzeCommand) CreateJSONOutput() error { a.log.V(1).Error(err, "failed to marshal output file to json") return err } - err = ioutil.WriteFile(filepath.Join(a.output, "output.json"), jsonData, os.ModePerm) + err = os.WriteFile(filepath.Join(a.output, "output.json"), jsonData, os.ModePerm) if err != nil { a.log.V(1).Error(err, "failed to write json output", "dir", a.output, "file", "output.json") return err } - depData, err := ioutil.ReadFile(depPath) + depData, err := os.ReadFile(depPath) if err != nil { return err } @@ -639,7 +705,7 @@ func (a *analyzeCommand) CreateJSONOutput() error { a.log.V(1).Error(err, "failed to marshal dependencies file to json") return err } - err = ioutil.WriteFile(filepath.Join(a.output, "dependencies.json"), jsonDataDep, os.ModePerm) + err = os.WriteFile(filepath.Join(a.output, "dependencies.json"), jsonDataDep, os.ModePerm) if err != nil { a.log.V(1).Error(err, "failed to write json dependencies output", "dir", a.output, "file", "dependencies.json") return err @@ -681,6 +747,7 @@ func (a *analyzeCommand) GenerateStaticReport(ctx context.Context) error { WithEntrypointArgs(staticReportCmd...), WithVolumes(volumes), WithcFlag(true), + WithCleanup(a.cleanup), ) if err != nil { return err @@ -693,6 +760,9 @@ func (a *analyzeCommand) GenerateStaticReport(ctx context.Context) error 
{ } func (a *analyzeCommand) Clean(ctx context.Context) error { + if !a.cleanup { + return nil + } for _, path := range a.tempDirs { err := os.RemoveAll(path) if err != nil { @@ -752,11 +822,7 @@ func (a *analyzeCommand) getLabelSelector() string { } func isXMLFile(rule string) bool { - extension := path.Ext(rule) - if extension == ".xml" { - return true - } - return false + return path.Ext(rule) == ".xml" } func (a *analyzeCommand) getXMLRulesVolumes(tempRuleDir string) (map[string]string, error) { @@ -806,8 +872,10 @@ func (a *analyzeCommand) ConvertXML(ctx context.Context) (string, error) { a.log.V(1).Error(err, "failed to create temp dir for rules") return "", err } - a.log.V(1).Info("created directory for converted XML rules", "dir", tempDir) - defer os.RemoveAll(tempDir) + a.log.V(1).Info("created directory for converted XML rules", "dir", tempOutputDir) + if a.cleanup { + defer os.RemoveAll(tempDir) + } volumes := map[string]string{ tempOutputDir: ShimOutputPath, } @@ -819,16 +887,28 @@ func (a *analyzeCommand) ConvertXML(ctx context.Context) (string, error) { } maps.Copy(volumes, ruleVols) - a.log.Info("running windup shim", "output", a.output) + shimLogPath := filepath.Join(a.output, "shim.log") + shimLog, err := os.Create(shimLogPath) + if err != nil { + return "", fmt.Errorf("failed creating shim log file %s", shimLogPath) + } + defer shimLog.Close() + args := []string{"convert", fmt.Sprintf("--outputdir=%v", ShimOutputPath), XMLRulePath, } + a.log.Info("running windup shim", + "output", a.output, "args", strings.Join(args, " "), "volumes", volumes) + a.log.Info("generating shim log in file", "file", shimLogPath) err = NewContainer(a.log).Run( ctx, + WithStdout(shimLog), + WithStderr(shimLog), WithVolumes(volumes), WithEntrypointArgs(args...), WithEntrypointBin("/usr/local/bin/windup-shim"), + WithCleanup(a.cleanup), ) if err != nil { return "", err diff --git a/cmd/container.go b/cmd/container.go index 9bd61ba..6e8b00e 100644 --- a/cmd/container.go +++ 
b/cmd/container.go @@ -128,34 +128,11 @@ func NewContainer(log logr.Logger) *container { } } -func (c *container) Exists(ctx context.Context) (bool, error) { - cmd := exec.CommandContext(ctx, - Settings.PodmanBinary, - "ps", "-a", "--format", "{{.Names}}") - output, err := cmd.CombinedOutput() - if err != nil { - return false, fmt.Errorf("%w failed checking status of container %s", err, c.name) - } - for _, found := range strings.Split(string(output), "\n") { - if found == c.name { - return true, nil - } - } - return false, nil -} - func (c *container) Run(ctx context.Context, opts ...Option) error { var err error for _, opt := range opts { opt(c) } - exists, err := c.Exists(ctx) - if err != nil { - return fmt.Errorf("%w failed to check status of container %s", err, c.name) - } - if exists { - return fmt.Errorf("container %s already exists, must remove existing before running", c.name) - } args := []string{"run"} os := runtime.GOOS if c.cleanup { @@ -175,7 +152,6 @@ func (c *container) Run(ctx context.Context, opts ...Option) error { } for sourcePath, destPath := range c.volumes { args = append(args, "-v") - // TODO: check this on windows if os == "linux" { args = append(args, fmt.Sprintf("%s:%s:Z", filepath.Clean(sourcePath), path.Clean(destPath))) @@ -219,34 +195,7 @@ func (c *container) Run(ctx context.Context, opts ...Option) error { return nil } -func (c *container) Cp(ctx context.Context, src string, dest string) error { - if src == "" || dest == "" { - return fmt.Errorf("source or dest cannot be empty") - } - exists, err := c.Exists(ctx) - if err != nil { - return err - } - if !exists { - return fmt.Errorf("container %s does not exist, cannot copy from non-existing container", c.name) - } - cmd := exec.CommandContext( - ctx, - Settings.PodmanBinary, - "cp", fmt.Sprintf("%s:%s", c.name, src), dest) - c.log.V(1).Info("copying files from container", - "podman", Settings.PodmanBinary, "src", src, "dest", dest) - return cmd.Run() -} - func (c *container) Rm(ctx 
context.Context) error { - exists, err := c.Exists(ctx) - if err != nil { - return err - } - if !exists { - return nil - } cmd := exec.CommandContext( ctx, Settings.PodmanBinary, diff --git a/cmd/openrewrite.go b/cmd/openrewrite.go index 633affa..c8d0d06 100644 --- a/cmd/openrewrite.go +++ b/cmd/openrewrite.go @@ -18,11 +18,13 @@ type openRewriteCommand struct { goal string miscOpts string log logr.Logger + cleanup bool } func NewOpenRewriteCommand(log logr.Logger) *cobra.Command { openRewriteCmd := &openRewriteCommand{ - log: log, + log: log, + cleanup: true, } openRewriteCommand := &cobra.Command{ @@ -36,6 +38,9 @@ func NewOpenRewriteCommand(log logr.Logger) *cobra.Command { } }, RunE: func(cmd *cobra.Command, args []string) error { + if val, err := cmd.Flags().GetBool(noCleanupFlag); err == nil { + openRewriteCmd.cleanup = !val + } err := openRewriteCmd.Validate() if err != nil { log.Error(err, "failed validating input args") @@ -144,6 +149,7 @@ func (o *openRewriteCommand) Run(ctx context.Context) error { WithEntrypointBin("/usr/bin/mvn"), WithVolumes(volumes), WithWorkDir(InputPath), + WithCleanup(o.cleanup), ) if err != nil { o.log.V(1).Error(err, "error running openrewrite") diff --git a/cmd/root.go b/cmd/root.go index 03ffd37..dfedd58 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -15,6 +15,7 @@ import ( const ( noCleanupFlag = "no-cleanup" + logLevelFlag = "log-level" ) var logLevel uint32 @@ -23,7 +24,6 @@ var noCleanup bool // rootCmd represents the base command when called without any subcommands var rootCmd = &cobra.Command{ - // TODO: better descriptions Short: "A cli tool for analysis and transformation of applications", Long: ``, SilenceUsage: true, @@ -36,7 +36,7 @@ var rootCmd = &cobra.Command{ } func init() { - rootCmd.PersistentFlags().Uint32Var(&logLevel, "log-level", 4, "log level") + rootCmd.PersistentFlags().Uint32Var(&logLevel, logLevelFlag, 4, "log level") rootCmd.PersistentFlags().BoolVar(&noCleanup, noCleanupFlag, false, "do not cleanup 
temporary resources") logrusLog = logrus.New() diff --git a/cmd/settings.go b/cmd/settings.go index 379cde0..6575085 100644 --- a/cmd/settings.go +++ b/cmd/settings.go @@ -22,13 +22,18 @@ type Config struct { RootCommandName string `env:"CMD_NAME" default:"kantra"` PodmanBinary string `env:"PODMAN_BIN" default:"/usr/bin/podman"` RunnerImage string `env:"RUNNER_IMG" default:"quay.io/konveyor/kantra"` + JvmMaxMem string `env:"JVM_MAX_MEM" default:""` } func (c *Config) Load() error { - podmanPath, _ := exec.LookPath("podman") - if podmanPath != c.PodmanBinary && (podmanPath != "" || len(podmanPath) > 0) { - os.Setenv("PODMAN_BIN", podmanPath) + envValue := os.Getenv("PODMAN_BIN") + if envValue == "" { + podmanPath, _ := exec.LookPath("podman") + if podmanPath != c.PodmanBinary && (podmanPath != "" || len(podmanPath) > 0) { + os.Setenv("PODMAN_BIN", podmanPath) + } } + err := env.Set(c) if err != nil { return err diff --git a/cmd/shimconvert.go b/cmd/shimconvert.go index 9f7f5bb..87734e8 100644 --- a/cmd/shimconvert.go +++ b/cmd/shimconvert.go @@ -18,12 +18,14 @@ type windupShimCommand struct { input []string output string - log logr.Logger + log logr.Logger + cleanup bool } func NewWindupShimCommand(log logr.Logger) *cobra.Command { windupShimCmd := &windupShimCommand{ - log: log, + log: log, + cleanup: true, } windupShimCommand := &cobra.Command{ @@ -45,6 +47,9 @@ func NewWindupShimCommand(log logr.Logger) *cobra.Command { return nil }, RunE: func(cmd *cobra.Command, args []string) error { + if val, err := cmd.Flags().GetBool(noCleanupFlag); err == nil { + windupShimCmd.cleanup = !val + } err := windupShimCmd.Run(cmd.Context()) if err != nil { log.Error(err, "failed to execute windup shim") @@ -129,7 +134,10 @@ func (w *windupShimCommand) Run(ctx context.Context) error { w.log.V(1).Error(err, "failed to create temp dir for rules") return err } - defer os.RemoveAll(tempDir) + w.log.V(1).Info("created temp directory for XML rules", "dir", tempDir) + if w.cleanup { + 
defer os.RemoveAll(tempDir) + } volumes := map[string]string{ w.output: ShimOutputPath, } @@ -140,17 +148,28 @@ func (w *windupShimCommand) Run(ctx context.Context) error { } maps.Copy(volumes, ruleVols) + shimLogPath := filepath.Join(w.output, "shim.log") + shimLog, err := os.Create(shimLogPath) + if err != nil { + return fmt.Errorf("failed creating shim log file %s", shimLogPath) + } + defer shimLog.Close() + args := []string{"convert", fmt.Sprintf("--outputdir=%v", ShimOutputPath), XMLRulePath, } w.log.Info("running windup-shim convert command", "args", strings.Join(args, " "), "volumes", volumes, "output", w.output, "inputs", strings.Join(w.input, ",")) + w.log.Info("generating shim log in file", "file", shimLogPath) err = NewContainer(w.log).Run( ctx, WithVolumes(volumes), + WithStdout(shimLog), + WithStderr(shimLog), WithEntrypointArgs(args...), WithEntrypointBin("/usr/local/bin/windup-shim"), + WithCleanup(w.cleanup), ) if err != nil { w.log.V(1).Error(err, "failed to run convert command") diff --git a/docs/analyzer.md b/docs/analyzer.md new file mode 100644 index 0000000..4c6486b --- /dev/null +++ b/docs/analyzer.md @@ -0,0 +1,11 @@ +## Analyzer-lsp Documentation + +### kantra analyze runs [analyzer-lsp](https://github.com/konveyor/analyzer-lsp) + +- To better understand how rules are used in analysis, see the [rules](https://github.com/konveyor/analyzer-lsp/blob/main/docs/rules.md) +documentation + +- If a rule is matched, it creates a [violation](https://github.com/konveyor/analyzer-lsp/blob/main/docs/violations.md) + +- [Labels](https://github.com/konveyor/analyzer-lsp/blob/main/docs/labels.md) +can be used to filter in and out rules diff --git a/docs/developer.md b/docs/developer.md new file mode 100644 index 0000000..b42161e --- /dev/null +++ b/docs/developer.md @@ -0,0 +1,16 @@ +### Running kantra + +Two environment variables control the container runtime and the kantra image: `PODMAN_BIN` and `RUNNER_IMG`: +- `PODMAN_BIN`: path to your container 
runtime (podman or docker) +- `RUNNER_IMG`: the tag of the kantra image to invoke + +#### example: + +`podman build -f Dockerfile -t kantra:dev` + +`RUNNER_IMG=kantra:dev PODMAN_BIN=/usr/local/bin/podman go run main.go analyze --input= --output=./output` + +#### Helpful flags: + +- To increase logs for debugging, you can set `--log-level` (default is 4) +- ie: `--log-level=7` diff --git a/docs/example.md b/docs/example.md new file mode 100644 index 0000000..a4b96d8 --- /dev/null +++ b/docs/example.md @@ -0,0 +1,41 @@ +Once you have kantra installed, these examples will help you run both an +analyze and a transform command. + +### Analyze + +- Get the example application to run analysis on +`git clone https://github.com/konveyor/example-applications` + +- List available target technologies +`kantra analyze --list-targets` + +- Run analysis with a specified target technology +`kantra analyze --input= --output= --target=cloud-readiness` + +- Several analysis reports will have been created in your specified output path: + +```sh +$ ls ./output/ -1 +analysis.log +dependencies.yaml +dependency.log +output.yaml +static-report +``` + +`output.yaml` is the file that contains issues report. +`static-report` contains the static HTML report. +`dependencies.yaml` contains a dependencies report. 
+ +### Transform + +- Get the example application to transform source code +`git clone https://github.com/ivargrimstad/jakartaee-duke` + +- View available OpenRewrite recipes +`kantra transform openrewrite --list-targets` + +- Run a recipe on the example application +`kantra transform openrewrite --input= --target=jakarta-imports` + +- Inspect the `jakartaee-duke` application source code diff to see the transformation diff --git a/docs/usage.md b/docs/usage.md new file mode 100644 index 0000000..f08ad5a --- /dev/null +++ b/docs/usage.md @@ -0,0 +1,15 @@ +## Output + + +#### Default rules: + +- the analyze command runs against a set of packaged rules [here](https://github.com/konveyor/rulesets/) +- `--label-selector` and/or `--target` can filter these rules +- `--rules` can be provided to run analyze on rules outside of this set + +#### `--rules` + `--target` + +- In kantra, if a rule is given as well as a target, but the given rule **does not** +have the target label, the given rule will not match. + - You must add the target label to the custom rule (if applicable) in + order to run this rule. 
diff --git a/go.mod b/go.mod index 7168d1d..1a683da 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,7 @@ require ( github.com/bombsimon/logrusr/v3 v3.1.0 github.com/codingconcepts/env v0.0.0-20200821220118-a8fbf8d84482 github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/konveyor/analyzer-lsp v0.3.0-alpha.1 + github.com/konveyor/analyzer-lsp v0.3.0-beta.1.2 github.com/shopspring/decimal v1.3.1 // indirect github.com/sirupsen/logrus v1.9.3 github.com/spf13/pflag v1.0.5 // indirect diff --git a/go.sum b/go.sum index a0143ff..6e77c7c 100644 --- a/go.sum +++ b/go.sum @@ -36,8 +36,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/invopop/yaml v0.1.0 h1:YW3WGUoJEXYfzWBjn00zIlrw7brGVD0fUKRYDPAPhrc= github.com/invopop/yaml v0.1.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= -github.com/konveyor/analyzer-lsp v0.3.0-alpha.1 h1:RilOnB9E6+zDSDQs4vBVPMITuSD3o+6P0dkXWW2H3YQ= -github.com/konveyor/analyzer-lsp v0.3.0-alpha.1/go.mod h1:Rv2WcWfVMEGEWqn0Fl4U4NcmJYPrmWdPtaFE9KDVVF8= +github.com/konveyor/analyzer-lsp v0.3.0-beta.1.2 h1:yZh5rTZYq1XYzDei3/zwZOqE8XktftaETNycV3Q7ACE= +github.com/konveyor/analyzer-lsp v0.3.0-beta.1.2/go.mod h1:zJCmIq08X0kPvtU8ZSmz+mZmQfBt4hdy9enoEy1AQw4= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= diff --git a/hack/.gitignore b/hack/.gitignore new file mode 100644 index 0000000..960635b --- /dev/null +++ b/hack/.gitignore @@ -0,0 +1,3 @@ +kantra +tmp + diff --git a/hack/README.md b/hack/README.md new file mode 100644 index 0000000..4f5a82b --- /dev/null +++ b/hack/README.md @@ -0,0 +1,44 @@ +# Scripts to help with various 'hacking' needs + +## Running on Mac (darwin) +* 
`darwin_get_latest_kantra_cli.sh`: Run to fetch latest image and extract kantra binary for Mac + +* `darwin_restart_podman_machine.sh`: Help to create/restart a podman machine VM to use with Kantra on Mac + * Can customize like: `CPUS=8 MEM=16384 ./darwin_restart_podman_machine.sh` + +## General run + +* `sample_jakartaee_duke_analyze.sh`: Clone and analyze a jakartaee sample + +## Gather analysis traces + +Full traces of the analysis process can be collected via Jaeger. + +First, run the Jaeger container that will collect traces from analysis: + +```sh +podman run -d --net=host --name jaeger -e COLLECTOR_ZIPKIN_HOST_PORT=:9411 jaegertracing/all-in-one:1.23 +``` + +> Note that we are running Jaeger in `host` network so that analyzer container can communicate with it later. There are several services running in the container, it could happen that the ports they use are pre-occupied on your host. In that case, you will need to free them up manually. + +The Jaeger collector listens on `14268` while the UI listens on `16686` in the container. + +Now, we will run analysis by enabling the tracer that will send traces to our Jaeger collector. + +To do that, pass `--jaeger-endpoint` flag with the value being the collector endpoint: + +```sh +kantra analyze --jaeger-endpoint 'http://172.17.0.1:14268/api/traces' +``` + +> Note that `172.17.0.1` is the IP address using which a Podman container can communicate with the host on Linux. It might be different for your system. Alternatively, you can create a network, set it as default and create your Jaeger instance in it to access it via a name instead of IP. 
+ +When analysis finishes, download the traces from Jaeger: + +```sh +curl -o traces.json http://localhost:16686/api/traces?service=analyzer-lsp +``` + +To view the results locally, open [http://localhost:16686/](http://localhost:16686/) in your browser + diff --git a/hack/darwin_get_latest_kantra_cli.sh b/hack/darwin_get_latest_kantra_cli.sh new file mode 100755 index 0000000..e32785f --- /dev/null +++ b/hack/darwin_get_latest_kantra_cli.sh @@ -0,0 +1,4 @@ +#!/bin/sh +: ${TAG="latest"} +podman pull quay.io/konveyor/kantra:$TAG && podman run --name kantra-download quay.io/konveyor/kantra:$TAG 1> /dev/null 2> /dev/null && podman cp kantra-download:/usr/local/bin/darwin-kantra kantra && podman rm kantra-download + diff --git a/hack/darwin_restart_podman_machine.sh b/hack/darwin_restart_podman_machine.sh new file mode 100755 index 0000000..4958135 --- /dev/null +++ b/hack/darwin_restart_podman_machine.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +# Default variables can be overriden from environment +: ${VM_NAME="kantra"} +: ${MEM=4096} +: ${CPUS=4} +: ${DISK_SIZE=100} + +# See https://github.com/konveyor/kantra/issues/91 +# See https://github.com/containers/podman/issues/16106#issuecomment-1317188581 +# Setting file limits to unlimited on the Mac Host +ulimit -n unlimited +podman machine stop $VM_NAME +podman machine rm $VM_NAME -f +podman machine init $VM_NAME -v $HOME:$HOME -v /private/tmp:/private/tmp -v /var/folders/:/var/folders/ +podman machine set $VM_NAME --cpus $CPUS --memory $MEM --disk-size $DISK_SIZE +podman system connection default $VM_NAME +podman machine start $VM_NAME +# Workaround for setting file limits inside of the podman machine VM +# https://github.com/konveyor/kantra/issues/111 +podman machine ssh $VM_NAME "echo * soft nofile 65535 | sudo tee -a /etc/security/limits.conf" +podman machine ssh $VM_NAME "echo * hard nofile 65535 | sudo tee -a /etc/security/limits.conf" +podman machine ssh $VM_NAME ulimit -n #To confirm the change has taken effect diff --git 
a/hack/sample_jakartaee_duke_analyze.sh b/hack/sample_jakartaee_duke_analyze.sh new file mode 100755 index 0000000..ea3025c --- /dev/null +++ b/hack/sample_jakartaee_duke_analyze.sh @@ -0,0 +1,15 @@ +#!/bin/sh +# +export PATH=$PATH:./ +mkdir -p tmp +# Go into ./tmp and add an out dir and our sample code with correct branch +cd tmp +mkdir -p out +git clone https://github.com/ivargrimstad/jakartaee-duke.git +cd jakartaee-duke +git checkout start-tutorial +cd ../.. +# Backing out to current working directory +time kantra analyze -i $PWD/tmp/jakartaee-duke -t "jakarta-ee9+" -o $PWD/tmp/out + + diff --git a/test-data/analysis-output.yaml b/test-data/analysis-output.yaml index 029478f..ba78282 100644 --- a/test-data/analysis-output.yaml +++ b/test-data/analysis-output.yaml @@ -9,7 +9,6 @@ - konveyor.io/target=cloud-readiness - konveyor.io/source - jni - - "" incidents: - uri: file:///root/.m2/repository/io/konveyor/demo/config-utils/1.0.0/io/konveyor/demo/config/ApplicationConfiguration.java message: Java native libraries might not run in a cloud or container environment.. Recommendations. * Review the purpose of the native library in your application.. * Check whether the native library is compatible with a cloud environment.. * Reuse or embed the native library or application in a cloud environment, for example, in a JBoss module.. * Replace, remove, or rewrite the native library or application using a cloud-compatible equivalent. @@ -36,24 +35,53 @@ - name: azure/springboot description: Identify Zipkin dependencies. 
skipped: + - azure-aws-config-credential-01000 + - azure-aws-config-region-02000 + - azure-aws-config-s3-03000 + - azure-aws-config-sqs-04000 + - azure-aws-config-secret-manager-05000 - azure-file-system-01000 - azure-file-system-02000 - azure-file-system-03000 - azure-password-01000 + - azure-java-version-01000 + - azure-java-version-02000 - eap-to-azure-appservice-certificates-001 + - azure-logging-0000 - eap-to-azure-appservice-datasource-driver-01000 + - azure-os-specific-00001 + - azure-os-specific-00002 - eap-to-azure-appservice-environment-variables-001 - eap-to-azure-appservice-pom-001 - spring-boot-to-azure-cache-redis-01000 + - spring-boot-to-azure-cache-redis-02000 - spring-boot-to-azure-database-jdbc-01000 - spring-boot-to-azure-database-mongodb-02000 + - spring-boot-to-azure-database-r2dbc-03000 - spring-boot-to-azure-eureka-01000 + - spring-boot-to-azure-eureka-02000 - spring-boot-to-azure-feign-01000 - spring-boot-to-azure-identity-provider-01000 + - spring-boot-to-azure-config-server-01000 - spring-boot-to-azure-jms-broker-01000 - spring-boot-to-azure-schedule-job-01000 - spring-boot-to-azure-static-content-01000 - spring-boot-to-azure-zipkin-01000 + - spring-boot-to-azure-java-fx-01000 + - spring-boot-to-azure-jks-01000 + - spring-boot-to-azure-mq-config-kafka-01000 + - spring-boot-to-azure-mq-config-rabbitmq-01000 + - spring-boot-to-azure-mq-config-artemis-01000 + - spring-boot-to-azure-port-01000 + - spring-boot-to-azure-swing-01000 + - spring-boot-to-azure-system-config-01000 + - spring-boot-to-azure-version-01000 + - spring-boot-to-azure-version-02000 + - spring-boot-to-azure-version-03000 + - spring-cloud-to-azure-version-01000 + - spring-cloud-to-azure-version-02000 + - spring-cloud-to-azure-version-03000 + - tomcat-to-azure-external-resources-01000 - name: camel3 description: Rules for changes between Camel 3.0 and Camel 4.0 skipped: @@ -212,7 +240,7 @@ - konveyor.io/target=cloud-readiness - discovery incidents: - - uri: 
file:///opt/input/source/src/main/resources/persistence.properties + - uri: file:///opt/input/source/target/classes/persistence.properties message: When migrating environments, hard-coded IP addresses may need to be modified or eliminated. codeSnip: |2 1 jdbc.driverClassName=oracle.jdbc.driver.OracleDriver @@ -224,7 +252,7 @@ lineNumber: 2 variables: matchingText: 169.60.225.216 - - uri: file:///opt/input/source/target/classes/persistence.properties + - uri: file:///opt/input/source/src/main/resources/persistence.properties message: When migrating environments, hard-coded IP addresses may need to be modified or eliminated. codeSnip: |2 1 jdbc.driverClassName=oracle.jdbc.driver.OracleDriver @@ -243,12 +271,6 @@ - windup-discover-spring-configuration - windup-discover-jpa-configuration - windup-discover-web-configuration - - discover-license - - discover-manifest-file - - windup-discover-spring-configuration - - windup-discover-jpa-configuration - - windup-discover-web-configuration - - windup-discover-jpa-configuration - name: droolsjbpm description: This ruleset provides help for migrating to a unified KIE (Knowledge Is Everything) API in the upgrade from version 5 to 6. 
skipped: @@ -1616,22 +1638,28 @@ - embedded-cache-libraries-13000 - embedded-cache-libraries-14000 - embedded-cache-libraries-15000 + - embedded-cache-libraries-16000 - logging-0000 - logging-0001 - jca-00000 - - local-storage-00003 + - java-corba-00000 + - java-rpc-00000 - local-storage-00004 - java-rmi-00000 - - session-00000 + - localhost-http-00001 - logging-0001 - - local-storage-00002 + - localhost-jdbc-00002 + - localhost-ws-00003 + - jni-native-code-00001 + - session-00000 + - mail-00000 - socket-communication-00001 - - session-00001 - logging-0000 - - mail-00000 - - java-rpc-00000 - - socket-communication-00000 - local-storage-00005 + - local-storage-00006 + - session-00001 + - socket-communication-00000 + - local-storage-00002 - name: os/windows description: This is a ruleset for Windows operating system specific rules while migrating to Linux operating system. skipped: @@ -1814,56 +1842,78 @@ - name: technology-usage description: This ruleset adds tags the configuration management technologies so are appropriately categorized on the technology reports. 
tags: - - Bean=EJB XML - Java EE=EJB XML - Connect=EJB XML + - Bean=EJB XML + - Other=Properties - Sustain=Properties - Embedded=Properties - - Other=Properties - Spring Properties - Spring configuration properties annotation detected - - Spring Scheduled + - Spring Data JPA + - Embedded Spring Data JPA + - Spring DI + - Embedded framework - Spring DI + - Micrometer + - Embedded framework - Micrometer + - Spring Web + - Embedded framework - Spring Web - Embedded framework - Spring Scheduled + - Spring Scheduled - Java Threads - Threads - Java EE Batch + - Java EE=Java EE Batch - Execute=Java EE Batch - Processing=Java EE Batch - - Java EE=Java EE Batch - CDI - - Execute=CDI - Inversion of Control=CDI - Java EE=CDI + - Execute=CDI - Java EE JSON-P + - Execute=Java EE JSON-P - Processing=Java EE JSON-P - Java EE=Java EE JSON-P - - Execute=Java EE JSON-P - Servlet - Java Servlet - HTTP=Servlet - Connect=Servlet - Java EE=Servlet - - Spring MVC - Embedded framework - Spring MVC + - Spring MVC + - Spring Boot Actuator + - Embedded library - Spring Boot Actuator - Spring JMX - Spring JMX configuration detected + - Embedded=Spring DI + - Inversion of Control=Spring DI + - Execute=Spring DI + - Execute=Micrometer + - Embedded=Micrometer + - Integration=Micrometer + - View=Spring Web + - Web=Spring Web + - Embedded=Spring Web - Execute=Spring Scheduled - Processing=Spring Scheduled - Embedded=Spring Scheduled + - Store=Spring Data JPA + - Persistence=Spring Data JPA + - Embedded=Spring Data JPA - Connect=JNI - Other=JNI - Java EE=JNI - - Java EE=Mail - Mail usage - Connect=Mail - Other=Mail + - Java EE=Mail - Connect=JCA - Other=JCA - Java EE=JCA - JCA usage - - Configuration Management=Spring Properties - Embedded=Spring Properties - Sustain=Spring Properties + - Configuration Management=Spring Properties unmatched: - technology-usage-web-01000 - technology-usage-web-01100 @@ -2117,7 +2167,6 @@ - database-02800 - database-02900 - database-03100 - - database-03200 - 
ejb-01000 - javase-technology-usage-01000 - embedded-framework-01000 @@ -2170,9 +2219,6 @@ - embedded-framework-07900 - embedded-framework-08000 - embedded-framework-08100 - - embedded-framework-08200 - - embedded-framework-08300 - - embedded-framework-08400 - embedded-framework-08500 - embedded-framework-08600 - embedded-framework-08800 @@ -2297,7 +2343,6 @@ - mvc-01000 - mvc-01100 - mvc-01200 - - mvc-01220 - mvc-01300 - mvc-01400 - mvc-01500 @@ -2346,7 +2391,6 @@ - mvc-05800 - mvc-05900 - mvc-06000 - - observability-0100 - security-01100 - security-01200 - security-01300 @@ -2491,9 +2535,6 @@ - technology-usage-embedded-framework-07900 - technology-usage-embedded-framework-08000 - technology-usage-embedded-framework-08100 - - technology-usage-embedded-framework-08200 - - technology-usage-embedded-framework-08300 - - technology-usage-embedded-framework-08400 - technology-usage-embedded-framework-08500 - technology-usage-embedded-framework-08600 - technology-usage-embedded-framework-08800 @@ -2520,7 +2561,6 @@ - technology-usage-database-02800 - technology-usage-database-02900 - technology-usage-database-03100 - - technology-usage-database-03200 - technology-usage-connect-01101 - technology-usage-connect-01400 - technology-usage-connect-01500 diff --git a/test-data/deps-output.yaml b/test-data/deps-output.yaml index b7ba274..fd765b0 100644 --- a/test-data/deps-output.yaml +++ b/test-data/deps-output.yaml @@ -116,8 +116,7 @@ version: 2.7.7 type: compile indirect: true - resolvedIdentifier: | - 83cd2cd674a217ade95a4bb83a8a14f351f48bd0 /home/maven/repository-staging/to-ibiblio/maven2/antlr/antlr/2.7.7/antlr-2.7.7.jar + resolvedIdentifier: 83cd2cd674a217ade95a4bb83a8a14f351f48bd0 labels: - konveyor.io/dep-source=open-source - konveyor.io/language=java