From bd93a6c86fece26de6e9148fcdf0e853496f7d43 Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Sun, 28 Dec 2025 15:27:38 -0800 Subject: [PATCH 1/8] refactor and split common package Signed-off-by: Harper, Jason M --- ARCHITECTURE.md | 14 +- CONTRIBUTING.md | 10 +- cmd/benchmark/benchmark.go | 69 +- cmd/benchmark/benchmark_tables.go | 17 +- cmd/config/config.go | 38 +- cmd/config/config_tables.go | 51 +- cmd/config/flag_groups.go | 18 +- cmd/config/restore.go | 26 +- cmd/config/set.go | 40 +- cmd/flamegraph/flamegraph.go | 66 +- cmd/flamegraph/flamegraph_tables.go | 9 +- cmd/lock/lock.go | 54 +- cmd/lock/lock_tables.go | 15 +- cmd/metrics/metadata.go | 9 +- cmd/metrics/metadata_aarch.go | 11 +- cmd/metrics/metadata_x86.go | 7 +- cmd/metrics/metrics.go | 148 ++-- cmd/metrics/nmi_watchdog.go | 5 +- cmd/metrics/perf_mux.go | 9 +- cmd/metrics/process.go | 10 +- cmd/metrics/trim.go | 26 +- cmd/report/cpu.go | 166 ---- cmd/report/report.go | 57 +- cmd/report/report_tables.go | 360 ++++----- cmd/report/system.go | 141 ---- cmd/root.go | 54 +- cmd/telemetry/telemetry.go | 67 +- cmd/telemetry/telemetry_tables.go | 16 +- go.mod | 6 +- internal/app/app.go | 82 ++ internal/app/app_tables.go | 82 ++ internal/common/common.go | 755 ------------------ internal/common/table_defs.go | 75 -- internal/common/table_helpers.go | 360 --------- .../extract}/accelerator.go | 38 +- internal/{common => extract}/cache.go | 45 +- internal/{common => extract}/cache_test.go | 8 +- internal/extract/cpu.go | 258 ++++++ .../cpu_test.go} | 155 +--- {cmd/report => internal/extract}/dimm.go | 302 ++++--- internal/extract/extract.go | 174 ++++ internal/extract/extract_test.go | 157 ++++ internal/{common => extract}/frequency.go | 44 +- .../{common => extract}/frequency_test.go | 2 +- {cmd/report => internal/extract}/gpu.go | 25 +- {cmd/report => internal/extract}/isa.go | 22 +- internal/{common => extract}/nic.go | 38 +- internal/{common => extract}/nic_test.go | 20 +- internal/extract/os.go | 16 + internal/{common => extract}/power.go | 43 +- internal/{common => extract}/prefetcher.go | 35 +- {cmd/report => internal/extract}/security.go | 13 +- internal/{common => extract}/storage.go | 62 +- internal/extract/system.go | 91 +++ internal/{common => extract}/turbostat.go | 62 +- .../{common => extract}/turbostat_test.go | 2 +- internal/workflow/collection.go | 183 +++++ internal/workflow/reports.go | 183 +++++ internal/workflow/signals.go | 131 +++ internal/{common => workflow}/targets.go | 71 +- internal/{common => workflow}/targets_test.go | 2 +- internal/workflow/workflow.go | 263 ++++++ 62 files changed, 2705 insertions(+), 2613 deletions(-) delete mode 100644 cmd/report/cpu.go delete mode 100644 cmd/report/system.go create mode 100644 internal/app/app.go create mode 100644 internal/app/app_tables.go delete mode 100644 internal/common/common.go delete mode 100644 internal/common/table_defs.go delete mode 100644 internal/common/table_helpers.go rename {cmd/report => internal/extract}/accelerator.go (72%) rename internal/{common => extract}/cache.go (85%) rename internal/{common => extract}/cache_test.go (96%) create mode 100644 internal/extract/cpu.go rename internal/{common/table_helpers_test.go => extract/cpu_test.go} (65%) rename {cmd/report => internal/extract}/dimm.go (68%) create mode 100644 internal/extract/extract.go create mode 100644 internal/extract/extract_test.go rename internal/{common => extract}/frequency.go (92%) rename internal/{common => extract}/frequency_test.go (99%) rename {cmd/report => 
internal/extract}/gpu.go (87%) rename {cmd/report => internal/extract}/isa.go (77%) rename internal/{common => extract}/nic.go (83%) rename internal/{common => extract}/nic_test.go (98%) create mode 100644 internal/extract/os.go rename internal/{common => extract}/power.go (88%) rename internal/{common => extract}/prefetcher.go (93%) rename {cmd/report => internal/extract}/security.go (62%) rename internal/{common => extract}/storage.go (53%) create mode 100644 internal/extract/system.go rename internal/{common => extract}/turbostat.go (73%) rename internal/{common => extract}/turbostat_test.go (99%) create mode 100644 internal/workflow/collection.go create mode 100644 internal/workflow/reports.go create mode 100644 internal/workflow/signals.go rename internal/{common => workflow}/targets.go (94%) rename internal/{common => workflow}/targets_test.go (99%) create mode 100644 internal/workflow/workflow.go diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index 085140d2..ecb3fb9d 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -16,7 +16,7 @@ PerfSpect is a performance analysis tool for Linux systems. It collects system c ▼ ▼ ┌───────────────────────────────────────┐ ┌────────────────────────────┐ │ ReportingCommand Framework │ │ Custom Command Logic │ -│ (internal/common/common.go) │ │ │ +│ (internal/workflow/) │ │ │ │ │ │ metrics: Loader pattern, │ │ Used by: report, benchmark, │ │ perf event collection, │ │ telemetry, flamegraph, lock │ │ real-time processing │ @@ -58,7 +58,9 @@ perfspect/ │ ├── lock/ # Lock contention analysis │ └── config/ # System configuration commands ├── internal/ # Internal packages -│ ├── common/ # Shared types and ReportingCommand framework +│ ├── app/ # Application context and shared types +│ ├── extract/ # Data extraction functions from script outputs +│ ├── workflow/ # Workflow orchestration for reporting commands │ ├── target/ # Target abstraction (local/remote) │ ├── script/ # Script execution framework │ ├── report/ # Report generation (txt, json, html, xlsx) @@ -91,7 +93,7 @@ type Target interface { - `LocalTarget`: Executes commands directly on the local machine - `RemoteTarget`: Executes commands via SSH on remote machines -### 2. ReportingCommand (`internal/common/common.go`) +### 2. ReportingCommand (`internal/workflow/workflow.go`) Most commands (`report`, `telemetry`, `flamegraph`, `lock`) follow the same workflow. The `ReportingCommand` struct encapsulates this common flow: @@ -179,7 +181,7 @@ The `NewLoader()` factory function returns the appropriate loader based on CPU m - Creates ReportingCommand with table definitions - Calls rc.Run() -3. internal/common/common.go (ReportingCommand.Run): +3. internal/workflow/workflow.go (ReportingCommand.Run): - Creates RemoteTarget from flags - Validates connectivity and privileges - Calls outputsFromTargets() @@ -196,7 +198,7 @@ The `NewLoader()` factory function returns the appropriate loader based on CPU m 6. Back in Run(): - Calls createReports() with collected data - - internal/table/ processes tables, extracts field values + - internal/table/ processes tables using internal/extract/ helper functions - internal/report/ generates reports in requested formats 7. Output: @@ -229,7 +231,7 @@ make test # Run unit tests make check # Run all code quality checks (format, vet, lint) ``` -Test files are colocated with source files (e.g., `common_test.go` alongside `common.go`). +Test files are colocated with source files (e.g., `extract_test.go` alongside `extract.go`). 
## Functional Testing Functional tests are located in an Intel internal GitHub repository. The tests run against various Linux distributions and CPU architectures on internal servers and public cloud systems to validate end-to-end functionality. \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f3a70faf..ecdc1fc9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -49,7 +49,7 @@ See [ARCHITECTURE.md](./ARCHITECTURE.md) for a detailed overview of the codebase make test # Run specific test -go test -v ./internal/common/... -run TestName +go test -v ./internal/extract/... -run TestName # Test locally ./perfspect report @@ -68,7 +68,7 @@ go test -v ./internal/common/... -run TestName package yourcommand import ( - "perfspect/internal/common" + "perfspect/internal/workflow" "perfspect/internal/table" "github.com/spf13/cobra" ) @@ -82,12 +82,12 @@ var Cmd = &cobra.Command{ func init() { // Add command-specific flags here - common.AddTargetFlags(Cmd) - common.AddFormatFlag(Cmd) + workflow.AddTargetFlags(Cmd) + workflow.AddFormatFlag(Cmd) } func runYourCommand(cmd *cobra.Command, args []string) error { - rc := common.ReportingCommand{ + rc := workflow.ReportingCommand{ Cmd: cmd, Tables: yourTables, // Define tables for data collection } diff --git a/cmd/benchmark/benchmark.go b/cmd/benchmark/benchmark.go index 99076f4a..99c02937 100644 --- a/cmd/benchmark/benchmark.go +++ b/cmd/benchmark/benchmark.go @@ -16,7 +16,10 @@ import ( "github.com/spf13/pflag" "github.com/xuri/excelize/v2" - "perfspect/internal/common" + "perfspect/internal/app" + "perfspect/internal/extract" + "perfspect/internal/workflow" + "perfspect/internal/cpus" "perfspect/internal/report" "perfspect/internal/script" @@ -27,10 +30,10 @@ import ( const cmdName = "benchmark" var examples = []string{ - fmt.Sprintf(" Run all benchmarks: $ %s %s", common.AppName, cmdName), - fmt.Sprintf(" Run specific benchmarks: $ %s %s --speed --power", common.AppName, cmdName), - fmt.Sprintf(" Benchmark remote target: $ %s %s --target 192.168.1.1 --user fred --key fred_key", common.AppName, cmdName), - fmt.Sprintf(" Benchmark multiple targets:$ %s %s --targets targets.yaml", common.AppName, cmdName), + fmt.Sprintf(" Run all benchmarks: $ %s %s", app.Name, cmdName), + fmt.Sprintf(" Run specific benchmarks: $ %s %s --speed --power", app.Name, cmdName), + fmt.Sprintf(" Benchmark remote target: $ %s %s --target 192.168.1.1 --user fred --key fred_key", app.Name, cmdName), + fmt.Sprintf(" Benchmark multiple targets:$ %s %s --targets targets.yaml", app.Name, cmdName), } var Cmd = &cobra.Command{ @@ -81,7 +84,7 @@ const ( var benchmarkSummaryTableName = "Benchmark Summary" -var categories = []common.Category{ +var categories = []app.Category{ {FlagName: flagSpeedName, FlagVar: &flagSpeed, DefaultValue: false, Help: "CPU speed benchmark", Tables: []table.TableDefinition{tableDefinitions[SpeedBenchmarkTableName]}}, {FlagName: flagPowerName, FlagVar: &flagPower, DefaultValue: false, Help: "power consumption benchmark", Tables: []table.TableDefinition{tableDefinitions[PowerBenchmarkTableName]}}, {FlagName: flagTemperatureName, FlagVar: &flagTemperature, DefaultValue: false, Help: "temperature benchmark", Tables: []table.TableDefinition{tableDefinitions[TemperatureBenchmarkTableName]}}, @@ -97,13 +100,13 @@ func init() { Cmd.Flags().BoolVar(benchmark.FlagVar, benchmark.FlagName, benchmark.DefaultValue, benchmark.Help) } // set up other flags - Cmd.Flags().StringVar(&common.FlagInput, common.FlagInputName, "", "") + 
Cmd.Flags().StringVar(&app.FlagInput, app.FlagInputName, "", "") Cmd.Flags().BoolVar(&flagAll, flagAllName, true, "") - Cmd.Flags().StringSliceVar(&common.FlagFormat, common.FlagFormatName, []string{report.FormatAll}, "") + Cmd.Flags().StringSliceVar(&app.FlagFormat, app.FlagFormatName, []string{report.FormatAll}, "") Cmd.Flags().BoolVar(&flagNoSystemSummary, flagNoSystemSummaryName, false, "") Cmd.Flags().StringVar(&flagStorageDir, flagStorageDirName, "/tmp", "") - common.AddTargetFlags(Cmd) + workflow.AddTargetFlags(Cmd) Cmd.SetUsageFunc(usageFunc) } @@ -133,25 +136,25 @@ func usageFunc(cmd *cobra.Command) error { return nil } -func getFlagGroups() []common.FlagGroup { - var groups []common.FlagGroup - flags := []common.Flag{ +func getFlagGroups() []app.FlagGroup { + var groups []app.FlagGroup + flags := []app.Flag{ { Name: flagAllName, Help: "run all benchmarks", }, } for _, benchmark := range categories { - flags = append(flags, common.Flag{ + flags = append(flags, app.Flag{ Name: benchmark.FlagName, Help: benchmark.Help, }) } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Benchmark Options", Flags: flags, }) - flags = []common.Flag{ + flags = []app.Flag{ { Name: flagNoSystemSummaryName, Help: "do not include system summary in output", @@ -161,22 +164,22 @@ func getFlagGroups() []common.FlagGroup { Help: "existing directory where storage performance benchmark data will be temporarily stored", }, { - Name: common.FlagFormatName, + Name: app.FlagFormatName, Help: fmt.Sprintf("choose output format(s) from: %s", strings.Join(append([]string{report.FormatAll}, report.FormatOptions...), ", ")), }, } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Other Options", Flags: flags, }) - groups = append(groups, common.GetTargetFlagGroup()) - flags = []common.Flag{ + groups = append(groups, workflow.GetTargetFlagGroup()) + flags = []app.Flag{ { - Name: common.FlagInputName, + Name: app.FlagInputName, Help: "\".raw\" file, or directory containing \".raw\" files. Will skip data collection and use raw data for reports.", }, } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Advanced Options", Flags: flags, }) @@ -194,27 +197,27 @@ func validateFlags(cmd *cobra.Command, args []string) error { } } // validate format options - for _, format := range common.FlagFormat { + for _, format := range app.FlagFormat { formatOptions := append([]string{report.FormatAll}, report.FormatOptions...) 
if !slices.Contains(formatOptions, format) { - return common.FlagValidationError(cmd, fmt.Sprintf("format options are: %s", strings.Join(formatOptions, ", "))) + return workflow.FlagValidationError(cmd, fmt.Sprintf("format options are: %s", strings.Join(formatOptions, ", "))) } } // validate storage dir if flagStorageDir != "" { if !util.IsValidDirectoryName(flagStorageDir) { - return common.FlagValidationError(cmd, fmt.Sprintf("invalid storage directory name: %s", flagStorageDir)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("invalid storage directory name: %s", flagStorageDir)) } // if no target is specified, i.e., we have a local target only, check if the directory exists - if !cmd.Flags().Lookup(common.FlagTargetsFileName).Changed && !cmd.Flags().Lookup(common.FlagTargetHostName).Changed { + if !cmd.Flags().Lookup(workflow.FlagTargetsFileName).Changed && !cmd.Flags().Lookup(workflow.FlagTargetHostName).Changed { if _, err := os.Stat(flagStorageDir); os.IsNotExist(err) { - return common.FlagValidationError(cmd, fmt.Sprintf("storage dir does not exist: %s", flagStorageDir)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("storage dir does not exist: %s", flagStorageDir)) } } } // common target flags - if err := common.ValidateTargetFlags(cmd); err != nil { - return common.FlagValidationError(cmd, err.Error()) + if err := workflow.ValidateTargetFlags(cmd); err != nil { + return workflow.FlagValidationError(cmd, err.Error()) } return nil } @@ -223,7 +226,7 @@ func runCmd(cmd *cobra.Command, args []string) error { var tables []table.TableDefinition // add system summary table if not disabled if !flagNoSystemSummary { - tables = append(tables, common.TableDefinitions[common.SystemSummaryTableName]) + tables = append(tables, app.TableDefinitions[app.SystemSummaryTableName]) } // add benchmark tables selectedBenchmarkCount := 0 @@ -234,12 +237,12 @@ func runCmd(cmd *cobra.Command, args []string) error { } } // include benchmark summary table if all benchmarks are selected - var summaryFunc common.SummaryFunc + var summaryFunc app.SummaryFunc if selectedBenchmarkCount == len(categories) { summaryFunc = benchmarkSummaryFromTableValues } - reportingCommand := common.ReportingCommand{ + reportingCommand := workflow.ReportingCommand{ Cmd: cmd, ScriptParams: map[string]string{"StorageDir": flagStorageDir}, Tables: tables, @@ -312,8 +315,8 @@ func benchmarkSummaryFromTableValues(allTableValues []table.TableValues, outputs {Name: "Minimum Power", Values: []string{getValueFromTableValues(getTableValues(allTableValues, PowerBenchmarkTableName), "Minimum Power", 0)}}, {Name: "Memory Peak Bandwidth", Values: []string{maxMemBW}}, {Name: "Memory Minimum Latency", Values: []string{minLatency}}, - {Name: "Microarchitecture", Values: []string{common.UarchFromOutput(outputs)}}, - {Name: "Sockets", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, + {Name: "Microarchitecture", Values: []string{extract.UarchFromOutput(outputs)}}, + {Name: "Sockets", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, }, } } diff --git a/cmd/benchmark/benchmark_tables.go b/cmd/benchmark/benchmark_tables.go index 51639d6a..3d673f89 100644 --- a/cmd/benchmark/benchmark_tables.go +++ b/cmd/benchmark/benchmark_tables.go @@ -9,7 +9,8 @@ import ( "strconv" "strings" - "perfspect/internal/common" + "perfspect/internal/extract" + "perfspect/internal/cpus" "perfspect/internal/script" 
"perfspect/internal/table" @@ -106,14 +107,14 @@ func speedBenchmarkTableValues(outputs map[string]script.ScriptOutput) []table.F func powerBenchmarkTableValues(outputs map[string]script.ScriptOutput) []table.Field { return []table.Field{ - {Name: "Maximum Power", Values: []string{common.MaxTotalPackagePowerFromOutput(outputs[script.PowerBenchmarkScriptName].Stdout)}}, - {Name: "Minimum Power", Values: []string{common.MinTotalPackagePowerFromOutput(outputs[script.IdlePowerBenchmarkScriptName].Stdout)}}, + {Name: "Maximum Power", Values: []string{extract.MaxTotalPackagePowerFromOutput(outputs[script.PowerBenchmarkScriptName].Stdout)}}, + {Name: "Minimum Power", Values: []string{extract.MinTotalPackagePowerFromOutput(outputs[script.IdlePowerBenchmarkScriptName].Stdout)}}, } } func temperatureBenchmarkTableValues(outputs map[string]script.ScriptOutput) []table.Field { return []table.Field{ - {Name: "Maximum Temperature", Values: []string{common.MaxPackageTemperatureFromOutput(outputs[script.PowerBenchmarkScriptName].Stdout)}}, + {Name: "Maximum Temperature", Values: []string{extract.MaxPackageTemperatureFromOutput(outputs[script.PowerBenchmarkScriptName].Stdout)}}, } } @@ -135,10 +136,10 @@ func frequencyBenchmarkTableValues(outputs map[string]script.ScriptOutput) []tab } // get the spec core frequencies from the spec output var specSSEFreqs []string - frequencyBuckets, err := common.GetSpecFrequencyBuckets(outputs) + frequencyBuckets, err := extract.GetSpecFrequencyBuckets(outputs) if err == nil && len(frequencyBuckets) >= 2 { // get the frequencies from the buckets - specSSEFreqs, err = common.ExpandTurboFrequencies(frequencyBuckets, "sse") + specSSEFreqs, err = extract.ExpandTurboFrequencies(frequencyBuckets, "sse") if err != nil { slog.Error("unable to convert buckets to counts", slog.String("error", err.Error())) return []table.Field{} @@ -231,7 +232,7 @@ func memoryBenchmarkTableValues(outputs map[string]script.ScriptOutput) []table. 00008 261.54 225073.3 ... 
*/ - latencyBandwidthPairs := common.ValsArrayFromRegexSubmatch(outputs[script.MemoryBenchmarkScriptName].Stdout, `\s*[0-9]*\s*([0-9]*\.[0-9]+)\s*([0-9]*\.[0-9]+)`) + latencyBandwidthPairs := extract.ValsArrayFromRegexSubmatch(outputs[script.MemoryBenchmarkScriptName].Stdout, `\s*[0-9]*\s*([0-9]*\.[0-9]+)\s*([0-9]*\.[0-9]+)`) for _, latencyBandwidth := range latencyBandwidthPairs { latency := latencyBandwidth[0] bandwidth, err := strconv.ParseFloat(latencyBandwidth[1], 32) @@ -259,7 +260,7 @@ func numaBenchmarkTableValues(outputs map[string]script.ScriptOutput) []table.Fi 0 175610.3 55579.7 1 55575.2 175656.7 */ - nodeBandwidthsPairs := common.ValsArrayFromRegexSubmatch(outputs[script.NumaBenchmarkScriptName].Stdout, `^\s+(\d)\s+(\d.*)$`) + nodeBandwidthsPairs := extract.ValsArrayFromRegexSubmatch(outputs[script.NumaBenchmarkScriptName].Stdout, `^\s+(\d)\s+(\d.*)$`) // add 1 field per numa node for _, nodeBandwidthsPair := range nodeBandwidthsPairs { fields = append(fields, table.Field{Name: nodeBandwidthsPair[0]}) diff --git a/cmd/config/config.go b/cmd/config/config.go index 95e7ea39..c23c100d 100644 --- a/cmd/config/config.go +++ b/cmd/config/config.go @@ -8,7 +8,9 @@ import ( "fmt" "log/slog" "os" - "perfspect/internal/common" + "perfspect/internal/app" + "perfspect/internal/workflow" + "perfspect/internal/cpus" "perfspect/internal/progress" "perfspect/internal/report" @@ -25,13 +27,13 @@ import ( const cmdName = "config" var examples = []string{ - fmt.Sprintf(" Set core count on local host: $ %s %s --cores 32", common.AppName, cmdName), - fmt.Sprintf(" Set multiple config items on local host: $ %s %s --core-max 3.0 --uncore-max 2.1 --tdp 120", common.AppName, cmdName), - fmt.Sprintf(" Record config to file before changes: $ %s %s --c6 disable --epb 0 --record", common.AppName, cmdName), - fmt.Sprintf(" Restore config from file: $ %s %s restore gnr_config.txt", common.AppName, cmdName), - fmt.Sprintf(" Set core count on remote target: $ %s %s --cores 32 --target 192.168.1.1 --user fred --key fred_key", common.AppName, cmdName), - fmt.Sprintf(" View current config on remote target: $ %s %s --target 192.168.1.1 --user fred --key fred_key", common.AppName, cmdName), - fmt.Sprintf(" Set governor on remote targets: $ %s %s --gov performance --targets targets.yaml", common.AppName, cmdName), + fmt.Sprintf(" Set core count on local host: $ %s %s --cores 32", app.Name, cmdName), + fmt.Sprintf(" Set multiple config items on local host: $ %s %s --core-max 3.0 --uncore-max 2.1 --tdp 120", app.Name, cmdName), + fmt.Sprintf(" Record config to file before changes: $ %s %s --c6 disable --epb 0 --record", app.Name, cmdName), + fmt.Sprintf(" Restore config from file: $ %s %s restore gnr_config.txt", app.Name, cmdName), + fmt.Sprintf(" Set core count on remote target: $ %s %s --cores 32 --target 192.168.1.1 --user fred --key fred_key", app.Name, cmdName), + fmt.Sprintf(" View current config on remote target: $ %s %s --target 192.168.1.1 --user fred --key fred_key", app.Name, cmdName), + fmt.Sprintf(" Set governor on remote targets: $ %s %s --gov performance --targets targets.yaml", app.Name, cmdName), } var Cmd = &cobra.Command{ @@ -54,7 +56,7 @@ func init() { func runCmd(cmd *cobra.Command, args []string) error { // appContext is the application context that holds common data and resources. 
- appContext := cmd.Parent().Context().Value(common.AppContext{}).(common.AppContext) + appContext := cmd.Parent().Context().Value(app.Context{}).(app.Context) localTempDir := appContext.LocalTempDir outputDir := appContext.OutputDir @@ -73,7 +75,7 @@ func runCmd(cmd *cobra.Command, args []string) error { } } // get the targets - myTargets, targetErrs, err := common.GetTargets(cmd, true, true, localTempDir) + myTargets, targetErrs, err := workflow.GetTargets(cmd, true, true, localTempDir) if err != nil { fmt.Fprintf(os.Stderr, "Error: %v\n", err) slog.Error(err.Error()) @@ -301,7 +303,7 @@ func setOnTarget(cmd *cobra.Command, myTarget target.Target, flagGroups []flagGr } // getConfig collects the configuration data from the target(s) -func getConfig(myTargets []target.Target, localTempDir string) ([]common.TargetScriptOutputs, error) { +func getConfig(myTargets []target.Target, localTempDir string) ([]workflow.TargetScriptOutputs, error) { var scriptsToRun []script.ScriptDefinition for _, scriptName := range tableDefinitions[ConfigurationTableName].ScriptNames { @@ -309,8 +311,8 @@ func getConfig(myTargets []target.Target, localTempDir string) ([]common.TargetS } multiSpinner := progress.NewMultiSpinner() multiSpinner.Start() - orderedTargetScriptOutputs := []common.TargetScriptOutputs{} - channelTargetScriptOutputs := make(chan common.TargetScriptOutputs) + orderedTargetScriptOutputs := []workflow.TargetScriptOutputs{} + channelTargetScriptOutputs := make(chan workflow.TargetScriptOutputs) channelError := make(chan error) for _, myTarget := range myTargets { err := multiSpinner.AddSpinner(myTarget.GetName()) @@ -322,7 +324,7 @@ func getConfig(myTargets []target.Target, localTempDir string) ([]common.TargetS go collectOnTarget(myTarget, scriptsToRun, localTempDir, channelTargetScriptOutputs, channelError, multiSpinner.Status) } // wait for scripts to run on all targets - var allTargetScriptOutputs []common.TargetScriptOutputs + var allTargetScriptOutputs []workflow.TargetScriptOutputs for range myTargets { select { case scriptOutputs := <-channelTargetScriptOutputs: @@ -346,7 +348,7 @@ func getConfig(myTargets []target.Target, localTempDir string) ([]common.TargetS } // processConfig processes the collected configuration data and creates text reports -func processConfig(targetScriptOutputs []common.TargetScriptOutputs) (map[string][]byte, error) { +func processConfig(targetScriptOutputs []workflow.TargetScriptOutputs) (map[string][]byte, error) { reports := make(map[string][]byte) var err error for _, targetScriptOutput := range targetScriptOutputs { @@ -396,9 +398,9 @@ func printConfig(reports map[string][]byte, toStdout bool, toFile bool, outputDi } // collectOnTarget runs the scripts on the target and sends the results to the appropriate channels -func collectOnTarget(myTarget target.Target, scriptsToRun []script.ScriptDefinition, localTempDir string, channelTargetScriptOutputs chan common.TargetScriptOutputs, channelError chan error, statusUpdate progress.MultiSpinnerUpdateFunc) { +func collectOnTarget(myTarget target.Target, scriptsToRun []script.ScriptDefinition, localTempDir string, channelTargetScriptOutputs chan workflow.TargetScriptOutputs, channelError chan error, statusUpdate progress.MultiSpinnerUpdateFunc) { // run the scripts on the target - scriptOutputs, err := common.RunScripts(myTarget, scriptsToRun, true, localTempDir, statusUpdate, "collecting configuration", false) + scriptOutputs, err := workflow.RunScripts(myTarget, scriptsToRun, true, localTempDir, statusUpdate, 
"collecting configuration", false) if err != nil { if statusUpdate != nil { _ = statusUpdate(myTarget.GetName(), fmt.Sprintf("error collecting configuration: %v", err)) @@ -410,5 +412,5 @@ func collectOnTarget(myTarget target.Target, scriptsToRun []script.ScriptDefinit if statusUpdate != nil { _ = statusUpdate(myTarget.GetName(), "configuration collection complete") } - channelTargetScriptOutputs <- common.TargetScriptOutputs{TargetName: myTarget.GetName(), ScriptOutputs: scriptOutputs} + channelTargetScriptOutputs <- workflow.TargetScriptOutputs{TargetName: myTarget.GetName(), ScriptOutputs: scriptOutputs} } diff --git a/cmd/config/config_tables.go b/cmd/config/config_tables.go index 12157b09..f25bf7f9 100644 --- a/cmd/config/config_tables.go +++ b/cmd/config/config_tables.go @@ -6,7 +6,8 @@ package config import ( "fmt" "log/slog" - "perfspect/internal/common" + "perfspect/internal/extract" + "perfspect/internal/cpus" "perfspect/internal/script" "perfspect/internal/table" @@ -56,7 +57,7 @@ var tableDefinitions = map[string]table.TableDefinition{ } func configurationTableValues(outputs map[string]script.ScriptOutput) []table.Field { - uarch := common.UarchFromOutput(outputs) + uarch := extract.UarchFromOutput(outputs) if uarch == "" { slog.Error("failed to get uarch from script outputs") return []table.Field{} @@ -65,51 +66,51 @@ func configurationTableValues(outputs map[string]script.ScriptOutput) []table.Fi // command implements its own print logic and uses the Description field to show the command line // argument for each config item. fields := []table.Field{ - {Name: "Cores per Socket", Description: "--cores ", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, + {Name: "Cores per Socket", Description: "--cores ", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, {Name: "L3 Cache", Description: "--llc ", Values: []string{l3InstanceFromOutput(outputs)}}, - {Name: "Package Power / TDP", Description: "--tdp ", Values: []string{common.TDPFromOutput(outputs)}}, + {Name: "Package Power / TDP", Description: "--tdp ", Values: []string{extract.TDPFromOutput(outputs)}}, {Name: "Core SSE Frequency", Description: "--core-max ", Values: []string{sseFrequenciesFromOutput(outputs)}}, } if strings.Contains(uarch, cpus.UarchSRF) || strings.Contains(uarch, cpus.UarchGNR) || strings.Contains(uarch, cpus.UarchCWF) { fields = append(fields, []table.Field{ - {Name: "Uncore Max Frequency (Compute)", Description: "--uncore-max-compute ", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(true, true, outputs)}}, - {Name: "Uncore Min Frequency (Compute)", Description: "--uncore-min-compute ", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(false, true, outputs)}}, - {Name: "Uncore Max Frequency (I/O)", Description: "--uncore-max-io ", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(true, false, outputs)}}, - {Name: "Uncore Min Frequency (I/O)", Description: "--uncore-min-io ", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(false, false, outputs)}}, + {Name: "Uncore Max Frequency (Compute)", Description: "--uncore-max-compute ", Values: []string{extract.UncoreMinMaxDieFrequencyFromOutput(true, true, outputs)}}, + {Name: "Uncore Min Frequency (Compute)", Description: "--uncore-min-compute ", Values: []string{extract.UncoreMinMaxDieFrequencyFromOutput(false, true, outputs)}}, + {Name: "Uncore Max Frequency (I/O)", Description: 
"--uncore-max-io ", Values: []string{extract.UncoreMinMaxDieFrequencyFromOutput(true, false, outputs)}}, + {Name: "Uncore Min Frequency (I/O)", Description: "--uncore-min-io ", Values: []string{extract.UncoreMinMaxDieFrequencyFromOutput(false, false, outputs)}}, }...) } else { fields = append(fields, []table.Field{ - {Name: "Uncore Max Frequency", Description: "--uncore-max ", Values: []string{common.UncoreMaxFrequencyFromOutput(outputs)}}, - {Name: "Uncore Min Frequency", Description: "--uncore-min ", Values: []string{common.UncoreMinFrequencyFromOutput(outputs)}}, + {Name: "Uncore Max Frequency", Description: "--uncore-max ", Values: []string{extract.UncoreMaxFrequencyFromOutput(outputs)}}, + {Name: "Uncore Min Frequency", Description: "--uncore-min ", Values: []string{extract.UncoreMinFrequencyFromOutput(outputs)}}, }...) } fields = append(fields, []table.Field{ - {Name: "Energy Performance Bias", Description: "--epb <0-15>", Values: []string{common.EPBFromOutput(outputs)}}, - {Name: "Energy Performance Preference", Description: "--epp <0-255>", Values: []string{common.EPPFromOutput(outputs)}}, + {Name: "Energy Performance Bias", Description: "--epb <0-15>", Values: []string{extract.EPBFromOutput(outputs)}}, + {Name: "Energy Performance Preference", Description: "--epp <0-255>", Values: []string{extract.EPPFromOutput(outputs)}}, {Name: "Scaling Governor", Description: "--gov ", Values: []string{strings.TrimSpace(outputs[script.ScalingGovernorScriptName].Stdout)}}, }...) // add ELC (for SRF, CWF and GNR only) if strings.Contains(uarch, cpus.UarchSRF) || strings.Contains(uarch, cpus.UarchGNR) || strings.Contains(uarch, cpus.UarchCWF) { - fields = append(fields, table.Field{Name: "Efficiency Latency Control", Description: "--elc ", Values: []string{common.ELCSummaryFromOutput(outputs)}}) + fields = append(fields, table.Field{Name: "Efficiency Latency Control", Description: "--elc ", Values: []string{extract.ELCSummaryFromOutput(outputs)}}) } // add prefetchers - for _, pf := range common.PrefetcherDefinitions { + for _, pf := range extract.PrefetcherDefinitions { if slices.Contains(pf.Uarchs, "all") || slices.Contains(pf.Uarchs, uarch[:3]) { var scriptName string switch pf.Msr { - case common.MsrPrefetchControl: + case extract.MsrPrefetchControl: scriptName = script.PrefetchControlName - case common.MsrPrefetchers: + case extract.MsrPrefetchers: scriptName = script.PrefetchersName - case common.MsrAtomPrefTuning1: + case extract.MsrAtomPrefTuning1: scriptName = script.PrefetchersAtomName default: slog.Error("unknown msr for prefetcher", slog.String("msr", fmt.Sprintf("0x%x", pf.Msr))) continue } - msrVal := common.ValFromRegexSubmatch(outputs[scriptName].Stdout, `^([0-9a-fA-F]+)`) + msrVal := extract.ValFromRegexSubmatch(outputs[scriptName].Stdout, `^([0-9a-fA-F]+)`) var enabledDisabled string - enabled, err := common.IsPrefetcherEnabled(msrVal, pf.Bit) + enabled, err := extract.IsPrefetcherEnabled(msrVal, pf.Bit) if err != nil { slog.Warn("error checking prefetcher enabled status", slog.String("error", err.Error())) continue @@ -128,7 +129,7 @@ func configurationTableValues(outputs map[string]script.ScriptOutput) []table.Fi } } // add C6 - c6 := common.C6FromOutput(outputs) + c6 := extract.C6FromOutput(outputs) if c6 != "" { fields = append(fields, table.Field{Name: "C6", Description: "--c6 ", Values: []string{c6}}) } @@ -142,27 +143,27 @@ func configurationTableValues(outputs map[string]script.ScriptOutput) []table.Fi // l3InstanceFromOutput retrieves the L3 cache size per instance (per 
socket on Intel) in megabytes func l3InstanceFromOutput(outputs map[string]script.ScriptOutput) string { - l3InstanceMB, _, err := common.GetL3MSRMB(outputs) + l3InstanceMB, _, err := extract.GetL3MSRMB(outputs) if err != nil { slog.Debug("Could not get L3 size from MSR, falling back to lscpu", slog.String("error", err.Error())) - l3InstanceMB, _, err = common.GetL3LscpuMB(outputs) + l3InstanceMB, _, err = extract.GetL3LscpuMB(outputs) if err != nil { slog.Warn("Could not get L3 size from lscpu", slog.String("error", err.Error())) return "" } } - return common.FormatCacheSizeMB(l3InstanceMB) + return extract.FormatCacheSizeMB(l3InstanceMB) } // sseFrequenciesFromOutput gets the bucketed SSE frequencies from the output // and returns a compact string representation with consolidated ranges, e.g.: // "1-40/3.5, 41-60/3.4, 61-86/3.2" func sseFrequenciesFromOutput(outputs map[string]script.ScriptOutput) string { - specCoreFrequencies, err := common.GetSpecFrequencyBuckets(outputs) + specCoreFrequencies, err := extract.GetSpecFrequencyBuckets(outputs) if err != nil { return "" } - sseFreqs := common.GetSSEFreqsFromBuckets(specCoreFrequencies) + sseFreqs := extract.GetSSEFreqsFromBuckets(specCoreFrequencies) if len(sseFreqs) < 1 { return "" } diff --git a/cmd/config/flag_groups.go b/cmd/config/flag_groups.go index 6aacf99a..ac24682a 100644 --- a/cmd/config/flag_groups.go +++ b/cmd/config/flag_groups.go @@ -5,12 +5,14 @@ package config import ( "fmt" - "perfspect/internal/common" - "perfspect/internal/target" "regexp" "slices" "strings" + "perfspect/internal/extract" + "perfspect/internal/target" + "perfspect/internal/workflow" + "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -192,7 +194,7 @@ func initializeFlags(cmd *cobra.Command) { flagGroups = append(flagGroups, group) // prefetcher options group = flagGroup{name: flagGroupPrefetcherName, flags: []flagDefinition{}} - for _, pref := range common.GetPrefetcherDefinitions() { + for _, pref := range extract.GetPrefetcherDefinitions() { group.flags = append(group.flags, newStringFlag(cmd, // flag name @@ -247,7 +249,7 @@ func initializeFlags(cmd *cobra.Command) { ) flagGroups = append(flagGroups, group) - common.AddTargetFlags(Cmd) + workflow.AddTargetFlags(Cmd) Cmd.SetUsageFunc(usageFunc) } @@ -263,7 +265,7 @@ func usageFunc(cmd *cobra.Command) error { } } - targetFlagGroup := common.GetTargetFlagGroup() + targetFlagGroup := workflow.GetTargetFlagGroup() cmd.Printf(" %s:\n", targetFlagGroup.GroupName) for _, flag := range targetFlagGroup.Flags { cmd.Printf(" --%-20s %s\n", flag.Name, flag.Help) @@ -292,14 +294,14 @@ func validateFlags(cmd *cobra.Command, args []string) error { for _, flag := range group.flags { if cmd.Flags().Lookup(flag.GetName()).Changed && flag.validationFunc != nil { if !flag.validationFunc(cmd) { - return common.FlagValidationError(cmd, fmt.Sprintf("invalid flag value, --%s %s, valid values are %s", flag.GetName(), flag.GetValueAsString(), flag.validationDescription)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("invalid flag value, --%s %s, valid values are %s", flag.GetName(), flag.GetValueAsString(), flag.validationDescription)) } } } } // common target flags - if err := common.ValidateTargetFlags(cmd); err != nil { - return common.FlagValidationError(cmd, err.Error()) + if err := workflow.ValidateTargetFlags(cmd); err != nil { + return workflow.FlagValidationError(cmd, err.Error()) } return nil } diff --git a/cmd/config/restore.go b/cmd/config/restore.go index 43f12345..71f17217 100644 --- 
a/cmd/config/restore.go +++ b/cmd/config/restore.go @@ -11,7 +11,9 @@ import ( "log/slog" "os" "os/exec" - "perfspect/internal/common" + "perfspect/internal/app" + "perfspect/internal/workflow" + "perfspect/internal/progress" "regexp" "slices" @@ -32,9 +34,9 @@ type flagValue struct { } var restoreExamples = []string{ - fmt.Sprintf(" Restore config from file on local host: $ %s %s %s gnr_config.txt", common.AppName, cmdName, restoreCmdName), - fmt.Sprintf(" Restore config on remote target: $ %s %s %s gnr_config.txt --target 192.168.1.1 --user fred --key fred_key", common.AppName, cmdName, restoreCmdName), - fmt.Sprintf(" Restore config without confirmation: $ %s %s %s gnr_config.txt --yes", common.AppName, cmdName, restoreCmdName), + fmt.Sprintf(" Restore config from file on local host: $ %s %s %s gnr_config.txt", app.Name, cmdName, restoreCmdName), + fmt.Sprintf(" Restore config on remote target: $ %s %s %s gnr_config.txt --target 192.168.1.1 --user fred --key fred_key", app.Name, cmdName, restoreCmdName), + fmt.Sprintf(" Restore config without confirmation: $ %s %s %s gnr_config.txt --yes", app.Name, cmdName, restoreCmdName), } var RestoreCmd = &cobra.Command{ @@ -64,7 +66,7 @@ func init() { RestoreCmd.Flags().BoolVar(&flagRestoreYes, flagRestoreYesName, false, "skip confirmation prompt") - common.AddTargetFlags(RestoreCmd) + workflow.AddTargetFlags(RestoreCmd) RestoreCmd.SetUsageFunc(restoreUsageFunc) } @@ -78,7 +80,7 @@ func restoreUsageFunc(cmd *cobra.Command) error { cmd.Print(" General Options:\n") cmd.Printf(" --%-20s %s\n", flagRestoreYesName, "skip confirmation prompt") - targetFlagGroup := common.GetTargetFlagGroup() + targetFlagGroup := workflow.GetTargetFlagGroup() cmd.Printf(" %s:\n", targetFlagGroup.GroupName) for _, flag := range targetFlagGroup.Flags { cmd.Printf(" --%-20s %s\n", flag.Name, flag.Help) @@ -98,15 +100,15 @@ func restoreUsageFunc(cmd *cobra.Command) error { func validateRestoreFlags(cmd *cobra.Command, args []string) error { // validate that the file exists if len(args) != 1 { - return common.FlagValidationError(cmd, "restore requires exactly one argument: the path to the configuration file") + return workflow.FlagValidationError(cmd, "restore requires exactly one argument: the path to the configuration file") } filePath := args[0] if _, err := os.Stat(filePath); os.IsNotExist(err) { - return common.FlagValidationError(cmd, fmt.Sprintf("configuration file does not exist: %s", filePath)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("configuration file does not exist: %s", filePath)) } // validate common target flags - if err := common.ValidateTargetFlags(cmd); err != nil { - return common.FlagValidationError(cmd, err.Error()) + if err := workflow.ValidateTargetFlags(cmd); err != nil { + return workflow.FlagValidationError(cmd, err.Error()) } return nil } @@ -157,7 +159,7 @@ func runRestoreCmd(cmd *cobra.Command, args []string) error { cmdArgs := []string{"config"} // copy target flags from restore command first - targetFlags := []string{common.FlagTargetHostName, common.FlagTargetsFileName, common.FlagTargetUserName, common.FlagTargetKeyName, common.FlagTargetPortName} + targetFlags := []string{workflow.FlagTargetHostName, workflow.FlagTargetsFileName, workflow.FlagTargetUserName, workflow.FlagTargetKeyName, workflow.FlagTargetPortName} for _, flagName := range targetFlags { if flag := cmd.Flags().Lookup(flagName); flag != nil && flag.Changed { cmdArgs = append(cmdArgs, fmt.Sprintf("--%s", flagName), flag.Value.String()) @@ -165,7 +167,7 @@ func 
runRestoreCmd(cmd *cobra.Command, args []string) error { } // copy relevant global flags from root command next - globalFlags := []string{common.FlagDebugName, common.FlagOutputDirName, common.FlagTargetTempRootName, common.FlagSyslogName, common.FlagLogStdOutName} + globalFlags := []string{app.FlagDebugName, app.FlagOutputDirName, app.FlagTargetTempRootName, app.FlagSyslogName, app.FlagLogStdOutName} for _, flagName := range globalFlags { if flag := cmd.Root().PersistentFlags().Lookup(flagName); flag != nil && flag.Changed { if flag.Value.Type() == "bool" { diff --git a/cmd/config/set.go b/cmd/config/set.go index b3b374c7..ca91a0a8 100644 --- a/cmd/config/set.go +++ b/cmd/config/set.go @@ -4,7 +4,9 @@ import ( "fmt" "log/slog" "math" - "perfspect/internal/common" + "perfspect/internal/extract" + "perfspect/internal/workflow" + "perfspect/internal/cpus" "perfspect/internal/script" "perfspect/internal/target" @@ -126,12 +128,12 @@ func setLlcSize(desiredLlcSize float64, myTarget target.Target, localTempDir str scripts = append(scripts, script.GetScriptByName(script.LspciBitsScriptName)) scripts = append(scripts, script.GetScriptByName(script.LspciDevicesScriptName)) scripts = append(scripts, script.GetScriptByName(script.L3CacheWayEnabledName)) - outputs, err := common.RunScripts(myTarget, scripts, true, localTempDir, nil, "", false) + outputs, err := workflow.RunScripts(myTarget, scripts, true, localTempDir, nil, "", false) if err != nil { return fmt.Errorf("failed to run scripts on target: %w", err) } - uarch := common.UarchFromOutput(outputs) + uarch := extract.UarchFromOutput(outputs) cpu, err := cpus.GetCPUByMicroArchitecture(uarch) if err != nil { return fmt.Errorf("failed to get CPU by microarchitecture: %w", err) @@ -139,11 +141,11 @@ func setLlcSize(desiredLlcSize float64, myTarget target.Target, localTempDir str if cpu.CacheWayCount == 0 { return fmt.Errorf("cache way count is zero") } - maximumLlcSize, _, err := common.GetL3LscpuMB(outputs) + maximumLlcSize, _, err := extract.GetL3LscpuMB(outputs) if err != nil { return fmt.Errorf("failed to get maximum LLC size: %w", err) } - currentLlcSize, _, err := common.GetL3MSRMB(outputs) + currentLlcSize, _, err := extract.GetL3MSRMB(outputs) if err != nil { return fmt.Errorf("failed to get current LLC size: %w", err) } @@ -181,15 +183,15 @@ func setLlcSize(desiredLlcSize float64, myTarget target.Target, localTempDir str } func setSSEFrequency(sseFrequency float64, myTarget target.Target, localTempDir string) error { - targetFamily, err := common.GetTargetFamily(myTarget) + targetFamily, err := workflow.GetTargetFamily(myTarget) if err != nil { return fmt.Errorf("failed to get target family: %w", err) } - targetModel, err := common.GetTargetModel(myTarget) + targetModel, err := workflow.GetTargetModel(myTarget) if err != nil { return fmt.Errorf("failed to get target model: %w", err) } - targetVendor, err := common.GetTargetVendor(myTarget) + targetVendor, err := workflow.GetTargetVendor(myTarget) if err != nil { return fmt.Errorf("failed to get target vendor: %w", err) } @@ -340,15 +342,15 @@ func expandConsolidatedFrequencies(consolidatedStr string, bucketSizes []int) ([ // Note that the buckets have been consolidated where frequencies are the same, so they // will need to be expanded back out to individual buckets for setting. 
func setSSEFrequencies(sseFrequencies string, myTarget target.Target, localTempDir string) error { - targetFamily, err := common.GetTargetFamily(myTarget) + targetFamily, err := workflow.GetTargetFamily(myTarget) if err != nil { return fmt.Errorf("failed to get target family: %w", err) } - targetModel, err := common.GetTargetModel(myTarget) + targetModel, err := workflow.GetTargetModel(myTarget) if err != nil { return fmt.Errorf("failed to get target model: %w", err) } - targetVendor, err := common.GetTargetVendor(myTarget) + targetVendor, err := workflow.GetTargetVendor(myTarget) if err != nil { return fmt.Errorf("failed to get target vendor: %w", err) } @@ -484,7 +486,7 @@ func setUncoreDieFrequency(maxFreq bool, computeDie bool, uncoreFrequency float6 var dies []dieId // build list of compute or IO dies dieTypesScript := script.GetScriptByName(script.UncoreDieTypesFromTPMIScriptName) - scriptOutput, err := common.RunScript(myTarget, dieTypesScript, localTempDir, false) + scriptOutput, err := workflow.RunScript(myTarget, dieTypesScript, localTempDir, false) if err != nil { return fmt.Errorf("failed to run script on target: %w", err) } @@ -525,7 +527,7 @@ func setUncoreDieFrequency(maxFreq bool, computeDie bool, uncoreFrequency float6 } scripts = append(scripts, setScript) } - _, err = common.RunScripts(myTarget, scripts, false, localTempDir, nil, "", false) + _, err = workflow.RunScripts(myTarget, scripts, false, localTempDir, nil, "", false) if err != nil { err = fmt.Errorf("failed to set uncore die frequency: %w", err) slog.Error(err.Error()) @@ -547,7 +549,7 @@ func setUncoreFrequency(maxFreq bool, uncoreFrequency float64, myTarget target.T // Depends: []string{"rdmsr"}, // Lkms: []string{"msr"}, } - scriptOutput, err := common.RunScript(myTarget, getScript, localTempDir, false) + scriptOutput, err := workflow.RunScript(myTarget, getScript, localTempDir, false) if err != nil { return fmt.Errorf("failed to run scripts on target: %w", err) } @@ -593,7 +595,7 @@ func setTDP(power int, myTarget target.Target, localTempDir string) error { // Lkms: []string{"msr"}, // Depends: []string{"rdmsr"}, } - readOutput, err := common.RunScript(myTarget, readScript, localTempDir, false) + readOutput, err := workflow.RunScript(myTarget, readScript, localTempDir, false) if err != nil { return fmt.Errorf("failed to read power MSR: %w", err) } else { @@ -797,11 +799,11 @@ func getUarch(myTarget target.Target, localTempDir string) (string, error) { scripts = append(scripts, script.GetScriptByName(script.LscpuScriptName)) scripts = append(scripts, script.GetScriptByName(script.LspciBitsScriptName)) scripts = append(scripts, script.GetScriptByName(script.LspciDevicesScriptName)) - outputs, err := common.RunScripts(myTarget, scripts, true, localTempDir, nil, "", false) + outputs, err := workflow.RunScripts(myTarget, scripts, true, localTempDir, nil, "", false) if err != nil { return "", fmt.Errorf("failed to run scripts on target: %w", err) } - uarch := common.UarchFromOutput(outputs) + uarch := extract.UarchFromOutput(outputs) if uarch == "" { return "", fmt.Errorf("failed to get microarchitecture") } @@ -809,7 +811,7 @@ func getUarch(myTarget target.Target, localTempDir string) (string, error) { } func setPrefetcher(enableDisable string, myTarget target.Target, localTempDir string, prefetcherType string) error { - pf, err := common.GetPrefetcherDefByName(prefetcherType) + pf, err := extract.GetPrefetcherDefByName(prefetcherType) if err != nil { return fmt.Errorf("failed to get prefetcher definition: %w", 
err) } @@ -971,7 +973,7 @@ func setC1Demotion(enableDisable string, myTarget target.Target, localTempDir st // runScript runs a script on the target and returns the output func runScript(myTarget target.Target, myScript script.ScriptDefinition, localTempDir string) (string, error) { - output, err := common.RunScript(myTarget, myScript, localTempDir, false) // nosemgrep + output, err := workflow.RunScript(myTarget, myScript, localTempDir, false) // nosemgrep if err != nil { slog.Error("failed to run script on target", slog.String("target", myTarget.GetName()), slog.String("error", err.Error()), slog.String("stdout", output.Stdout), slog.String("stderr", output.Stderr)) } else { diff --git a/cmd/flamegraph/flamegraph.go b/cmd/flamegraph/flamegraph.go index 3a6c7b6f..8547c16d 100644 --- a/cmd/flamegraph/flamegraph.go +++ b/cmd/flamegraph/flamegraph.go @@ -7,14 +7,16 @@ package flamegraph import ( "fmt" "os" - "perfspect/internal/common" - "perfspect/internal/report" - "perfspect/internal/table" - "perfspect/internal/util" "slices" "strconv" "strings" + "perfspect/internal/app" + "perfspect/internal/report" + "perfspect/internal/table" + "perfspect/internal/util" + "perfspect/internal/workflow" + "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -22,9 +24,9 @@ import ( const cmdName = "flamegraph" var examples = []string{ - fmt.Sprintf(" Flamegraph from local host: $ %s %s", common.AppName, cmdName), - fmt.Sprintf(" Flamegraph from remote target: $ %s %s --target 192.168.1.1 --user fred --key fred_key", common.AppName, cmdName), - fmt.Sprintf(" Flamegraph from multiple targets: $ %s %s --targets targets.yaml", common.AppName, cmdName), + fmt.Sprintf(" Flamegraph from local host: $ %s %s", app.Name, cmdName), + fmt.Sprintf(" Flamegraph from remote target: $ %s %s --target 192.168.1.1 --user fred --key fred_key", app.Name, cmdName), + fmt.Sprintf(" Flamegraph from multiple targets: $ %s %s --targets targets.yaml", app.Name, cmdName), } var Cmd = &cobra.Command{ @@ -57,15 +59,15 @@ const ( ) func init() { - Cmd.Flags().StringVar(&common.FlagInput, common.FlagInputName, "", "") - Cmd.Flags().StringSliceVar(&common.FlagFormat, common.FlagFormatName, []string{report.FormatAll}, "") + Cmd.Flags().StringVar(&app.FlagInput, app.FlagInputName, "", "") + Cmd.Flags().StringSliceVar(&app.FlagFormat, app.FlagFormatName, []string{report.FormatAll}, "") Cmd.Flags().IntVar(&flagDuration, flagDurationName, 0, "") Cmd.Flags().IntVar(&flagFrequency, flagFrequencyName, 11, "") Cmd.Flags().IntSliceVar(&flagPids, flagPidsName, nil, "") Cmd.Flags().BoolVar(&flagNoSystemSummary, flagNoSystemSummaryName, false, "") Cmd.Flags().IntVar(&flagMaxDepth, flagMaxDepthName, 0, "") - common.AddTargetFlags(Cmd) + workflow.AddTargetFlags(Cmd) Cmd.SetUsageFunc(usageFunc) } @@ -95,9 +97,9 @@ func usageFunc(cmd *cobra.Command) error { return nil } -func getFlagGroups() []common.FlagGroup { - var groups []common.FlagGroup - flags := []common.Flag{ +func getFlagGroups() []app.FlagGroup { + var groups []app.FlagGroup + flags := []app.Flag{ { Name: flagDurationName, Help: "number of seconds to run the collection. If 0, the collection will run indefinitely. Ctrl+c to stop.", @@ -111,7 +113,7 @@ func getFlagGroups() []common.FlagGroup { Help: "comma separated list of PIDs. 
If not specified, all PIDs will be collected", }, { - Name: common.FlagFormatName, + Name: app.FlagFormatName, Help: fmt.Sprintf("choose output format(s) from: %s", strings.Join(append([]string{report.FormatAll}, report.FormatHtml, report.FormatTxt, report.FormatJson), ", ")), }, { @@ -123,18 +125,18 @@ func getFlagGroups() []common.FlagGroup { Help: "do not include system summary table in report", }, } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Options", Flags: flags, }) - groups = append(groups, common.GetTargetFlagGroup()) - flags = []common.Flag{ + groups = append(groups, workflow.GetTargetFlagGroup()) + flags = []app.Flag{ { - Name: common.FlagInputName, + Name: app.FlagInputName, Help: "\".raw\" file, or directory containing \".raw\" files. Will skip data collection and use raw data for reports.", }, } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Advanced Options", Flags: flags, }) @@ -143,35 +145,35 @@ func getFlagGroups() []common.FlagGroup { func validateFlags(cmd *cobra.Command, args []string) error { // validate format options - for _, format := range common.FlagFormat { + for _, format := range app.FlagFormat { formatOptions := append([]string{report.FormatAll}, report.FormatHtml, report.FormatTxt, report.FormatJson) if !slices.Contains(formatOptions, format) { - return common.FlagValidationError(cmd, fmt.Sprintf("format options are: %s", strings.Join(formatOptions, ", "))) + return workflow.FlagValidationError(cmd, fmt.Sprintf("format options are: %s", strings.Join(formatOptions, ", "))) } } // validate input file - if common.FlagInput != "" { - if _, err := os.Stat(common.FlagInput); os.IsNotExist(err) { - return common.FlagValidationError(cmd, fmt.Sprintf("input file %s does not exist", common.FlagInput)) + if app.FlagInput != "" { + if _, err := os.Stat(app.FlagInput); os.IsNotExist(err) { + return workflow.FlagValidationError(cmd, fmt.Sprintf("input file %s does not exist", app.FlagInput)) } } if flagDuration < 0 { - return common.FlagValidationError(cmd, "duration must be 0 or greater") + return workflow.FlagValidationError(cmd, "duration must be 0 or greater") } if flagFrequency <= 0 { - return common.FlagValidationError(cmd, "frequency must be 1 or greater") + return workflow.FlagValidationError(cmd, "frequency must be 1 or greater") } for _, pid := range flagPids { if pid < 0 { - return common.FlagValidationError(cmd, "PID must be 0 or greater") + return workflow.FlagValidationError(cmd, "PID must be 0 or greater") } } if flagMaxDepth < 0 { - return common.FlagValidationError(cmd, "max depth must be 0 or greater") + return workflow.FlagValidationError(cmd, "max depth must be 0 or greater") } // common target flags - if err := common.ValidateTargetFlags(cmd); err != nil { - return common.FlagValidationError(cmd, err.Error()) + if err := workflow.ValidateTargetFlags(cmd); err != nil { + return workflow.FlagValidationError(cmd, err.Error()) } return nil } @@ -179,10 +181,10 @@ func validateFlags(cmd *cobra.Command, args []string) error { func runCmd(cmd *cobra.Command, args []string) error { var tables []table.TableDefinition if !flagNoSystemSummary { - tables = append(tables, common.TableDefinitions[common.SystemSummaryTableName]) + tables = append(tables, app.TableDefinitions[app.SystemSummaryTableName]) } tables = append(tables, tableDefinitions[CallStackFrequencyTableName]) - reportingCommand := common.ReportingCommand{ + reportingCommand := 
workflow.ReportingCommand{ Cmd: cmd, ReportNamePost: "flame", ScriptParams: map[string]string{ diff --git a/cmd/flamegraph/flamegraph_tables.go b/cmd/flamegraph/flamegraph_tables.go index 65120108..54e59e30 100644 --- a/cmd/flamegraph/flamegraph_tables.go +++ b/cmd/flamegraph/flamegraph_tables.go @@ -7,7 +7,8 @@ import ( "fmt" "log/slog" "math" - "perfspect/internal/common" + "perfspect/internal/extract" + "perfspect/internal/script" "perfspect/internal/table" "regexp" @@ -45,7 +46,7 @@ func javaFoldedFromOutput(outputs map[string]script.ScriptOutput) string { slog.Warn("collapsed call stack output is empty") return "" } - sections := common.GetSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) + sections := extract.GetSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) if len(sections) == 0 { slog.Warn("no sections in collapsed call stack output") return "" @@ -87,7 +88,7 @@ func nativeFoldedFromOutput(outputs map[string]script.ScriptOutput) string { slog.Warn("collapsed call stack output is empty") return "" } - sections := common.GetSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) + sections := extract.GetSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) if len(sections) == 0 { slog.Warn("no sections in collapsed call stack output") return "" @@ -116,7 +117,7 @@ func maxRenderDepthFromOutput(outputs map[string]script.ScriptOutput) string { slog.Warn("collapsed call stack output is empty") return "" } - sections := common.GetSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) + sections := extract.GetSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) if len(sections) == 0 { slog.Warn("no sections in collapsed call stack output") return "" diff --git a/cmd/lock/lock.go b/cmd/lock/lock.go index 72ee3f02..11914467 100755 --- a/cmd/lock/lock.go +++ b/cmd/lock/lock.go @@ -7,15 +7,17 @@ package lock import ( "fmt" "path/filepath" - "perfspect/internal/common" + "slices" + "strconv" + "strings" + + "perfspect/internal/app" "perfspect/internal/progress" "perfspect/internal/report" "perfspect/internal/script" "perfspect/internal/table" "perfspect/internal/target" - "slices" - "strconv" - "strings" + "perfspect/internal/workflow" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -24,9 +26,9 @@ import ( const cmdName = "lock" var examples = []string{ - fmt.Sprintf(" Lock inspect from local host: $ %s %s", common.AppName, cmdName), - fmt.Sprintf(" Lock inspect from remote target: $ %s %s --target 192.168.1.1 --user fred --key fred_key", common.AppName, cmdName), - fmt.Sprintf(" Lock inspect from multiple targets: $ %s %s --targets targets.yaml", common.AppName, cmdName), + fmt.Sprintf(" Lock inspect from local host: $ %s %s", app.Name, cmdName), + fmt.Sprintf(" Lock inspect from remote target: $ %s %s --target 192.168.1.1 --user fred --key fred_key", app.Name, cmdName), + fmt.Sprintf(" Lock inspect from multiple targets: $ %s %s --targets targets.yaml", app.Name, cmdName), } var Cmd = &cobra.Command{ @@ -57,14 +59,14 @@ const ( ) func init() { - Cmd.Flags().StringVar(&common.FlagInput, common.FlagInputName, "", "") - Cmd.Flags().StringSliceVar(&flagFormat, common.FlagFormatName, []string{report.FormatAll}, "") + Cmd.Flags().StringVar(&app.FlagInput, app.FlagInputName, "", "") + Cmd.Flags().StringSliceVar(&flagFormat, app.FlagFormatName, []string{report.FormatAll}, "") Cmd.Flags().IntVar(&flagDuration, flagDurationName, 10, "") Cmd.Flags().IntVar(&flagFrequency, 
flagFrequencyName, 11, "") Cmd.PersistentFlags().BoolVar(&flagPackage, flagPackageName, false, "") Cmd.Flags().BoolVar(&flagNoSystemSummary, flagNoSystemSummaryName, false, "") - common.AddTargetFlags(Cmd) + workflow.AddTargetFlags(Cmd) Cmd.SetUsageFunc(usageFunc) } @@ -94,9 +96,9 @@ func usageFunc(cmd *cobra.Command) error { return nil } -func getFlagGroups() []common.FlagGroup { - var groups []common.FlagGroup - flags := []common.Flag{ +func getFlagGroups() []app.FlagGroup { + var groups []app.FlagGroup + flags := []app.Flag{ { Name: flagDurationName, Help: "number of seconds to run the collection", @@ -110,7 +112,7 @@ func getFlagGroups() []common.FlagGroup { Help: "create raw data package", }, { - Name: common.FlagFormatName, + Name: app.FlagFormatName, Help: fmt.Sprintf("choose output format(s) from: %s", strings.Join(append([]string{report.FormatAll}, report.FormatHtml, report.FormatTxt), ", ")), }, { @@ -118,11 +120,11 @@ func getFlagGroups() []common.FlagGroup { Help: "do not include system summary table in report", }, } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Options", Flags: flags, }) - groups = append(groups, common.GetTargetFlagGroup()) + groups = append(groups, workflow.GetTargetFlagGroup()) return groups } @@ -132,18 +134,18 @@ func validateFlags(cmd *cobra.Command, args []string) error { formatOptions := append([]string{report.FormatAll}, report.FormatHtml, report.FormatTxt) for _, format := range flagFormat { if !slices.Contains(formatOptions, format) { - return common.FlagValidationError(cmd, fmt.Sprintf("format options are: %s", strings.Join(formatOptions, ", "))) + return workflow.FlagValidationError(cmd, fmt.Sprintf("format options are: %s", strings.Join(formatOptions, ", "))) } } if flagDuration <= 0 { - return common.FlagValidationError(cmd, "duration must be greater than 0") + return workflow.FlagValidationError(cmd, "duration must be greater than 0") } if flagFrequency <= 0 { - return common.FlagValidationError(cmd, "frequency must be greater than 0") + return workflow.FlagValidationError(cmd, "frequency must be greater than 0") } // common target flags - if err := common.ValidateTargetFlags(cmd); err != nil { - return common.FlagValidationError(cmd, err.Error()) + if err := workflow.ValidateTargetFlags(cmd); err != nil { + return workflow.FlagValidationError(cmd, err.Error()) } return nil } @@ -164,7 +166,7 @@ func formalizeOutputFormat(outputFormat []string) []string { return result } -func pullDataFiles(appContext common.AppContext, scriptOutputs map[string]script.ScriptOutput, myTarget target.Target, statusUpdate progress.MultiSpinnerUpdateFunc) error { +func pullDataFiles(appContext app.Context, scriptOutputs map[string]script.ScriptOutput, myTarget target.Target, statusUpdate progress.MultiSpinnerUpdateFunc) error { localOutputDir := appContext.OutputDir tableValues := table.GetValuesForTable(tableDefinitions[KernelLockAnalysisTableName], scriptOutputs) found := false @@ -195,10 +197,10 @@ func pullDataFiles(appContext common.AppContext, scriptOutputs map[string]script func runCmd(cmd *cobra.Command, args []string) error { var tables []table.TableDefinition if !flagNoSystemSummary { - tables = append(tables, common.TableDefinitions[common.SystemSummaryTableName]) + tables = append(tables, app.TableDefinitions[app.SystemSummaryTableName]) } tables = append(tables, tableDefinitions[KernelLockAnalysisTableName]) - reportingCommand := common.ReportingCommand{ + reportingCommand := workflow.ReportingCommand{ 
 		Cmd:            cmd,
 		ReportNamePost: "lock",
 		ScriptParams: map[string]string{
@@ -214,11 +216,11 @@ func runCmd(cmd *cobra.Command, args []string) error {
 		reportingCommand.AdhocFunc = pullDataFiles
 	}

-	// The common.FlagFormat designed to hold the output formats, but as a global variable,
+	// The app.FlagFormat is designed to hold the output formats, but as a global variable,
 	// it would be overwrite by other command's initialization function. So the current
 	// workaround is to make an assignment to ensure the current command's output format
 	// flag takes effect as expected.
-	common.FlagFormat = formalizeOutputFormat(flagFormat)
+	app.FlagFormat = formalizeOutputFormat(flagFormat)

 	report.RegisterHTMLRenderer(KernelLockAnalysisTableName, kernelLockAnalysisHTMLRenderer)

diff --git a/cmd/lock/lock_tables.go b/cmd/lock/lock_tables.go
index 11823456..8fad25e4 100644
--- a/cmd/lock/lock_tables.go
+++ b/cmd/lock/lock_tables.go
@@ -4,7 +4,8 @@ package lock
 // SPDX-License-Identifier: BSD-3-Clause

 import (
-	"perfspect/internal/common"
+	"perfspect/internal/extract"
+
 	"perfspect/internal/script"
 	"perfspect/internal/table"
 	"strings"
@@ -28,12 +29,12 @@ var tableDefinitions = map[string]table.TableDefinition{

 func kernelLockAnalysisTableValues(outputs map[string]script.ScriptOutput) []table.Field {
 	fields := []table.Field{
-		{Name: "Hotspot without Callstack", Values: []string{common.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_hotspot_no_children")}},
-		{Name: "Hotspot with Callstack", Values: []string{common.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_hotspot_callgraph")}},
-		{Name: "Cache2Cache without Callstack", Values: []string{common.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_c2c_no_children")}},
-		{Name: "Cache2Cache with CallStack", Values: []string{common.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_c2c_callgraph")}},
-		{Name: "Lock Contention", Values: []string{common.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_lock_contention")}},
-		{Name: "Perf Package Path", Values: []string{strings.TrimSpace(common.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_package_path"))}},
+		{Name: "Hotspot without Callstack", Values: []string{extract.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_hotspot_no_children")}},
+		{Name: "Hotspot with Callstack", Values: []string{extract.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_hotspot_callgraph")}},
+		{Name: "Cache2Cache without Callstack", Values: []string{extract.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_c2c_no_children")}},
+		{Name: "Cache2Cache with CallStack", Values: []string{extract.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_c2c_callgraph")}},
+		{Name: "Lock Contention", Values: []string{extract.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_lock_contention")}},
+		{Name: "Perf Package Path", Values: []string{strings.TrimSpace(extract.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_package_path"))}},
 	}
 	return fields
 }
diff --git a/cmd/metrics/metadata.go b/cmd/metrics/metadata.go
index a1fbecfe..825c7db9 100644
--- a/cmd/metrics/metadata.go
+++ b/cmd/metrics/metadata.go
@@ -19,12 +19,13 @@ import (
 	"strings"
 	"time"

-	"perfspect/internal/common"
+
"perfspect/internal/app" "perfspect/internal/cpus" "perfspect/internal/progress" "perfspect/internal/script" "perfspect/internal/table" "perfspect/internal/target" + "perfspect/internal/workflow" ) // Script name constants - used as map keys when retrieving script outputs. @@ -107,7 +108,7 @@ type MetadataCollector interface { // LoadMetadata populates and returns a Metadata structure containing state of the system. func LoadMetadata(t target.Target, noRoot bool, noSystemSummary bool, localTempDir string, statusUpdate progress.MultiSpinnerUpdateFunc) (Metadata, error) { - uarch, err := common.GetTargetArchitecture(t) + uarch, err := workflow.GetTargetArchitecture(t) if err != nil { return Metadata{}, fmt.Errorf("failed to get target architecture: %v", err) } @@ -289,7 +290,7 @@ func getMetadataScripts(noRoot bool, noSystemSummary bool, numGPCounters int) ([ // Add the system summary table scripts if !noSystemSummary { - for _, scriptName := range common.TableDefinitions[common.SystemSummaryTableName].ScriptNames { + for _, scriptName := range app.TableDefinitions[app.SystemSummaryTableName].ScriptNames { scriptDef := script.GetScriptByName(scriptName) metadataScripts = append(metadataScripts, scriptDef) } @@ -369,7 +370,7 @@ func ReadJSONFromFile(path string) (md Metadata, err error) { // getSystemSummary retrieves the system summary from script outputs. func getSystemSummary(scriptOutputs map[string]script.ScriptOutput) (summaryFields [][]string, err error) { - allTableValues, err := table.ProcessTables([]table.TableDefinition{common.TableDefinitions[common.SystemSummaryTableName]}, scriptOutputs) + allTableValues, err := table.ProcessTables([]table.TableDefinition{app.TableDefinitions[app.SystemSummaryTableName]}, scriptOutputs) if err != nil { err = fmt.Errorf("failed to process script outputs: %w", err) return diff --git a/cmd/metrics/metadata_aarch.go b/cmd/metrics/metadata_aarch.go index 490e47f3..edbcf921 100644 --- a/cmd/metrics/metadata_aarch.go +++ b/cmd/metrics/metadata_aarch.go @@ -14,7 +14,8 @@ import ( "strconv" "strings" - "perfspect/internal/common" + "perfspect/internal/workflow" + "perfspect/internal/cpus" "perfspect/internal/progress" "perfspect/internal/script" @@ -80,7 +81,7 @@ func (c *ARMMetadataCollector) CollectMetadata(t target.Target, noRoot bool, noS } // Microarchitecture - metadata.Microarchitecture, err = common.GetTargetMicroArchitecture(t, localTempDir, noRoot) + metadata.Microarchitecture, err = workflow.GetTargetMicroArchitecture(t, localTempDir, noRoot) if err != nil { return Metadata{}, fmt.Errorf("failed to get ARM microarchitecture: %v", err) } @@ -97,7 +98,7 @@ func (c *ARMMetadataCollector) CollectMetadata(t target.Target, noRoot bool, noS return Metadata{}, fmt.Errorf("failed to get metadata scripts: %v", err) } - scriptOutputs, err := common.RunScripts(t, metadataScripts, true, localTempDir, nil, "", noRoot) // nosemgrep + scriptOutputs, err := workflow.RunScripts(t, metadataScripts, true, localTempDir, nil, "", noRoot) // nosemgrep if err != nil { return Metadata{}, fmt.Errorf("failed to run metadata scripts: %v", err) } @@ -166,7 +167,7 @@ func getNumGPCountersARM(t target.Target, localTempDir string, noRoot bool) (num Superuser: !noRoot, Architectures: []string{cpus.ARMArchitecture}, } - scriptOutput, err := common.RunScript(t, getScript, localTempDir, noRoot) + scriptOutput, err := workflow.RunScript(t, getScript, localTempDir, noRoot) if err != nil { err = fmt.Errorf("failed to run pmu driver version script: %v", err) return @@ -272,7 +273,7 
@@ func getCPUSocketMapFromSysfs(t target.Target, socketCount, coresPerSocket, thre Superuser: false, Architectures: []string{cpus.ARMArchitecture}, } - scriptOutput, err := common.RunScript(t, getScript, localTempDir, false) + scriptOutput, err := workflow.RunScript(t, getScript, localTempDir, false) if err != nil || scriptOutput.Exitcode != 0 { // Fallback: assume single socket if sysfs read fails slog.Debug("failed to read CPU topology from sysfs, assuming single socket", slog.String("stderr", scriptOutput.Stderr)) diff --git a/cmd/metrics/metadata_x86.go b/cmd/metrics/metadata_x86.go index 261ae343..47078175 100644 --- a/cmd/metrics/metadata_x86.go +++ b/cmd/metrics/metadata_x86.go @@ -12,7 +12,8 @@ import ( "strconv" "strings" - "perfspect/internal/common" + "perfspect/internal/workflow" + "perfspect/internal/cpus" "perfspect/internal/progress" "perfspect/internal/script" @@ -79,7 +80,7 @@ func (c *X86MetadataCollector) CollectMetadata(t target.Target, noRoot bool, noS } // CPU microarchitecture - metadata.Microarchitecture, err = common.GetTargetMicroArchitecture(t, localTempDir, noRoot) + metadata.Microarchitecture, err = workflow.GetTargetMicroArchitecture(t, localTempDir, noRoot) if err != nil { return Metadata{}, fmt.Errorf("failed to get x86 microarchitecture: %v", err) } @@ -96,7 +97,7 @@ func (c *X86MetadataCollector) CollectMetadata(t target.Target, noRoot bool, noS return Metadata{}, fmt.Errorf("failed to get metadata scripts: %v", err) } - scriptOutputs, err := common.RunScripts(t, metadataScripts, true, localTempDir, statusUpdate, "collecting metadata", noRoot) // nosemgrep + scriptOutputs, err := workflow.RunScripts(t, metadataScripts, true, localTempDir, statusUpdate, "collecting metadata", noRoot) // nosemgrep if err != nil { return Metadata{}, fmt.Errorf("failed to run metadata scripts: %v", err) } diff --git a/cmd/metrics/metrics.go b/cmd/metrics/metrics.go index d19b871d..4bc0a710 100644 --- a/cmd/metrics/metrics.go +++ b/cmd/metrics/metrics.go @@ -23,7 +23,9 @@ import ( "syscall" "time" - "perfspect/internal/common" + "perfspect/internal/app" + "perfspect/internal/workflow" + "perfspect/internal/cpus" "perfspect/internal/progress" "perfspect/internal/script" @@ -39,14 +41,14 @@ import ( const cmdName = "metrics" var examples = []string{ - fmt.Sprintf(" Metrics from local host: $ %s %s --duration 30", common.AppName, cmdName), - fmt.Sprintf(" Metrics from local host in CSV format: $ %s %s --format csv", common.AppName, cmdName), - fmt.Sprintf(" Metrics from remote host: $ %s %s --target 192.168.1.1 --user fred --key fred_key", common.AppName, cmdName), - fmt.Sprintf(" Metrics for \"hot\" processes: $ %s %s --scope process", common.AppName, cmdName), - fmt.Sprintf(" Metrics for specified processes: $ %s %s --scope process --pids 1234,6789", common.AppName, cmdName), - fmt.Sprintf(" Start workload and collect metrics: $ %s %s -- /path/to/workload arg1 arg2", common.AppName, cmdName), - fmt.Sprintf(" Metrics adjusted for transaction rate: $ %s %s --txnrate 100", common.AppName, cmdName), - fmt.Sprintf(" \"Live\" metrics: $ %s %s --live", common.AppName, cmdName), + fmt.Sprintf(" Metrics from local host: $ %s %s --duration 30", app.Name, cmdName), + fmt.Sprintf(" Metrics from local host in CSV format: $ %s %s --format csv", app.Name, cmdName), + fmt.Sprintf(" Metrics from remote host: $ %s %s --target 192.168.1.1 --user fred --key fred_key", app.Name, cmdName), + fmt.Sprintf(" Metrics for \"hot\" processes: $ %s %s --scope process", app.Name, cmdName), + fmt.Sprintf(" 
Metrics for specified processes: $ %s %s --scope process --pids 1234,6789", app.Name, cmdName), + fmt.Sprintf(" Start workload and collect metrics: $ %s %s -- /path/to/workload arg1 arg2", app.Name, cmdName), + fmt.Sprintf(" Metrics adjusted for transaction rate: $ %s %s --txnrate 100", app.Name, cmdName), + fmt.Sprintf(" \"Live\" metrics: $ %s %s --live", app.Name, cmdName), } var Cmd = &cobra.Command{ @@ -244,7 +246,7 @@ func init() { Cmd.Flags().BoolVar(&flagPrometheusServer, flagPrometheusServerName, false, "") Cmd.Flags().StringVar(&flagPrometheusServerAddr, flagPrometheusServerAddrName, ":9090", "") - common.AddTargetFlags(Cmd) + workflow.AddTargetFlags(Cmd) Cmd.SetUsageFunc(usageFunc) } @@ -280,10 +282,10 @@ func usageFunc(cmd *cobra.Command) error { return nil } -func getFlagGroups() []common.FlagGroup { - var groups []common.FlagGroup +func getFlagGroups() []app.FlagGroup { + var groups []app.FlagGroup // collection options - flags := []common.Flag{ + flags := []app.Flag{ { Name: flagDurationName, Help: "number of seconds to run the collection. If 0, the collection will run indefinitely.", @@ -317,12 +319,12 @@ func getFlagGroups() []common.FlagGroup { Help: "range of CPUs to monitor. If not provided, all cores will be monitored.", }, } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Collection Options", Flags: flags, }) // output options - flags = []common.Flag{ + flags = []app.Flag{ { Name: flagGranularityName, Help: fmt.Sprintf("level of metric granularity. Only valid when collecting at system scope. Options: %s.", strings.Join(granularityOptions, ", ")), @@ -348,12 +350,12 @@ func getFlagGroups() []common.FlagGroup { Help: "address (e.g., host:port) to start Prometheus metrics server on (implies --prometheus-server true)", }, } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Output Options", Flags: flags, }) // advanced options - flags = []common.Flag{ + flags = []app.Flag{ { Name: flagShowMetricNamesName, Help: "show metric names available on this platform and exit", @@ -395,11 +397,11 @@ func getFlagGroups() []common.FlagGroup { Help: "do not include system summary table in report", }, } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Advanced Options", Flags: flags, }) - groups = append(groups, common.GetTargetFlagGroup()) + groups = append(groups, workflow.GetTargetFlagGroup()) return groups } @@ -408,54 +410,54 @@ func validateFlags(cmd *cobra.Command, args []string) error { if len(args) > 0 { argsWorkload = args if cmd.Flags().Lookup(flagScopeName).Changed { - return common.FlagValidationError(cmd, "scope is not supported with a workload") + return workflow.FlagValidationError(cmd, "scope is not supported with a workload") } if cmd.Flags().Lookup(flagDurationName).Changed { - return common.FlagValidationError(cmd, "duration is not supported with a workload") + return workflow.FlagValidationError(cmd, "duration is not supported with a workload") } if cmd.Flags().Lookup(flagPidListName).Changed { - return common.FlagValidationError(cmd, "pids are not supported with a workload") + return workflow.FlagValidationError(cmd, "pids are not supported with a workload") } if cmd.Flags().Lookup(flagCidListName).Changed { - return common.FlagValidationError(cmd, "cids are not supported with a workload") + return workflow.FlagValidationError(cmd, "cids are not supported with a workload") } if cmd.Flags().Lookup(flagFilterName).Changed { - 
return common.FlagValidationError(cmd, "filter is not supported with a workload") + return workflow.FlagValidationError(cmd, "filter is not supported with a workload") } if cmd.Flags().Lookup(flagRefreshName).Changed { - return common.FlagValidationError(cmd, "refresh is not supported with a workload") + return workflow.FlagValidationError(cmd, "refresh is not supported with a workload") } if cmd.Flags().Lookup(flagCountName).Changed { - return common.FlagValidationError(cmd, "count is not supported with a workload") + return workflow.FlagValidationError(cmd, "count is not supported with a workload") } if cmd.Flags().Lookup(flagCpuRangeName).Changed { - return common.FlagValidationError(cmd, "CPU range is not supported with a workload") + return workflow.FlagValidationError(cmd, "CPU range is not supported with a workload") } } // confirm valid duration if cmd.Flags().Lookup(flagDurationName).Changed && flagDuration != 0 && flagDuration < flagPerfPrintInterval { - return common.FlagValidationError(cmd, fmt.Sprintf("duration must be greater than or equal to the event collection interval (%d)", flagPerfPrintInterval)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("duration must be greater than or equal to the event collection interval (%d)", flagPerfPrintInterval)) } // confirm valid scope if cmd.Flags().Lookup(flagScopeName).Changed && !slices.Contains(scopeOptions, flagScope) { - return common.FlagValidationError(cmd, fmt.Sprintf("invalid scope: %s, valid options are: %s", flagScope, strings.Join(scopeOptions, ", "))) + return workflow.FlagValidationError(cmd, fmt.Sprintf("invalid scope: %s, valid options are: %s", flagScope, strings.Join(scopeOptions, ", "))) } // pids and cids are mutually exclusive if len(flagPidList) > 0 && len(flagCidList) > 0 { - return common.FlagValidationError(cmd, "cannot specify both pids and cids") + return workflow.FlagValidationError(cmd, "cannot specify both pids and cids") } // pid list changed if len(flagPidList) > 0 { // if scope was set and it wasn't set to process, error if cmd.Flags().Changed(flagScopeName) && flagScope != scopeProcess { - return common.FlagValidationError(cmd, fmt.Sprintf("cannot specify pids when scope is not %s", scopeProcess)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("cannot specify pids when scope is not %s", scopeProcess)) } // if scope wasn't set, set it to process flagScope = scopeProcess // verify PIDs are integers for _, pid := range flagPidList { if _, err := strconv.Atoi(pid); err != nil { - return common.FlagValidationError(cmd, "pids must be integers") + return workflow.FlagValidationError(cmd, "pids must be integers") } } } @@ -463,7 +465,7 @@ func validateFlags(cmd *cobra.Command, args []string) error { if len(flagCidList) > 0 { // if scope was set and it wasn't set to cgroup, error if cmd.Flags().Changed(flagScopeName) && flagScope != scopeCgroup { - return common.FlagValidationError(cmd, fmt.Sprintf("cannot specify cids when scope is not %s", scopeCgroup)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("cannot specify cids when scope is not %s", scopeCgroup)) } // if scope wasn't set, set it to cgroup flagScope = scopeCgroup @@ -472,49 +474,49 @@ func validateFlags(cmd *cobra.Command, args []string) error { if flagFilter != "" { // if scope isn't process or cgroup, error if flagScope != scopeProcess && flagScope != scopeCgroup { - return common.FlagValidationError(cmd, fmt.Sprintf("cannot specify filter when scope is not %s or %s", scopeProcess, scopeCgroup)) + return 
workflow.FlagValidationError(cmd, fmt.Sprintf("cannot specify filter when scope is not %s or %s", scopeProcess, scopeCgroup)) } // if pids or cids are specified, error if len(flagPidList) > 0 || len(flagCidList) > 0 { - return common.FlagValidationError(cmd, "cannot specify filter when pids or cids are specified") + return workflow.FlagValidationError(cmd, "cannot specify filter when pids or cids are specified") } } // count changed if cmd.Flags().Lookup(flagCountName).Changed { // if scope isn't process or cgroup, error if flagScope != scopeProcess && flagScope != scopeCgroup { - return common.FlagValidationError(cmd, fmt.Sprintf("cannot specify count when scope is not %s or %s", scopeProcess, scopeCgroup)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("cannot specify count when scope is not %s or %s", scopeProcess, scopeCgroup)) } // if count is less than 1, error if flagCount < 1 { - return common.FlagValidationError(cmd, "count must be greater than 0") + return workflow.FlagValidationError(cmd, "count must be greater than 0") } // if pids or cids are specified, error if len(flagPidList) > 0 || len(flagCidList) > 0 { - return common.FlagValidationError(cmd, "cannot specify count when pids or cids are specified") + return workflow.FlagValidationError(cmd, "cannot specify count when pids or cids are specified") } } // refresh changed if cmd.Flags().Lookup(flagRefreshName).Changed { // if scope isn't process or cgroup, error if flagScope != scopeProcess && flagScope != scopeCgroup { - return common.FlagValidationError(cmd, fmt.Sprintf("cannot specify refresh when scope is not %s or %s", scopeProcess, scopeCgroup)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("cannot specify refresh when scope is not %s or %s", scopeProcess, scopeCgroup)) } // if pidlist or cidlist is set, error if len(flagPidList) > 0 || len(flagCidList) > 0 { - return common.FlagValidationError(cmd, "cannot specify refresh when pids or cids are specified") + return workflow.FlagValidationError(cmd, "cannot specify refresh when pids or cids are specified") } // if duration is set, error if flagDuration > 0 { - return common.FlagValidationError(cmd, "cannot specify refresh when duration is set") + return workflow.FlagValidationError(cmd, "cannot specify refresh when duration is set") } // if refresh is less than 1, error if flagRefresh < 0 { - return common.FlagValidationError(cmd, "refresh must be greater than or equal to 0") + return workflow.FlagValidationError(cmd, "refresh must be greater than or equal to 0") } // if refresh is less than perf print interval, error if flagRefresh < flagPerfPrintInterval { - return common.FlagValidationError(cmd, fmt.Sprintf("refresh must be greater than or equal to the event collection interval (%d)", flagPerfPrintInterval)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("refresh must be greater than or equal to the event collection interval (%d)", flagPerfPrintInterval)) } } // cpu range changed @@ -522,7 +524,7 @@ func validateFlags(cmd *cobra.Command, args []string) error { // error if granularity specifically set to other than CPU // only CPU granularity is allowed when specifying a CPU range if cmd.Flags().Lookup(flagGranularityName).Changed && flagGranularity != granularityCPU { - return common.FlagValidationError(cmd, fmt.Sprintf("cpu range can only be specified when granularity is %s. 
Current granularity is %s.", granularityCPU, flagGranularity)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("cpu range can only be specified when granularity is %s. Current granularity is %s.", granularityCPU, flagGranularity)) } // set granularity to cpu if cpu range is specified and granularity not explicitly set @@ -535,18 +537,18 @@ func validateFlags(cmd *cobra.Command, args []string) error { // some basic validation on CPU range cpuList, err := util.SelectiveIntRangeToIntList(flagCpuRange) if err != nil { - return common.FlagValidationError(cmd, fmt.Sprintf("invalid cpu range: %s", flagCpuRange)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("invalid cpu range: %s", flagCpuRange)) } numCpus := len(cpuList) if numCpus == 0 { - return common.FlagValidationError(cmd, fmt.Sprintf("cpu range must contain at least one CPU, got: %s", flagCpuRange)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("cpu range must contain at least one CPU, got: %s", flagCpuRange)) } // check if any entries in the cpu range are duplicates // error if so, since perf will not accept this input seen := make(map[int]bool) for _, cpu := range cpuList { if seen[cpu] { - return common.FlagValidationError(cmd, fmt.Sprintf("duplicate CPU in cpu range: %s", flagCpuRange)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("duplicate CPU in cpu range: %s", flagCpuRange)) } seen[cpu] = true } @@ -555,22 +557,22 @@ func validateFlags(cmd *cobra.Command, args []string) error { // output options // confirm valid granularity if cmd.Flags().Lookup(flagGranularityName).Changed && !slices.Contains(granularityOptions, flagGranularity) { - return common.FlagValidationError(cmd, fmt.Sprintf("invalid granularity: %s, valid options are: %s", flagGranularity, strings.Join(granularityOptions, ", "))) + return workflow.FlagValidationError(cmd, fmt.Sprintf("invalid granularity: %s, valid options are: %s", flagGranularity, strings.Join(granularityOptions, ", "))) } // if scope is not system, granularity must be system if flagGranularity != granularitySystem && flagScope != scopeSystem { - return common.FlagValidationError(cmd, fmt.Sprintf("granularity option must be %s when collecting at a scope other than %s", granularitySystem, scopeSystem)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("granularity option must be %s when collecting at a scope other than %s", granularitySystem, scopeSystem)) } // confirm valid output format for _, format := range flagOutputFormat { if !slices.Contains(formatOptions, format) { - return common.FlagValidationError(cmd, fmt.Sprintf("invalid output format: %s, valid options are: %s", format, strings.Join(formatOptions, ", "))) + return workflow.FlagValidationError(cmd, fmt.Sprintf("invalid output format: %s, valid options are: %s", format, strings.Join(formatOptions, ", "))) } } // if live is set, output format must be only one of the formats if flagLive { if len(flagOutputFormat) != 1 { - return common.FlagValidationError(cmd, fmt.Sprintf("when --%s is set, only one output format can be specified with --%s", flagLiveName, flagOutputFormatName)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("when --%s is set, only one output format can be specified with --%s", flagLiveName, flagOutputFormatName)) } } // prometheus server address @@ -594,60 +596,60 @@ func validateFlags(cmd *cobra.Command, args []string) error { // confirm valid perf print interval if cmd.Flags().Lookup(flagPerfPrintIntervalName).Changed { if flagPerfPrintInterval < 1 { - return 
common.FlagValidationError(cmd, "event collection interval must be at least 1 second") + return workflow.FlagValidationError(cmd, "event collection interval must be at least 1 second") } // if perf print interval is greater than duration, error if flagDuration > 0 && flagPerfPrintInterval > flagDuration { - return common.FlagValidationError(cmd, fmt.Sprintf("event collection interval must be less than or equal to the duration (%d)", flagDuration)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("event collection interval must be less than or equal to the duration (%d)", flagDuration)) } // if refresh is relevant, perf print interval must be less than refresh relevant := flagRefresh > 0 && flagScope != scopeSystem && len(flagPidList) == 0 && len(flagCidList) == 0 if relevant && flagPerfPrintInterval > flagRefresh { - return common.FlagValidationError(cmd, fmt.Sprintf("event collection interval must be less than or equal to the refresh interval (%d)", flagRefresh)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("event collection interval must be less than or equal to the refresh interval (%d)", flagRefresh)) } } // confirm valid perf mux interval if cmd.Flags().Lookup(flagPerfMuxIntervalName).Changed && flagPerfMuxInterval < 10 { - return common.FlagValidationError(cmd, "mux interval must be at least 10 milliseconds") + return workflow.FlagValidationError(cmd, "mux interval must be at least 10 milliseconds") } // print events to file if flagWriteEventsToFile && flagLive { - return common.FlagValidationError(cmd, fmt.Sprintf("cannot write raw perf events to file when --%s is set", flagLiveName)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("cannot write raw perf events to file when --%s is set", flagLiveName)) } // only one output format if live if flagLive && len(flagOutputFormat) > 1 { - return common.FlagValidationError(cmd, fmt.Sprintf("specify one output format with --%s when --%s is set", flagOutputFormatName, flagLiveName)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("specify one output format with --%s when --%s is set", flagOutputFormatName, flagLiveName)) } // event file path if flagEventFilePath != "" { if _, err := os.Stat(flagEventFilePath); err != nil { if os.IsNotExist(err) { - return common.FlagValidationError(cmd, fmt.Sprintf("event file path does not exist: %s", flagEventFilePath)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("event file path does not exist: %s", flagEventFilePath)) } - return common.FlagValidationError(cmd, fmt.Sprintf("failed to access event file path: %s, error: %v", flagEventFilePath, err)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("failed to access event file path: %s, error: %v", flagEventFilePath, err)) } } // metric file path if flagMetricFilePath != "" { if _, err := os.Stat(flagMetricFilePath); err != nil { if os.IsNotExist(err) { - return common.FlagValidationError(cmd, fmt.Sprintf("metric file path does not exist: %s", flagMetricFilePath)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("metric file path does not exist: %s", flagMetricFilePath)) } - return common.FlagValidationError(cmd, fmt.Sprintf("failed to access metric file path: %s, error: %v", flagMetricFilePath, err)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("failed to access metric file path: %s, error: %v", flagMetricFilePath, err)) } } // input file path if flagInput != "" { if _, err := os.Stat(flagInput); err != nil { if os.IsNotExist(err) { - return common.FlagValidationError(cmd, fmt.Sprintf("input file 
path does not exist: %s", flagInput)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("input file path does not exist: %s", flagInput)) } - return common.FlagValidationError(cmd, fmt.Sprintf("failed to access input file path: %s, error: %v", flagInput, err)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("failed to access input file path: %s, error: %v", flagInput, err)) } } // common target flags - if err := common.ValidateTargetFlags(cmd); err != nil { - return common.FlagValidationError(cmd, err.Error()) + if err := workflow.ValidateTargetFlags(cmd); err != nil { + return workflow.FlagValidationError(cmd, err.Error()) } return nil } @@ -846,7 +848,7 @@ func needsOutputDir(cmd *cobra.Command) bool { func runCmd(cmd *cobra.Command, args []string) error { // appContext is the application context that holds common data and resources. - appContext := cmd.Parent().Context().Value(common.AppContext{}).(common.AppContext) + appContext := cmd.Parent().Context().Value(app.Context{}).(app.Context) localTempDir := appContext.LocalTempDir localOutputDir := appContext.OutputDir // Setup signal manager for coordinated shutdown @@ -883,7 +885,7 @@ func runCmd(cmd *cobra.Command, args []string) error { } } // get the targets - myTargets, targetErrs, err := common.GetTargets(cmd, !flagNoRoot, !flagNoRoot, localTempDir) + myTargets, targetErrs, err := workflow.GetTargets(cmd, !flagNoRoot, !flagNoRoot, localTempDir) if err != nil { fmt.Fprintf(os.Stderr, "Error: %v\n", err) slog.Error(err.Error()) @@ -952,7 +954,7 @@ func runCmd(cmd *cobra.Command, args []string) error { } // check if all targets have the same architecture for _, target := range myTargets { - tArch, err := common.GetTargetArchitecture(target) + tArch, err := workflow.GetTargetArchitecture(target) if err != nil { err = fmt.Errorf("failed to get architecture: %w", err) fmt.Fprintf(os.Stderr, "Error: %v\n", err) @@ -960,7 +962,7 @@ func runCmd(cmd *cobra.Command, args []string) error { cmd.SilenceUsage = true return err } - tArch0, err := common.GetTargetArchitecture(myTargets[0]) + tArch0, err := workflow.GetTargetArchitecture(myTargets[0]) if err != nil { err = fmt.Errorf("failed to get architecture: %w", err) fmt.Fprintf(os.Stderr, "Error: %v\n", err) @@ -1146,8 +1148,8 @@ func prepareTarget(targetContext *targetContext, localTempDir string, channelErr _ = statusUpdate(myTarget.GetName(), "configuring target") // are PMUs being used on target? 
if !flagNoRoot { - if family, err := common.GetTargetFamily(myTarget); err == nil && cpus.IsIntelCPUFamilyStr(family) { - output, err := common.RunScript(myTarget, script.GetScriptByName(script.PMUBusyScriptName), localTempDir, flagNoRoot) + if family, err := workflow.GetTargetFamily(myTarget); err == nil && cpus.IsIntelCPUFamilyStr(family) { + output, err := workflow.RunScript(myTarget, script.GetScriptByName(script.PMUBusyScriptName), localTempDir, flagNoRoot) if err != nil { err = fmt.Errorf("failed to check if PMUs are in use: %w", err) _ = statusUpdate(myTarget.GetName(), fmt.Sprintf("Error: %v", err)) @@ -1197,7 +1199,7 @@ func prepareTarget(targetContext *targetContext, localTempDir string, channelErr perfMuxInterval := flagPerfMuxInterval if useDefaultMuxInterval { // set the default mux interval to 16ms for AMD architecture - vendor, err := common.GetTargetVendor(myTarget) + vendor, err := workflow.GetTargetVendor(myTarget) if err == nil && vendor == cpus.AMDVendor { perfMuxInterval = 16 } @@ -1416,7 +1418,7 @@ func runPerf(myTarget target.Target, noRoot bool, processes []Process, perfComma Superuser: !noRoot, } // start goroutine to run perf, output will be streamed back in provided channels - go common.RunScriptStream(myTarget, perfStatScript, localTempDir, stdoutChannel, stderrChannel, exitcodeChannel, scriptErrorChannel, cmdChannel, flagNoRoot) + go workflow.RunScriptStream(myTarget, perfStatScript, localTempDir, stdoutChannel, stderrChannel, exitcodeChannel, scriptErrorChannel, cmdChannel, flagNoRoot) // Drain stdout to avoid blocking if the workload writes to stdout. // perf stat emits event lines on stderr; stdout is not used by our pipeline. go func() { diff --git a/cmd/metrics/nmi_watchdog.go b/cmd/metrics/nmi_watchdog.go index e1ef1c44..a08af437 100644 --- a/cmd/metrics/nmi_watchdog.go +++ b/cmd/metrics/nmi_watchdog.go @@ -11,7 +11,8 @@ import ( "os/exec" "strings" - "perfspect/internal/common" + "perfspect/internal/workflow" + "perfspect/internal/script" "perfspect/internal/target" ) @@ -65,7 +66,7 @@ func setNMIWatchdog(myTarget target.Target, setting string, localTempDir string) if sysctl, err = findSysctl(myTarget); err != nil { return } - _, err = common.RunScript(myTarget, script.ScriptDefinition{ + _, err = workflow.RunScript(myTarget, script.ScriptDefinition{ Name: "set NMI watchdog", ScriptTemplate: fmt.Sprintf("%s kernel.nmi_watchdog=%s", sysctl, setting), Superuser: true}, diff --git a/cmd/metrics/perf_mux.go b/cmd/metrics/perf_mux.go index be06f186..ecce80db 100644 --- a/cmd/metrics/perf_mux.go +++ b/cmd/metrics/perf_mux.go @@ -10,7 +10,8 @@ import ( "strconv" "strings" - "perfspect/internal/common" + "perfspect/internal/workflow" + "perfspect/internal/script" "perfspect/internal/target" ) @@ -18,7 +19,7 @@ import ( // GetMuxIntervals - get a map of sysfs device file names to current mux value for the associated device func GetMuxIntervals(myTarget target.Target, localTempDir string) (intervals map[string]int, err error) { bash := "for file in $(find /sys/devices -type f -name perf_event_mux_interval_ms); do echo $file $(cat $file); done" - scriptOutput, err := common.RunScript(myTarget, script.ScriptDefinition{Name: "get mux intervals", ScriptTemplate: bash, Superuser: false}, localTempDir, flagNoRoot) + scriptOutput, err := workflow.RunScript(myTarget, script.ScriptDefinition{Name: "get mux intervals", ScriptTemplate: bash, Superuser: false}, localTempDir, flagNoRoot) if err != nil { return } @@ -40,13 +41,13 @@ func SetMuxIntervals(myTarget target.Target, 
intervals map[string]int, localTemp for device := range intervals { fmt.Fprintf(&bash, "echo %d > %s; ", intervals[device], device) } - _, err = common.RunScript(myTarget, script.ScriptDefinition{Name: "set mux intervals", ScriptTemplate: bash.String(), Superuser: true}, localTempDir, flagNoRoot) + _, err = workflow.RunScript(myTarget, script.ScriptDefinition{Name: "set mux intervals", ScriptTemplate: bash.String(), Superuser: true}, localTempDir, flagNoRoot) return } // SetAllMuxIntervals - writes the given interval (ms) to all perf mux sysfs device files func SetAllMuxIntervals(myTarget target.Target, interval int, localTempDir string) (err error) { bash := fmt.Sprintf("for file in $(find /sys/devices -type f -name perf_event_mux_interval_ms); do echo %d > $file; done", interval) - _, err = common.RunScript(myTarget, script.ScriptDefinition{Name: "set all mux intervals", ScriptTemplate: bash, Superuser: true}, localTempDir, flagNoRoot) + _, err = workflow.RunScript(myTarget, script.ScriptDefinition{Name: "set all mux intervals", ScriptTemplate: bash, Superuser: true}, localTempDir, flagNoRoot) return } diff --git a/cmd/metrics/process.go b/cmd/metrics/process.go index 4c92f1ae..b8b31017 100644 --- a/cmd/metrics/process.go +++ b/cmd/metrics/process.go @@ -13,7 +13,9 @@ import ( "regexp" "strings" - "perfspect/internal/common" + "perfspect/internal/app" + "perfspect/internal/workflow" + "perfspect/internal/script" "perfspect/internal/target" ) @@ -94,7 +96,7 @@ func GetHotProcesses(myTarget target.Target, maxProcesses int, filter string) (p comm := match[3] cmd := match[4] // skip processes that match the name of this program - if strings.Contains(cmd, filepath.Base(common.AppName)) { + if strings.Contains(cmd, filepath.Base(app.Name)) { slog.Debug("Skipping self", slog.String("PID", pid)) continue } @@ -158,7 +160,7 @@ done | sort -nr | head -n %d `, filter, maxCgroups), Superuser: true, } - output, err := common.RunScript(myTarget, hotCgroupsScript, localTempDir, flagNoRoot) + output, err := workflow.RunScript(myTarget, hotCgroupsScript, localTempDir, flagNoRoot) if err != nil { err = fmt.Errorf("failed to get hot cgroups: %v", err) return @@ -219,7 +221,7 @@ echo $cgroup_path `, cid), Superuser: true, } - output, err := common.RunScript(myTarget, cgroupScript, localTempDir, flagNoRoot) + output, err := workflow.RunScript(myTarget, cgroupScript, localTempDir, flagNoRoot) if err != nil { err = fmt.Errorf("failed to get cgroup: %v", err) return diff --git a/cmd/metrics/trim.go b/cmd/metrics/trim.go index 0add90de..1292fe89 100644 --- a/cmd/metrics/trim.go +++ b/cmd/metrics/trim.go @@ -11,7 +11,9 @@ import ( "path/filepath" "strings" - "perfspect/internal/common" + "perfspect/internal/app" + "perfspect/internal/workflow" + "perfspect/internal/util" "github.com/spf13/cobra" @@ -84,43 +86,43 @@ func validateTrimFlags(cmd *cobra.Command, args []string) error { // Check input file or directory exists if _, err := os.Stat(flagTrimInput); err != nil { if os.IsNotExist(err) { - return common.FlagValidationError(cmd, fmt.Sprintf("input file or directory does not exist: %s", flagTrimInput)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("input file or directory does not exist: %s", flagTrimInput)) } - return common.FlagValidationError(cmd, fmt.Sprintf("failed to access input file or directory: %v", err)) + return workflow.FlagValidationError(cmd, fmt.Sprintf("failed to access input file or directory: %v", err)) } // Check that at least one time parameter is provided if flagTrimStartTime == 0 
&& flagTrimEndTime == 0 && flagTrimStartOffset == 0 && flagTrimEndOffset == 0 { - return common.FlagValidationError(cmd, "at least one time parameter must be specified (--start-time, --end-time, --start-offset, or --end-offset)") + return workflow.FlagValidationError(cmd, "at least one time parameter must be specified (--start-time, --end-time, --start-offset, or --end-offset)") } // Check that both absolute time and offset are not specified for start if flagTrimStartTime != 0 && flagTrimStartOffset != 0 { - return common.FlagValidationError(cmd, "cannot specify both --start-time and --start-offset") + return workflow.FlagValidationError(cmd, "cannot specify both --start-time and --start-offset") } // Check that both absolute time and offset are not specified for end if flagTrimEndTime != 0 && flagTrimEndOffset != 0 { - return common.FlagValidationError(cmd, "cannot specify both --end-time and --end-offset") + return workflow.FlagValidationError(cmd, "cannot specify both --end-time and --end-offset") } // Check for negative values if flagTrimStartTime < 0 { - return common.FlagValidationError(cmd, "--start-time cannot be negative") + return workflow.FlagValidationError(cmd, "--start-time cannot be negative") } if flagTrimEndTime < 0 { - return common.FlagValidationError(cmd, "--end-time cannot be negative") + return workflow.FlagValidationError(cmd, "--end-time cannot be negative") } if flagTrimStartOffset < 0 { - return common.FlagValidationError(cmd, "--start-offset cannot be negative") + return workflow.FlagValidationError(cmd, "--start-offset cannot be negative") } if flagTrimEndOffset < 0 { - return common.FlagValidationError(cmd, "--end-offset cannot be negative") + return workflow.FlagValidationError(cmd, "--end-offset cannot be negative") } // Check that absolute times are in order if both specified if flagTrimStartTime != 0 && flagTrimEndTime != 0 && flagTrimStartTime >= flagTrimEndTime { - return common.FlagValidationError(cmd, "--start-time must be less than --end-time") + return workflow.FlagValidationError(cmd, "--start-time must be less than --end-time") } return nil @@ -129,7 +131,7 @@ func validateTrimFlags(cmd *cobra.Command, args []string) error { // runTrimCmd executes the trim command func runTrimCmd(cmd *cobra.Command, args []string) error { // appContext is the application context that holds common data and resources. 
- appContext := cmd.Parent().Context().Value(common.AppContext{}).(common.AppContext) + appContext := cmd.Parent().Context().Value(app.Context{}).(app.Context) outputDir := appContext.OutputDir // flagTrimInput can be a file or directory diff --git a/cmd/report/cpu.go b/cmd/report/cpu.go deleted file mode 100644 index 98b896fc..00000000 --- a/cmd/report/cpu.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -package report - -import ( - "fmt" - "log/slog" - "strconv" - "strings" - - "perfspect/internal/common" - "perfspect/internal/cpus" - "perfspect/internal/script" -) - -func numaCPUListFromOutput(outputs map[string]script.ScriptOutput) string { - nodeCPUs := common.ValsFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node[0-9] CPU\(.*:\s*(.+?)$`) - return strings.Join(nodeCPUs, " :: ") -} - -func ppinsFromOutput(outputs map[string]script.ScriptOutput) string { - uniquePpins := []string{} - for line := range strings.SplitSeq(outputs[script.PPINName].Stdout, "\n") { - parts := strings.Split(line, ":") - if len(parts) < 2 { - continue - } - ppin := strings.TrimSpace(parts[1]) - found := false - for _, p := range uniquePpins { - if string(p) == ppin { - found = true - break - } - } - if !found && ppin != "" { - uniquePpins = append(uniquePpins, ppin) - } - } - return strings.Join(uniquePpins, ", ") -} - -func channelsFromOutput(outputs map[string]script.ScriptOutput) string { - family := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) - model := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) - stepping := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) - capid4 := common.ValFromRegexSubmatch(outputs[script.LspciBitsScriptName].Stdout, `^([0-9a-fA-F]+)`) - devices := common.ValFromRegexSubmatch(outputs[script.LspciDevicesScriptName].Stdout, `^([0-9]+)`) - implementer := strings.TrimSpace(outputs[script.ArmImplementerScriptName].Stdout) - part := strings.TrimSpace(outputs[script.ArmPartScriptName].Stdout) - dmidecodePart := strings.TrimSpace(outputs[script.ArmDmidecodePartScriptName].Stdout) - cpu, err := cpus.GetCPU(cpus.NewCPUIdentifier(family, model, stepping, capid4, devices, implementer, part, dmidecodePart, "")) - if err != nil { - slog.Error("error getting CPU characteristics", slog.String("error", err.Error())) - return "" - } - return fmt.Sprintf("%d", cpu.MemoryChannelCount) -} - -func turboEnabledFromOutput(outputs map[string]script.ScriptOutput) string { - vendor := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Vendor ID:\s*(.+)$`) - switch vendor { - case cpus.IntelVendor: - val := common.ValFromRegexSubmatch(outputs[script.CpuidScriptName].Stdout, `^Intel Turbo Boost Technology\s*= (.+?)$`) - if val == "true" { - return "Enabled" - } - if val == "false" { - return "Disabled" - } - return "" // unknown value - case cpus.AMDVendor: - val := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Frequency boost.*:\s*(.+?)$`) - if val != "" { - return val + " (AMD Frequency Boost)" - } - } - return "" -} - -func chaCountFromOutput(outputs map[string]script.ScriptOutput) string { - // output is the result of three rdmsr calls - // - client cha count - // - cha count - // - spr cha count - // stop when we find a non-zero value - // note: rdmsr writes to stderr on error so we will likely have fewer than 3 lines in stdout - for hexCount 
:= range strings.SplitSeq(outputs[script.ChaCountScriptName].Stdout, "\n") { - if hexCount != "" && hexCount != "0" { - count, err := strconv.ParseInt(hexCount, 16, 64) - if err == nil { - return fmt.Sprintf("%d", count) - } - } - } - return "" -} - -func numaBalancingFromOutput(outputs map[string]script.ScriptOutput) string { - if strings.Contains(outputs[script.NumaBalancingScriptName].Stdout, "1") { - return "Enabled" - } else if strings.Contains(outputs[script.NumaBalancingScriptName].Stdout, "0") { - return "Disabled" - } - return "" -} - -func clusteringModeFromOutput(outputs map[string]script.ScriptOutput) string { - uarch := common.UarchFromOutput(outputs) - sockets := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) - nodes := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`) - if uarch == "" || sockets == "" || nodes == "" { - return "" - } - socketCount, err := strconv.Atoi(sockets) - if err != nil { - slog.Error("failed to parse socket count", slog.String("error", err.Error())) - return "" - } - nodeCount, err := strconv.Atoi(nodes) - if err != nil { - slog.Error("failed to parse node count", slog.String("error", err.Error())) - return "" - } - if nodeCount == 0 || socketCount == 0 { - slog.Error("node count or socket count is zero") - return "" - } - nodesPerSocket := nodeCount / socketCount - switch uarch { - case cpus.UarchGNR_X1: - return "All2All" - case cpus.UarchGNR_X2: - switch nodesPerSocket { - case 1: - return "UMA 4 (Quad)" - case 2: - return "SNC 2" - } - case cpus.UarchGNR_X3: - switch nodesPerSocket { - case 1: - return "UMA 6 (Hex)" - case 3: - return "SNC 3" - } - case cpus.UarchSRF_SP: - return "UMA 2 (Hemi)" - case cpus.UarchSRF_AP: - switch nodesPerSocket { - case 1: - return "UMA 4 (Quad)" - case 2: - return "SNC 2" - } - case cpus.UarchCWF: - switch nodesPerSocket { - case 1: - return "UMA 6 (Hex)" - case 3: - return "SNC 3" - } - } - return "" -} diff --git a/cmd/report/report.go b/cmd/report/report.go index e96940fe..690b619e 100644 --- a/cmd/report/report.go +++ b/cmd/report/report.go @@ -12,18 +12,19 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - "perfspect/internal/common" + "perfspect/internal/app" "perfspect/internal/report" "perfspect/internal/table" + "perfspect/internal/workflow" ) const cmdName = "report" var examples = []string{ - fmt.Sprintf(" Data from local host: $ %s %s", common.AppName, cmdName), - fmt.Sprintf(" Specific data from local host: $ %s %s --bios --os --cpu --format html,json", common.AppName, cmdName), - fmt.Sprintf(" All data from remote target: $ %s %s --target 192.168.1.1 --user fred --key fred_key", common.AppName, cmdName), - fmt.Sprintf(" Data from multiple targets: $ %s %s --targets targets.yaml", common.AppName, cmdName), + fmt.Sprintf(" Data from local host: $ %s %s", app.Name, cmdName), + fmt.Sprintf(" Specific data from local host: $ %s %s --bios --os --cpu --format html,json", app.Name, cmdName), + fmt.Sprintf(" All data from remote target: $ %s %s --target 192.168.1.1 --user fred --key fred_key", app.Name, cmdName), + fmt.Sprintf(" Data from multiple targets: $ %s %s --targets targets.yaml", app.Name, cmdName), } var Cmd = &cobra.Command{ @@ -114,7 +115,7 @@ const ( ) // categories maps flag names to tables that will be included in report -var categories = []common.Category{ +var categories = []app.Category{ {FlagName: flagSystemSummaryName, FlagVar: &flagSystemSummary, Help: "System Summary", Tables: 
[]table.TableDefinition{tableDefinitions[SystemSummaryTableName]}}, {FlagName: flagHostName, FlagVar: &flagHost, Help: "Host", Tables: []table.TableDefinition{tableDefinitions[HostTableName]}}, {FlagName: flagBiosName, FlagVar: &flagBios, Help: "BIOS", Tables: []table.TableDefinition{tableDefinitions[BIOSTableName]}}, @@ -155,11 +156,11 @@ func init() { Cmd.Flags().BoolVar(cat.FlagVar, cat.FlagName, cat.DefaultValue, cat.Help) } // set up other flags - Cmd.Flags().StringVar(&common.FlagInput, common.FlagInputName, "", "") + Cmd.Flags().StringVar(&app.FlagInput, app.FlagInputName, "", "") Cmd.Flags().BoolVar(&flagAll, flagAllName, true, "") - Cmd.Flags().StringSliceVar(&common.FlagFormat, common.FlagFormatName, []string{report.FormatAll}, "") + Cmd.Flags().StringSliceVar(&app.FlagFormat, app.FlagFormatName, []string{report.FormatAll}, "") - common.AddTargetFlags(Cmd) + workflow.AddTargetFlags(Cmd) Cmd.SetUsageFunc(usageFunc) } @@ -189,42 +190,42 @@ func usageFunc(cmd *cobra.Command) error { return nil } -func getFlagGroups() []common.FlagGroup { - var groups []common.FlagGroup - flags := []common.Flag{ +func getFlagGroups() []app.FlagGroup { + var groups []app.FlagGroup + flags := []app.Flag{ { Name: flagAllName, Help: "report configuration for all categories", }, } for _, cat := range categories { - flags = append(flags, common.Flag{ + flags = append(flags, app.Flag{ Name: cat.FlagName, Help: cat.Help, }) } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Categories", Flags: flags, }) - flags = []common.Flag{ + flags = []app.Flag{ { - Name: common.FlagFormatName, + Name: app.FlagFormatName, Help: fmt.Sprintf("choose output format(s) from: %s", strings.Join(append([]string{report.FormatAll}, report.FormatOptions...), ", ")), }, } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Other Options", Flags: flags, }) - groups = append(groups, common.GetTargetFlagGroup()) - flags = []common.Flag{ + groups = append(groups, workflow.GetTargetFlagGroup()) + flags = []app.Flag{ { - Name: common.FlagInputName, + Name: app.FlagInputName, Help: "\".raw\" file, or directory containing \".raw\" files. Will skip data collection and use raw data for reports.", }, } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Advanced Options", Flags: flags, }) @@ -242,15 +243,15 @@ func validateFlags(cmd *cobra.Command, args []string) error { } } // validate format options - for _, format := range common.FlagFormat { + for _, format := range app.FlagFormat { formatOptions := append([]string{report.FormatAll}, report.FormatOptions...) 
if !slices.Contains(formatOptions, format) { - return common.FlagValidationError(cmd, fmt.Sprintf("format options are: %s", strings.Join(formatOptions, ", "))) + return workflow.FlagValidationError(cmd, fmt.Sprintf("format options are: %s", strings.Join(formatOptions, ", "))) } } // common target flags - if err := common.ValidateTargetFlags(cmd); err != nil { - return common.FlagValidationError(cmd, err.Error()) + if err := workflow.ValidateTargetFlags(cmd); err != nil { + return workflow.FlagValidationError(cmd, err.Error()) } return nil } @@ -264,11 +265,11 @@ func runCmd(cmd *cobra.Command, args []string) error { } } // include insights table if all categories are selected - var insightsFunc common.InsightsFunc + var insightsFunc app.InsightsFunc if flagAll { - insightsFunc = common.DefaultInsightsFunc + insightsFunc = workflow.DefaultInsightsFunc } - reportingCommand := common.ReportingCommand{ + reportingCommand := workflow.ReportingCommand{ Cmd: cmd, Tables: tables, InsightsFunc: insightsFunc, diff --git a/cmd/report/report_tables.go b/cmd/report/report_tables.go index 9da0c5f6..1558614c 100644 --- a/cmd/report/report_tables.go +++ b/cmd/report/report_tables.go @@ -14,8 +14,8 @@ import ( "strconv" "strings" - "perfspect/internal/common" "perfspect/internal/cpus" + "perfspect/internal/extract" "perfspect/internal/report" "perfspect/internal/script" "perfspect/internal/table" @@ -470,15 +470,15 @@ var tableDefinitions = map[string]table.TableDefinition{ func hostTableValues(outputs map[string]script.ScriptOutput) []table.Field { hostName := strings.TrimSpace(outputs[script.HostnameScriptName].Stdout) time := strings.TrimSpace(outputs[script.DateScriptName].Stdout) - system := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + - " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) + - ", " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Version:\s*(.+?)$`) - baseboard := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Manufacturer:\s*(.+?)$`) + - " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Product Name:\s*(.+?)$`) + - ", " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Version:\s*(.+?)$`) - chassis := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Manufacturer:\s*(.+?)$`) + - " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Type:\s*(.+?)$`) + - ", " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Version:\s*(.+?)$`) + system := extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + + " " + extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) + + ", " + extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Version:\s*(.+?)$`) + baseboard := extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Manufacturer:\s*(.+?)$`) + + " " + extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Product Name:\s*(.+?)$`) + + ", " + extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Version:\s*(.+?)$`) + chassis := 
extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Manufacturer:\s*(.+?)$`) + + " " + extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Type:\s*(.+?)$`) + + ", " + extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Version:\s*(.+?)$`) return []table.Field{ {Name: "Host Name", Values: []string{hostName}}, {Name: "Time", Values: []string{time}}, @@ -489,7 +489,7 @@ func hostTableValues(outputs map[string]script.ScriptOutput) []table.Field { } func pcieSlotsTableValues(outputs map[string]script.ScriptOutput) []table.Field { - fieldValues := common.ValsArrayFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "9", + fieldValues := extract.ValsArrayFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "9", []string{ `^Designation:\s*(.+?)$`, `^Type:\s*(.+?)$`, @@ -522,7 +522,7 @@ func biosTableValues(outputs map[string]script.ScriptOutput) []table.Field { {Name: "Version"}, {Name: "Release Date"}, } - fieldValues := common.ValsArrayFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "0", + fieldValues := extract.ValsArrayFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "0", []string{ `^Vendor:\s*(.+?)$`, `^Version:\s*(.+?)$`, @@ -543,72 +543,72 @@ func biosTableValues(outputs map[string]script.ScriptOutput) []table.Field { func operatingSystemTableValues(outputs map[string]script.ScriptOutput) []table.Field { return []table.Field{ - {Name: "OS", Values: []string{common.OperatingSystemFromOutput(outputs)}}, - {Name: "Kernel", Values: []string{common.ValFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`)}}, + {Name: "OS", Values: []string{extract.OperatingSystemFromOutput(outputs)}}, + {Name: "Kernel", Values: []string{extract.ValFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`)}}, {Name: "Boot Parameters", Values: []string{strings.TrimSpace(outputs[script.ProcCmdlineScriptName].Stdout)}}, - {Name: "Microcode", Values: []string{common.ValFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`)}}, + {Name: "Microcode", Values: []string{extract.ValFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`)}}, } } func softwareVersionTableValues(outputs map[string]script.ScriptOutput) []table.Field { return []table.Field{ - {Name: "GCC", Values: []string{common.ValFromRegexSubmatch(outputs[script.GccVersionScriptName].Stdout, `^(gcc .*)$`)}}, - {Name: "GLIBC", Values: []string{common.ValFromRegexSubmatch(outputs[script.GlibcVersionScriptName].Stdout, `^(ldd .*)`)}}, - {Name: "Binutils", Values: []string{common.ValFromRegexSubmatch(outputs[script.BinutilsVersionScriptName].Stdout, `^(GNU ld .*)$`)}}, - {Name: "Python", Values: []string{common.ValFromRegexSubmatch(outputs[script.PythonVersionScriptName].Stdout, `^(Python .*)$`)}}, - {Name: "Python3", Values: []string{common.ValFromRegexSubmatch(outputs[script.Python3VersionScriptName].Stdout, `^(Python 3.*)$`)}}, - {Name: "Java", Values: []string{common.ValFromRegexSubmatch(outputs[script.JavaVersionScriptName].Stdout, `^(openjdk .*)$`)}}, - {Name: "OpenSSL", Values: []string{common.ValFromRegexSubmatch(outputs[script.OpensslVersionScriptName].Stdout, `^(OpenSSL .*)$`)}}, + {Name: "GCC", Values: []string{extract.ValFromRegexSubmatch(outputs[script.GccVersionScriptName].Stdout, `^(gcc .*)$`)}}, + {Name: "GLIBC", Values: 
[]string{extract.ValFromRegexSubmatch(outputs[script.GlibcVersionScriptName].Stdout, `^(ldd .*)`)}}, + {Name: "Binutils", Values: []string{extract.ValFromRegexSubmatch(outputs[script.BinutilsVersionScriptName].Stdout, `^(GNU ld .*)$`)}}, + {Name: "Python", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PythonVersionScriptName].Stdout, `^(Python .*)$`)}}, + {Name: "Python3", Values: []string{extract.ValFromRegexSubmatch(outputs[script.Python3VersionScriptName].Stdout, `^(Python 3.*)$`)}}, + {Name: "Java", Values: []string{extract.ValFromRegexSubmatch(outputs[script.JavaVersionScriptName].Stdout, `^(openjdk .*)$`)}}, + {Name: "OpenSSL", Values: []string{extract.ValFromRegexSubmatch(outputs[script.OpensslVersionScriptName].Stdout, `^(OpenSSL .*)$`)}}, } } func cpuTableValues(outputs map[string]script.ScriptOutput) []table.Field { var l1d, l1i, l2 string - lscpuCache, err := common.ParseLscpuCacheOutput(outputs[script.LscpuCacheScriptName].Stdout) + lscpuCache, err := extract.ParseLscpuCacheOutput(outputs[script.LscpuCacheScriptName].Stdout) if err != nil { slog.Warn("failed to parse lscpu cache output", "error", err) } else { if _, ok := lscpuCache["L1d"]; ok { - l1d = common.L1l2CacheSizeFromLscpuCache(lscpuCache["L1d"]) + l1d = extract.L1l2CacheSizeFromLscpuCache(lscpuCache["L1d"]) } if _, ok := lscpuCache["L1i"]; ok { - l1i = common.L1l2CacheSizeFromLscpuCache(lscpuCache["L1i"]) + l1i = extract.L1l2CacheSizeFromLscpuCache(lscpuCache["L1i"]) } if _, ok := lscpuCache["L2"]; ok { - l2 = common.L1l2CacheSizeFromLscpuCache(lscpuCache["L2"]) + l2 = extract.L1l2CacheSizeFromLscpuCache(lscpuCache["L2"]) } } return []table.Field{ - {Name: "CPU Model", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name:\s*(.+)$`)}}, - {Name: "Architecture", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Architecture:\s*(.+)$`)}}, - {Name: "Microarchitecture", Values: []string{common.UarchFromOutput(outputs)}}, - {Name: "Family", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`)}}, - {Name: "Model", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`)}}, - {Name: "Stepping", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`)}}, - {Name: "Base Frequency", Values: []string{common.BaseFrequencyFromOutput(outputs)}, Description: "The minimum guaranteed speed of a single core under standard conditions."}, - {Name: "Maximum Frequency", Values: []string{common.MaxFrequencyFromOutput(outputs)}, Description: "The highest speed a single core can reach with Turbo Boost."}, - {Name: "All-core Maximum Frequency", Values: []string{common.AllCoreMaxFrequencyFromOutput(outputs)}, Description: "The highest speed all cores can reach simultaneously with Turbo Boost."}, - {Name: "CPUs", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(s\):\s*(.+)$`)}}, - {Name: "On-line CPU List", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^On-line CPU\(s\) list:\s*(.+)$`)}}, - {Name: "Hyperthreading", Values: []string{common.HyperthreadingFromOutput(outputs)}}, - {Name: "Cores per Socket", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, - {Name: "Sockets", Values: 
[]string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, - {Name: "NUMA Nodes", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`)}}, - {Name: "NUMA CPU List", Values: []string{numaCPUListFromOutput(outputs)}}, + {Name: "CPU Model", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name:\s*(.+)$`)}}, + {Name: "Architecture", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Architecture:\s*(.+)$`)}}, + {Name: "Microarchitecture", Values: []string{extract.UarchFromOutput(outputs)}}, + {Name: "Family", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`)}}, + {Name: "Model", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`)}}, + {Name: "Stepping", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`)}}, + {Name: "Base Frequency", Values: []string{extract.BaseFrequencyFromOutput(outputs)}, Description: "The minimum guaranteed speed of a single core under standard conditions."}, + {Name: "Maximum Frequency", Values: []string{extract.MaxFrequencyFromOutput(outputs)}, Description: "The highest speed a single core can reach with Turbo Boost."}, + {Name: "All-core Maximum Frequency", Values: []string{extract.AllCoreMaxFrequencyFromOutput(outputs)}, Description: "The highest speed all cores can reach simultaneously with Turbo Boost."}, + {Name: "CPUs", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(s\):\s*(.+)$`)}}, + {Name: "On-line CPU List", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^On-line CPU\(s\) list:\s*(.+)$`)}}, + {Name: "Hyperthreading", Values: []string{extract.HyperthreadingFromOutput(outputs)}}, + {Name: "Cores per Socket", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, + {Name: "Sockets", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, + {Name: "NUMA Nodes", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`)}}, + {Name: "NUMA CPU List", Values: []string{extract.NumaCPUListFromOutput(outputs)}}, {Name: "L1d Cache", Values: []string{l1d}, Description: "The size of the L1 data cache for one core."}, {Name: "L1i Cache", Values: []string{l1i}, Description: "The size of the L1 instruction cache for one core."}, {Name: "L2 Cache", Values: []string{l2}, Description: "The size of the L2 cache for one core."}, - {Name: "L3 Cache (instance/total)", Values: []string{common.L3FromOutput(outputs)}, Description: "The size of one L3 cache instance and the total L3 cache size for the system."}, - {Name: "L3 per Core", Values: []string{common.L3PerCoreFromOutput(outputs)}, Description: "The L3 cache size per core."}, - {Name: "Memory Channels", Values: []string{channelsFromOutput(outputs)}}, - {Name: "Intel Turbo Boost", Values: []string{turboEnabledFromOutput(outputs)}}, - {Name: "Virtualization", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Virtualization:\s*(.+)$`)}}, - {Name: "PPINs", Values: []string{ppinsFromOutput(outputs)}}, + {Name: "L3 Cache (instance/total)", Values: 
[]string{extract.L3FromOutput(outputs)}, Description: "The size of one L3 cache instance and the total L3 cache size for the system."}, + {Name: "L3 per Core", Values: []string{extract.L3PerCoreFromOutput(outputs)}, Description: "The L3 cache size per core."}, + {Name: "Memory Channels", Values: []string{extract.ChannelsFromOutput(outputs)}}, + {Name: "Intel Turbo Boost", Values: []string{extract.TurboEnabledFromOutput(outputs)}}, + {Name: "Virtualization", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Virtualization:\s*(.+)$`)}}, + {Name: "PPINs", Values: []string{extract.PPINsFromOutput(outputs)}}, } } func prefetcherTableValues(outputs map[string]script.ScriptOutput) []table.Field { - prefetchers := common.PrefetchersFromOutput(outputs) + prefetchers := extract.PrefetchersFromOutput(outputs) if len(prefetchers) == 0 { return []table.Field{} } @@ -696,8 +696,8 @@ func cpuTableInsights(outputs map[string]script.ScriptOutput, tableValues table. func isaTableValues(outputs map[string]script.ScriptOutput) []table.Field { fields := []table.Field{} - supported := isaSupportedFromOutput(outputs) - for i, isa := range isaFullNames() { + supported := extract.ISASupportedFromOutput(outputs) + for i, isa := range extract.ISAFullNames() { fields = append(fields, table.Field{ Name: isa, Values: []string{supported[i]}, @@ -707,16 +707,16 @@ func isaTableValues(outputs map[string]script.ScriptOutput) []table.Field { } func acceleratorTableValues(outputs map[string]script.ScriptOutput) []table.Field { - names := acceleratorNames() + names := extract.AcceleratorNames() if len(names) == 0 { return []table.Field{} } return []table.Field{ {Name: "Name", Values: names}, - {Name: "Count", Values: acceleratorCountsFromOutput(outputs)}, - {Name: "Work Queues", Values: acceleratorWorkQueuesFromOutput(outputs)}, - {Name: "Full Name", Values: acceleratorFullNamesFromYaml()}, - {Name: "Description", Values: acceleratorDescriptionsFromYaml()}, + {Name: "Count", Values: extract.AcceleratorCountsFromOutput(outputs)}, + {Name: "Work Queues", Values: extract.AcceleratorWorkQueuesFromOutput(outputs)}, + {Name: "Full Name", Values: extract.AcceleratorFullNames()}, + {Name: "Description", Values: extract.AcceleratorDescriptions()}, } } @@ -758,9 +758,9 @@ func acceleratorTableInsights(outputs map[string]script.ScriptOutput, tableValue func powerTableValues(outputs map[string]script.ScriptOutput) []table.Field { return []table.Field{ - {Name: "TDP", Values: []string{common.TDPFromOutput(outputs)}}, - {Name: "Energy Performance Bias", Values: []string{common.EPBFromOutput(outputs)}}, - {Name: "Energy Performance Preference", Values: []string{common.EPPFromOutput(outputs)}}, + {Name: "TDP", Values: []string{extract.TDPFromOutput(outputs)}}, + {Name: "Energy Performance Bias", Values: []string{extract.EPBFromOutput(outputs)}}, + {Name: "Energy Performance Preference", Values: []string{extract.EPPFromOutput(outputs)}}, {Name: "Scaling Governor", Values: []string{strings.TrimSpace(outputs[script.ScalingGovernorScriptName].Stdout)}}, {Name: "Scaling Driver", Values: []string{strings.TrimSpace(outputs[script.ScalingDriverScriptName].Stdout)}}, } @@ -790,7 +790,7 @@ func powerTableInsights(outputs map[string]script.ScriptOutput, tableValues tabl } func cstateTableValues(outputs map[string]script.ScriptOutput) []table.Field { - cstates := common.CstatesFromOutput(outputs) + cstates := extract.CstatesFromOutput(outputs) if len(cstates) == 0 { return []table.Field{} } @@ -806,32 +806,32 @@ 
func cstateTableValues(outputs map[string]script.ScriptOutput) []table.Field { } func uncoreTableValues(outputs map[string]script.ScriptOutput) []table.Field { - uarch := common.UarchFromOutput(outputs) + uarch := extract.UarchFromOutput(outputs) if uarch == "" { slog.Error("failed to get uarch from script outputs") return []table.Field{} } if strings.Contains(uarch, cpus.UarchSRF) || strings.Contains(uarch, cpus.UarchGNR) || strings.Contains(uarch, cpus.UarchCWF) { return []table.Field{ - {Name: "Min Frequency (Compute)", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(false, true, outputs)}}, - {Name: "Min Frequency (I/O)", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(false, false, outputs)}}, - {Name: "Max Frequency (Compute)", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(true, true, outputs)}}, - {Name: "Max Frequency (I/O)", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(true, false, outputs)}}, - {Name: "CHA Count", Values: []string{chaCountFromOutput(outputs)}}, + {Name: "Min Frequency (Compute)", Values: []string{extract.UncoreMinMaxDieFrequencyFromOutput(false, true, outputs)}}, + {Name: "Min Frequency (I/O)", Values: []string{extract.UncoreMinMaxDieFrequencyFromOutput(false, false, outputs)}}, + {Name: "Max Frequency (Compute)", Values: []string{extract.UncoreMinMaxDieFrequencyFromOutput(true, true, outputs)}}, + {Name: "Max Frequency (I/O)", Values: []string{extract.UncoreMinMaxDieFrequencyFromOutput(true, false, outputs)}}, + {Name: "CHA Count", Values: []string{extract.ChaCountFromOutput(outputs)}}, } } else { // field counts need to match for the all_hosts reports to work properly return []table.Field{ - {Name: "Min Frequency", Values: []string{common.UncoreMinFrequencyFromOutput(outputs)}}, + {Name: "Min Frequency", Values: []string{extract.UncoreMinFrequencyFromOutput(outputs)}}, {Name: "N/A", Values: []string{""}}, - {Name: "Max Frequency", Values: []string{common.UncoreMaxFrequencyFromOutput(outputs)}}, + {Name: "Max Frequency", Values: []string{extract.UncoreMaxFrequencyFromOutput(outputs)}}, {Name: "N/A", Values: []string{""}}, - {Name: "CHA Count", Values: []string{chaCountFromOutput(outputs)}}, + {Name: "CHA Count", Values: []string{extract.ChaCountFromOutput(outputs)}}, } } } func elcTableValues(outputs map[string]script.ScriptOutput) []table.Field { - return common.ELCFieldValuesFromOutput(outputs) + return extract.ELCFieldValuesFromOutput(outputs) } func elcTableInsights(outputs map[string]script.ScriptOutput, tableValues table.TableValues) []table.Insight { @@ -871,7 +871,7 @@ func elcTableInsights(outputs map[string]script.ScriptOutput, tableValues table. } } // if epb is not set to 'Performance (0)' and ELC mode is set to 'Latency Optimized', suggest setting epb to 'Performance (0)' - epb := common.EPBFromOutput(outputs) + epb := extract.EPBFromOutput(outputs) if epb != "" && epb != "Performance (0)" && firstMode == "Latency Optimized" { insights = append(insights, table.Insight{ Recommendation: "Consider setting Energy Performance Bias to 'Performance (0)' to allow Latency Optimized mode to operate as designed.", @@ -883,7 +883,7 @@ func elcTableInsights(outputs map[string]script.ScriptOutput, tableValues table. 
} func maximumFrequencyTableValues(outputs map[string]script.ScriptOutput) []table.Field { - frequencyBuckets, err := common.GetSpecFrequencyBuckets(outputs) + frequencyBuckets, err := extract.GetSpecFrequencyBuckets(outputs) if err != nil { slog.Warn("unable to get spec core frequencies", slog.String("error", err.Error())) return []table.Field{} @@ -994,19 +994,19 @@ func sstTFLPTableValues(outputs map[string]script.ScriptOutput) []table.Field { func memoryTableValues(outputs map[string]script.ScriptOutput) []table.Field { return []table.Field{ - {Name: "Installed Memory", Values: []string{common.InstalledMemoryFromOutput(outputs)}}, - {Name: "MemTotal", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemTotal:\s*(.+?)$`)}}, - {Name: "MemFree", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemFree:\s*(.+?)$`)}}, - {Name: "MemAvailable", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemAvailable:\s*(.+?)$`)}}, - {Name: "Buffers", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Buffers:\s*(.+?)$`)}}, - {Name: "Cached", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Cached:\s*(.+?)$`)}}, - {Name: "HugePages_Total", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^HugePages_Total:\s*(.+?)$`)}}, - {Name: "Hugepagesize", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Hugepagesize:\s*(.+?)$`)}}, - {Name: "Transparent Huge Pages", Values: []string{common.ValFromRegexSubmatch(outputs[script.TransparentHugePagesScriptName].Stdout, `.*\[(.*)\].*`)}}, - {Name: "Automatic NUMA Balancing", Values: []string{numaBalancingFromOutput(outputs)}}, - {Name: "Populated Memory Channels", Values: []string{populatedChannelsFromOutput(outputs)}}, + {Name: "Installed Memory", Values: []string{extract.InstalledMemoryFromOutput(outputs)}}, + {Name: "MemTotal", Values: []string{extract.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemTotal:\s*(.+?)$`)}}, + {Name: "MemFree", Values: []string{extract.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemFree:\s*(.+?)$`)}}, + {Name: "MemAvailable", Values: []string{extract.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemAvailable:\s*(.+?)$`)}}, + {Name: "Buffers", Values: []string{extract.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Buffers:\s*(.+?)$`)}}, + {Name: "Cached", Values: []string{extract.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Cached:\s*(.+?)$`)}}, + {Name: "HugePages_Total", Values: []string{extract.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^HugePages_Total:\s*(.+?)$`)}}, + {Name: "Hugepagesize", Values: []string{extract.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Hugepagesize:\s*(.+?)$`)}}, + {Name: "Transparent Huge Pages", Values: []string{extract.ValFromRegexSubmatch(outputs[script.TransparentHugePagesScriptName].Stdout, `.*\[(.*)\].*`)}}, + {Name: "Automatic NUMA Balancing", Values: []string{extract.NumaBalancingFromOutput(outputs)}}, + {Name: "Populated Memory Channels", Values: []string{extract.PopulatedChannelsFromOutput(outputs)}}, {Name: "Total Memory Encryption (TME)", Values: []string{strings.TrimSpace(outputs[script.TmeScriptName].Stdout)}}, - {Name: "Clustering Mode", Values: []string{clusteringModeFromOutput(outputs)}}, 
+ {Name: "Clustering Mode", Values: []string{extract.ClusteringModeFromOutput(outputs)}}, } } @@ -1019,13 +1019,13 @@ func memoryTableInsights(outputs map[string]script.ScriptOutput, tableValues tab } else { populatedChannels := tableValues.Fields[populatedChannelsIndex].Values[0] if populatedChannels != "" { - uarch := common.UarchFromOutput(outputs) + uarch := extract.UarchFromOutput(outputs) if uarch != "" { cpu, err := cpus.GetCPUByMicroArchitecture(uarch) if err != nil { slog.Warn(err.Error()) } else { - sockets := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) + sockets := extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) socketCount, err := strconv.Atoi(sockets) if err != nil { slog.Warn(err.Error()) @@ -1043,7 +1043,7 @@ func memoryTableInsights(outputs map[string]script.ScriptOutput, tableValues tab } } // check if NUMA balancing is not enabled (when there are multiple NUMA nodes) - nodes := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`) + nodes := extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`) nodeCount, err := strconv.Atoi(nodes) if err != nil { slog.Warn(err.Error()) @@ -1068,7 +1068,7 @@ func memoryTableInsights(outputs map[string]script.ScriptOutput, tableValues tab } func dimmTableValues(outputs map[string]script.ScriptOutput) []table.Field { - dimmFieldValues := common.ValsArrayFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "17", + dimmFieldValues := extract.ValsArrayFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "17", []string{ `^Bank Locator:\s*(.+?)$`, `^Locator:\s*(.+?)$`, @@ -1107,7 +1107,7 @@ func dimmTableValues(outputs map[string]script.ScriptOutput) []table.Field { fields[fieldIndex].Values = append(fields[fieldIndex].Values, dimmFieldValues[dimmIndex][fieldIndex]) } } - derivedDimmFieldValues := derivedDimmsFieldFromOutput(outputs) + derivedDimmFieldValues := extract.DerivedDimmsFieldFromOutput(outputs) if len(dimmFieldValues) != len(derivedDimmFieldValues) { slog.Warn("unable to derive socket, channel, and slot for all DIMMs") // fill with empty strings @@ -1116,9 +1116,9 @@ func dimmTableValues(outputs map[string]script.ScriptOutput) []table.Field { fields[13].Values = append(fields[13].Values, make([]string, len(dimmFieldValues))...) 
} else { for i := range derivedDimmFieldValues { - fields[11].Values = append(fields[11].Values, derivedDimmFieldValues[i].socket) - fields[12].Values = append(fields[12].Values, derivedDimmFieldValues[i].channel) - fields[13].Values = append(fields[13].Values, derivedDimmFieldValues[i].slot) + fields[11].Values = append(fields[11].Values, derivedDimmFieldValues[i].Socket) + fields[12].Values = append(fields[12].Values, derivedDimmFieldValues[i].Channel) + fields[13].Values = append(fields[13].Values, derivedDimmFieldValues[i].Slot) } } return fields @@ -1166,7 +1166,7 @@ func dimmTableInsights(outputs map[string]script.ScriptOutput, tableValues table } func nicTableValues(outputs map[string]script.ScriptOutput) []table.Field { - allNicsInfo := common.ParseNicInfo(outputs[script.NicInfoScriptName].Stdout) + allNicsInfo := extract.ParseNicInfo(outputs[script.NicInfoScriptName].Stdout) if len(allNicsInfo) == 0 { return []table.Field{} } @@ -1232,7 +1232,7 @@ func nicTableValues(outputs map[string]script.ScriptOutput) []table.Field { } func nicPacketSteeringTableValues(outputs map[string]script.ScriptOutput) []table.Field { - allNicsInfo := common.ParseNicInfo(outputs[script.NicInfoScriptName].Stdout) + allNicsInfo := extract.ParseNicInfo(outputs[script.NicInfoScriptName].Stdout) if len(allNicsInfo) == 0 { return []table.Field{} } @@ -1294,7 +1294,7 @@ func formatQueueCPUMappings(mappings map[string]string, prefix string) string { } func nicCpuAffinityTableValues(outputs map[string]script.ScriptOutput) []table.Field { - nicIRQMappings := common.NICIrqMappingsFromOutput(outputs) + nicIRQMappings := extract.NICIrqMappingsFromOutput(outputs) if len(nicIRQMappings) == 0 { return []table.Field{} } @@ -1348,7 +1348,7 @@ func networkConfigTableValues(outputs map[string]script.ScriptOutput) []table.Fi } func diskTableValues(outputs map[string]script.ScriptOutput) []table.Field { - allDisksInfo := common.DiskInfoFromOutput(outputs) + allDisksInfo := extract.DiskInfoFromOutput(outputs) if len(allDisksInfo) == 0 { return []table.Field{} } @@ -1388,7 +1388,7 @@ func diskTableValues(outputs map[string]script.ScriptOutput) []table.Field { } func filesystemTableValues(outputs map[string]script.ScriptOutput) []table.Field { - return filesystemFieldValuesFromOutput(outputs) + return extract.FilesystemFieldValuesFromOutput(outputs) } func filesystemTableInsights(outputs map[string]script.ScriptOutput, tableValues table.TableValues) []table.Insight { @@ -1410,7 +1410,7 @@ func filesystemTableInsights(outputs map[string]script.ScriptOutput, tableValues } func gpuTableValues(outputs map[string]script.ScriptOutput) []table.Field { - gpuInfos := gpuInfoFromOutput(outputs) + gpuInfos := extract.GPUInfoFromOutput(outputs) if len(gpuInfos) == 0 { return []table.Field{} } @@ -1428,7 +1428,7 @@ func gpuTableValues(outputs map[string]script.ScriptOutput) []table.Field { } func gaudiTableValues(outputs map[string]script.ScriptOutput) []table.Field { - gaudiInfos := gaudiInfoFromOutput(outputs) + gaudiInfos := extract.GaudiInfoFromOutput(outputs) if len(gaudiInfos) == 0 { return []table.Field{} } @@ -1458,7 +1458,7 @@ func gaudiTableValues(outputs map[string]script.ScriptOutput) []table.Field { } func cxlTableValues(outputs map[string]script.ScriptOutput) []table.Field { - cxlDevices := getPCIDevices("CXL", outputs) + cxlDevices := extract.GetPCIDevices("CXL", outputs) if len(cxlDevices) == 0 { return []table.Field{} } @@ -1486,7 +1486,7 @@ func cxlTableValues(outputs map[string]script.ScriptOutput) []table.Field { func 
cveTableValues(outputs map[string]script.ScriptOutput) []table.Field { fields := []table.Field{} - cves := cveInfoFromOutput(outputs) + cves := extract.CVEInfoFromOutput(outputs) for _, cve := range cves { fields = append(fields, table.Field{Name: cve[0], Values: []string{cve[1]}}) } @@ -1622,31 +1622,31 @@ func kernelLogTableValues(outputs map[string]script.ScriptOutput) []table.Field func pmuTableValues(outputs map[string]script.ScriptOutput) []table.Field { return []table.Field{ {Name: "PMU Driver Version", Values: []string{strings.TrimSpace(outputs[script.PMUDriverVersionScriptName].Stdout)}}, - {Name: "cpu_cycles", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x30a (.*)$`)}}, - {Name: "instructions", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x309 (.*)$`)}}, - {Name: "ref_cycles", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x30b (.*)$`)}}, - {Name: "topdown_slots", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x30c (.*)$`)}}, - {Name: "gen_programmable_1", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc1 (.*)$`)}}, - {Name: "gen_programmable_2", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc2 (.*)$`)}}, - {Name: "gen_programmable_3", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc3 (.*)$`)}}, - {Name: "gen_programmable_4", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc4 (.*)$`)}}, - {Name: "gen_programmable_5", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc5 (.*)$`)}}, - {Name: "gen_programmable_6", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc6 (.*)$`)}}, - {Name: "gen_programmable_7", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc7 (.*)$`)}}, - {Name: "gen_programmable_8", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc8 (.*)$`)}}, + {Name: "cpu_cycles", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x30a (.*)$`)}}, + {Name: "instructions", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x309 (.*)$`)}}, + {Name: "ref_cycles", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x30b (.*)$`)}}, + {Name: "topdown_slots", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x30c (.*)$`)}}, + {Name: "gen_programmable_1", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc1 (.*)$`)}}, + {Name: "gen_programmable_2", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc2 (.*)$`)}}, + {Name: "gen_programmable_3", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc3 (.*)$`)}}, + {Name: "gen_programmable_4", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc4 (.*)$`)}}, + {Name: "gen_programmable_5", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc5 (.*)$`)}}, + {Name: "gen_programmable_6", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc6 
(.*)$`)}}, + {Name: "gen_programmable_7", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc7 (.*)$`)}}, + {Name: "gen_programmable_8", Values: []string{extract.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc8 (.*)$`)}}, } } func systemSummaryTableValues(outputs map[string]script.ScriptOutput) []table.Field { - system := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + - " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) + - ", " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Version:\s*(.+?)$`) - baseboard := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Manufacturer:\s*(.+?)$`) + - " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Product Name:\s*(.+?)$`) + - ", " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Version:\s*(.+?)$`) - chassis := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Manufacturer:\s*(.+?)$`) + - " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Type:\s*(.+?)$`) + - ", " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Version:\s*(.+?)$`) + system := extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + + " " + extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) + + ", " + extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Version:\s*(.+?)$`) + baseboard := extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Manufacturer:\s*(.+?)$`) + + " " + extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Product Name:\s*(.+?)$`) + + ", " + extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Version:\s*(.+?)$`) + chassis := extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Manufacturer:\s*(.+?)$`) + + " " + extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Type:\s*(.+?)$`) + + ", " + extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Version:\s*(.+?)$`) return []table.Field{ {Name: "Host Name", Values: []string{strings.TrimSpace(outputs[script.HostnameScriptName].Stdout)}}, @@ -1654,94 +1654,94 @@ func systemSummaryTableValues(outputs map[string]script.ScriptOutput) []table.Fi {Name: "System", Values: []string{system}}, {Name: "Baseboard", Values: []string{baseboard}}, {Name: "Chassis", Values: []string{chassis}}, - {Name: "CPU Model", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name:\s*(.+)$`)}}, - {Name: "Architecture", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Architecture:\s*(.+)$`)}}, - {Name: "Microarchitecture", Values: []string{common.UarchFromOutput(outputs)}}, - {Name: "L3 Cache (instance/total)", Values: []string{common.L3FromOutput(outputs)}, Description: "The size of one L3 cache instance and the total L3 cache size for the system."}, - {Name: "Cores per Socket", Values: 
[]string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, - {Name: "Sockets", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, - {Name: "Hyperthreading", Values: []string{common.HyperthreadingFromOutput(outputs)}}, - {Name: "CPUs", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(s\):\s*(.+)$`)}}, - {Name: "Intel Turbo Boost", Values: []string{turboEnabledFromOutput(outputs)}}, - {Name: "Base Frequency", Values: []string{common.BaseFrequencyFromOutput(outputs)}, Description: "The minimum guaranteed speed of a single core under standard conditions."}, - {Name: "Maximum Frequency", Values: []string{common.MaxFrequencyFromOutput(outputs)}, Description: "The highest speed a single core can reach with Turbo Boost."}, - {Name: "All-core Maximum Frequency", Values: []string{common.AllCoreMaxFrequencyFromOutput(outputs)}, Description: "The highest speed all cores can reach simultaneously with Turbo Boost."}, - {Name: "NUMA Nodes", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`)}}, - {Name: "Prefetchers", Values: []string{common.PrefetchersSummaryFromOutput(outputs)}}, - {Name: "PPINs", Values: []string{ppinsFromOutput(outputs)}}, - {Name: "Accelerators Available [used]", Values: []string{acceleratorSummaryFromOutput(outputs)}}, - {Name: "Installed Memory", Values: []string{common.InstalledMemoryFromOutput(outputs)}}, - {Name: "Hugepagesize", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Hugepagesize:\s*(.+?)$`)}}, - {Name: "Transparent Huge Pages", Values: []string{common.ValFromRegexSubmatch(outputs[script.TransparentHugePagesScriptName].Stdout, `.*\[(.*)\].*`)}}, - {Name: "Automatic NUMA Balancing", Values: []string{numaBalancingFromOutput(outputs)}}, - {Name: "NIC", Values: []string{common.NICSummaryFromOutput(outputs)}}, - {Name: "Disk", Values: []string{common.DiskSummaryFromOutput(outputs)}}, - {Name: "BIOS", Values: []string{common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "0", `^Version:\s*(.+?)$`)}}, - {Name: "Microcode", Values: []string{common.ValFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`)}}, - {Name: "OS", Values: []string{common.OperatingSystemFromOutput(outputs)}}, - {Name: "Kernel", Values: []string{common.ValFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`)}}, - {Name: "TDP", Values: []string{common.TDPFromOutput(outputs)}}, - {Name: "Energy Performance Bias", Values: []string{common.EPBFromOutput(outputs)}}, + {Name: "CPU Model", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name:\s*(.+)$`)}}, + {Name: "Architecture", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Architecture:\s*(.+)$`)}}, + {Name: "Microarchitecture", Values: []string{extract.UarchFromOutput(outputs)}}, + {Name: "L3 Cache (instance/total)", Values: []string{extract.L3FromOutput(outputs)}, Description: "The size of one L3 cache instance and the total L3 cache size for the system."}, + {Name: "Cores per Socket", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, + {Name: "Sockets", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, 
`^Socket\(s\):\s*(.+)$`)}}, + {Name: "Hyperthreading", Values: []string{extract.HyperthreadingFromOutput(outputs)}}, + {Name: "CPUs", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(s\):\s*(.+)$`)}}, + {Name: "Intel Turbo Boost", Values: []string{extract.TurboEnabledFromOutput(outputs)}}, + {Name: "Base Frequency", Values: []string{extract.BaseFrequencyFromOutput(outputs)}, Description: "The minimum guaranteed speed of a single core under standard conditions."}, + {Name: "Maximum Frequency", Values: []string{extract.MaxFrequencyFromOutput(outputs)}, Description: "The highest speed a single core can reach with Turbo Boost."}, + {Name: "All-core Maximum Frequency", Values: []string{extract.AllCoreMaxFrequencyFromOutput(outputs)}, Description: "The highest speed all cores can reach simultaneously with Turbo Boost."}, + {Name: "NUMA Nodes", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`)}}, + {Name: "Prefetchers", Values: []string{extract.PrefetchersSummaryFromOutput(outputs)}}, + {Name: "PPINs", Values: []string{extract.PPINsFromOutput(outputs)}}, + {Name: "Accelerators Available [used]", Values: []string{extract.AcceleratorSummaryFromOutput(outputs)}}, + {Name: "Installed Memory", Values: []string{extract.InstalledMemoryFromOutput(outputs)}}, + {Name: "Hugepagesize", Values: []string{extract.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Hugepagesize:\s*(.+?)$`)}}, + {Name: "Transparent Huge Pages", Values: []string{extract.ValFromRegexSubmatch(outputs[script.TransparentHugePagesScriptName].Stdout, `.*\[(.*)\].*`)}}, + {Name: "Automatic NUMA Balancing", Values: []string{extract.NumaBalancingFromOutput(outputs)}}, + {Name: "NIC", Values: []string{extract.NICSummaryFromOutput(outputs)}}, + {Name: "Disk", Values: []string{extract.DiskSummaryFromOutput(outputs)}}, + {Name: "BIOS", Values: []string{extract.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "0", `^Version:\s*(.+?)$`)}}, + {Name: "Microcode", Values: []string{extract.ValFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`)}}, + {Name: "OS", Values: []string{extract.OperatingSystemFromOutput(outputs)}}, + {Name: "Kernel", Values: []string{extract.ValFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`)}}, + {Name: "TDP", Values: []string{extract.TDPFromOutput(outputs)}}, + {Name: "Energy Performance Bias", Values: []string{extract.EPBFromOutput(outputs)}}, {Name: "Scaling Governor", Values: []string{strings.TrimSpace(outputs[script.ScalingGovernorScriptName].Stdout)}}, {Name: "Scaling Driver", Values: []string{strings.TrimSpace(outputs[script.ScalingDriverScriptName].Stdout)}}, - {Name: "C-states", Values: []string{common.CstatesSummaryFromOutput(outputs)}}, - {Name: "Efficiency Latency Control", Values: []string{common.ELCSummaryFromOutput(outputs)}}, - {Name: "CVEs", Values: []string{cveSummaryFromOutput(outputs)}}, - {Name: "System Summary", Values: []string{systemSummaryFromOutput(outputs)}}, + {Name: "C-states", Values: []string{extract.CstatesSummaryFromOutput(outputs)}}, + {Name: "Efficiency Latency Control", Values: []string{extract.ELCSummaryFromOutput(outputs)}}, + {Name: "CVEs", Values: []string{extract.CVESummaryFromOutput(outputs)}}, + {Name: "System Summary", Values: []string{extract.SystemSummaryFromOutput(outputs)}}, } } func dimmDetails(dimm []string) (details string) { - if 
strings.Contains(dimm[common.SizeIdx], "No") { + if strings.Contains(dimm[extract.SizeIdx], "No") { details = "No Module Installed" } else { // Intel PMEM modules may have serial number appended to end of part number... // strip that off so it doesn't mess with color selection later - partNumber := dimm[common.PartIdx] - if strings.Contains(dimm[common.DetailIdx], "Synchronous Non-Volatile") && - dimm[common.ManufacturerIdx] == "Intel" && - strings.HasSuffix(dimm[common.PartIdx], dimm[common.SerialIdx]) { - partNumber = dimm[common.PartIdx][:len(dimm[common.PartIdx])-len(dimm[common.SerialIdx])] + partNumber := dimm[extract.PartIdx] + if strings.Contains(dimm[extract.DetailIdx], "Synchronous Non-Volatile") && + dimm[extract.ManufacturerIdx] == "Intel" && + strings.HasSuffix(dimm[extract.PartIdx], dimm[extract.SerialIdx]) { + partNumber = dimm[extract.PartIdx][:len(dimm[extract.PartIdx])-len(dimm[extract.SerialIdx])] } // example: "64GB DDR5 R2 Synchronous Registered (Buffered) Micron Technology MTC78ASF4G72PZ-2G6E1 6400 MT/s [6000 MT/s]" details = fmt.Sprintf("%s %s %s R%s %s %s %s [%s]", - strings.ReplaceAll(dimm[common.SizeIdx], " ", ""), - dimm[common.TypeIdx], - dimm[common.DetailIdx], - dimm[common.RankIdx], - dimm[common.ManufacturerIdx], + strings.ReplaceAll(dimm[extract.SizeIdx], " ", ""), + dimm[extract.TypeIdx], + dimm[extract.DetailIdx], + dimm[extract.RankIdx], + dimm[extract.ManufacturerIdx], partNumber, - strings.ReplaceAll(dimm[common.SpeedIdx], " ", ""), - strings.ReplaceAll(dimm[common.ConfiguredSpeedIdx], " ", "")) + strings.ReplaceAll(dimm[extract.SpeedIdx], " ", ""), + strings.ReplaceAll(dimm[extract.ConfiguredSpeedIdx], " ", "")) } return } func dimmTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - if len(tableValues.Fields) <= max(common.DerivedSocketIdx, common.DerivedChannelIdx, common.DerivedSlotIdx) || - len(tableValues.Fields[common.DerivedSocketIdx].Values) == 0 || - len(tableValues.Fields[common.DerivedChannelIdx].Values) == 0 || - len(tableValues.Fields[common.DerivedSlotIdx].Values) == 0 || - tableValues.Fields[common.DerivedSocketIdx].Values[0] == "" || - tableValues.Fields[common.DerivedChannelIdx].Values[0] == "" || - tableValues.Fields[common.DerivedSlotIdx].Values[0] == "" { + if len(tableValues.Fields) <= max(extract.DerivedSocketIdx, extract.DerivedChannelIdx, extract.DerivedSlotIdx) || + len(tableValues.Fields[extract.DerivedSocketIdx].Values) == 0 || + len(tableValues.Fields[extract.DerivedChannelIdx].Values) == 0 || + len(tableValues.Fields[extract.DerivedSlotIdx].Values) == 0 || + tableValues.Fields[extract.DerivedSocketIdx].Values[0] == "" || + tableValues.Fields[extract.DerivedChannelIdx].Values[0] == "" || + tableValues.Fields[extract.DerivedSlotIdx].Values[0] == "" { return report.DefaultHTMLTableRendererFunc(tableValues) } htmlColors := []string{"lightgreen", "orange", "aqua", "lime", "yellow", "beige", "magenta", "violet", "salmon", "pink"} var slotColorIndices = make(map[string]int) // socket -> channel -> slot -> dimm details var dimms = map[string]map[string]map[string]string{} - for dimmIdx := range tableValues.Fields[common.DerivedSocketIdx].Values { - if _, ok := dimms[tableValues.Fields[common.DerivedSocketIdx].Values[dimmIdx]]; !ok { - dimms[tableValues.Fields[common.DerivedSocketIdx].Values[dimmIdx]] = make(map[string]map[string]string) + for dimmIdx := range tableValues.Fields[extract.DerivedSocketIdx].Values { + if _, ok := dimms[tableValues.Fields[extract.DerivedSocketIdx].Values[dimmIdx]]; !ok { + 
dimms[tableValues.Fields[extract.DerivedSocketIdx].Values[dimmIdx]] = make(map[string]map[string]string) } - if _, ok := dimms[tableValues.Fields[common.DerivedSocketIdx].Values[dimmIdx]][tableValues.Fields[common.DerivedChannelIdx].Values[dimmIdx]]; !ok { - dimms[tableValues.Fields[common.DerivedSocketIdx].Values[dimmIdx]][tableValues.Fields[common.DerivedChannelIdx].Values[dimmIdx]] = make(map[string]string) + if _, ok := dimms[tableValues.Fields[extract.DerivedSocketIdx].Values[dimmIdx]][tableValues.Fields[extract.DerivedChannelIdx].Values[dimmIdx]]; !ok { + dimms[tableValues.Fields[extract.DerivedSocketIdx].Values[dimmIdx]][tableValues.Fields[extract.DerivedChannelIdx].Values[dimmIdx]] = make(map[string]string) } dimmValues := []string{} for _, field := range tableValues.Fields { dimmValues = append(dimmValues, field.Values[dimmIdx]) } - dimms[tableValues.Fields[common.DerivedSocketIdx].Values[dimmIdx]][tableValues.Fields[common.DerivedChannelIdx].Values[dimmIdx]][tableValues.Fields[common.DerivedSlotIdx].Values[dimmIdx]] = dimmDetails(dimmValues) + dimms[tableValues.Fields[extract.DerivedSocketIdx].Values[dimmIdx]][tableValues.Fields[extract.DerivedChannelIdx].Values[dimmIdx]][tableValues.Fields[extract.DerivedSlotIdx].Values[dimmIdx]] = dimmDetails(dimmValues) } var socketTableHeaders = []string{"Socket", ""} diff --git a/cmd/report/system.go b/cmd/report/system.go deleted file mode 100644 index 20695c99..00000000 --- a/cmd/report/system.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -package report - -import ( - "fmt" - "log/slog" - "regexp" - "strings" - "time" - - "perfspect/internal/common" - "perfspect/internal/cpus" - "perfspect/internal/script" - "perfspect/internal/table" -) - -func systemSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - // BASELINE: 1-node, 2x Intel® Xeon® , xx cores, 100W TDP, HT On/Off?, Turbo On/Off?, Total Memory xxx GB (xx slots/ xx GB/ xxxx MHz [run @ xxxx MHz] ), , , , . Test by Intel as of . - template := "1-node, %s, %sx %s, %s cores, %s TDP, %s %s, %s %s, Total Memory %s, BIOS %s, microcode %s, %s, %s, %s, %s. Test by Intel as of %s." - var systemType, socketCount, cpuModel, coreCount, tdp, htLabel, htOnOff, turboLabel, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion, date string - - // system type - systemType = common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) - // socket count - socketCount = common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(\d+)$`) - // CPU model - cpuModel = common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model name:\s*(.+?)$`) - // core count - coreCount = common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(\d+)$`) - // TDP - tdp = common.TDPFromOutput(outputs) - if tdp == "" { - tdp = "?" - } - vendor := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Vendor ID:\s*(.+)$`) - // hyperthreading - htLabel = "HT" - if vendor == cpus.AMDVendor { - htLabel = "SMT" - } - htOnOff = common.HyperthreadingFromOutput(outputs) - switch htOnOff { - case "Enabled": - htOnOff = "On" - case "Disabled": - htOnOff = "Off" - case "N/A": - htOnOff = "N/A" - default: - htOnOff = "?" 
- } - // turbo - turboLabel = "Turbo" - if vendor == cpus.AMDVendor { - turboLabel = "Boost" - } - turboOnOff = turboEnabledFromOutput(outputs) - if strings.Contains(strings.ToLower(turboOnOff), "enabled") { - turboOnOff = "On" - } else if strings.Contains(strings.ToLower(turboOnOff), "disabled") { - turboOnOff = "Off" - } else { - turboOnOff = "?" - } - // memory - installedMem = common.InstalledMemoryFromOutput(outputs) - // BIOS - biosVersion = common.ValFromRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, `^Version:\s*(.+?)$`) - // microcode - uCodeVersion = common.ValFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`) - // NICs - nics = common.NICSummaryFromOutput(outputs) - // disks - disks = common.DiskSummaryFromOutput(outputs) - // OS - operatingSystem = common.OperatingSystemFromOutput(outputs) - // kernel - kernelVersion = common.ValFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`) - // date - date = strings.TrimSpace(outputs[script.DateScriptName].Stdout) - // parse date so that we can format it - parsedTime, err := time.Parse("Mon Jan 2 15:04:05 MST 2006", date) // without AM/PM - if err != nil { - parsedTime, err = time.Parse("Mon Jan 2 15:04:05 AM MST 2006", date) // with AM/PM - } - if err == nil { - date = parsedTime.Format("January 2 2006") - } - - // put it all together - return fmt.Sprintf(template, systemType, socketCount, cpuModel, coreCount, tdp, htLabel, htOnOff, turboLabel, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion, date) -} - -func filesystemFieldValuesFromOutput(outputs map[string]script.ScriptOutput) []table.Field { - fieldValues := []table.Field{} - reFindmnt := regexp.MustCompile(`(.*)\s(.*)\s(.*)\s(.*)`) - for i, line := range strings.Split(outputs[script.DfScriptName].Stdout, "\n") { - if line == "" { - continue - } - fields := strings.Fields(line) - // "Mounted On" gets split into two fields, rejoin - if i == 0 && len(fields) >= 2 && fields[len(fields)-2] == "Mounted" && fields[len(fields)-1] == "on" { - fields[len(fields)-2] = "Mounted on" - fields = fields[:len(fields)-1] - for _, field := range fields { - fieldValues = append(fieldValues, table.Field{Name: field, Values: []string{}}) - } - // add an additional field - fieldValues = append(fieldValues, table.Field{Name: "Mount Options", Values: []string{}}) - continue - } - if len(fields) != len(fieldValues)-1 { - slog.Error("unexpected number of fields in df output", slog.String("line", line)) - return nil - } - for i, field := range fields { - fieldValues[i].Values = append(fieldValues[i].Values, field) - } - // get mount options for the current file system - var options string - for i, line := range strings.Split(outputs[script.FindMntScriptName].Stdout, "\n") { - if i == 0 { - continue - } - match := reFindmnt.FindStringSubmatch(line) - if match != nil && len(fields) > 5 { - target := match[1] - source := match[2] - if fields[0] == source && fields[5] == target { - options = match[4] - break - } - } - } - fieldValues[len(fieldValues)-1].Values = append(fieldValues[len(fieldValues)-1].Values, options) - } - return fieldValues -} diff --git a/cmd/root.go b/cmd/root.go index 659659f3..d947a72a 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -29,7 +29,7 @@ import ( "perfspect/cmd/metrics" "perfspect/cmd/report" "perfspect/cmd/telemetry" - "perfspect/internal/common" + "perfspect/internal/app" "perfspect/internal/util" "github.com/pkg/errors" @@ -46,17 +46,17 @@ const ( ) var 
examples = []string{ - fmt.Sprintf(" Generate a configuration report: $ %s report", common.AppName), - fmt.Sprintf(" Collect micro-architectural metrics: $ %s metrics", common.AppName), - fmt.Sprintf(" Generate a configuration report on a remote target: $ %s report --target 192.168.1.2 --user elaine --key ~/.ssh/id_rsa", common.AppName), - fmt.Sprintf(" Generate configuration reports for multiple remote targets: $ %s report --targets ./targets.yaml", common.AppName), + fmt.Sprintf(" Generate a configuration report: $ %s report", app.Name), + fmt.Sprintf(" Collect micro-architectural metrics: $ %s metrics", app.Name), + fmt.Sprintf(" Generate a configuration report on a remote target: $ %s report --target 192.168.1.2 --user elaine --key ~/.ssh/id_rsa", app.Name), + fmt.Sprintf(" Generate configuration reports for multiple remote targets: $ %s report --targets ./targets.yaml", app.Name), } // rootCmd represents the base command when called without any subcommands var rootCmd = &cobra.Command{ - Use: common.AppName, - Short: common.AppName, - Long: fmt.Sprintf(`%s (%s) is a multi-function utility for performance engineers analyzing software running on Intel Xeon platforms.`, LongAppName, common.AppName), + Use: app.Name, + Short: app.Name, + Long: fmt.Sprintf(`%s (%s) is a multi-function utility for performance engineers analyzing software running on Intel Xeon platforms.`, LongAppName, app.Name), Example: strings.Join(examples, "\n"), PersistentPreRunE: initializeApplication, // will only be run if command has a 'Run' function PersistentPostRunE: terminateApplication, // ... @@ -115,12 +115,12 @@ Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} rootCmd.AddGroup([]*cobra.Group{{ID: "other", Title: "Other Commands:"}}...) rootCmd.AddCommand(updateCmd) // Global (persistent) flags - rootCmd.PersistentFlags().BoolVar(&flagDebug, common.FlagDebugName, false, "enable debug logging and retain temporary directories") - rootCmd.PersistentFlags().BoolVar(&flagSyslog, common.FlagSyslogName, false, "write logs to syslog instead of a file") - rootCmd.PersistentFlags().BoolVar(&flagLogStdOut, common.FlagLogStdOutName, false, "write logs to stdout") - rootCmd.PersistentFlags().StringVar(&flagOutputDir, common.FlagOutputDirName, "", "override the output directory") - rootCmd.PersistentFlags().StringVar(&flagTargetTempRoot, common.FlagTargetTempRootName, "", "override the temporary target directory, must exist and allow execution") - rootCmd.PersistentFlags().BoolVar(&flagNoCheckUpdate, common.FlagNoCheckUpdateName, false, "skip application update check") + rootCmd.PersistentFlags().BoolVar(&flagDebug, app.FlagDebugName, false, "enable debug logging and retain temporary directories") + rootCmd.PersistentFlags().BoolVar(&flagSyslog, app.FlagSyslogName, false, "write logs to syslog instead of a file") + rootCmd.PersistentFlags().BoolVar(&flagLogStdOut, app.FlagLogStdOutName, false, "write logs to stdout") + rootCmd.PersistentFlags().StringVar(&flagOutputDir, app.FlagOutputDirName, "", "override the output directory") + rootCmd.PersistentFlags().StringVar(&flagTargetTempRoot, app.FlagTargetTempRootName, "", "override the temporary target directory, must exist and allow execution") + rootCmd.PersistentFlags().BoolVar(&flagNoCheckUpdate, app.FlagNoCheckUpdateName, false, "skip application update check") } // Execute adds all child commands to the root command and sets flags appropriately. 
@@ -152,7 +152,7 @@ func initializeApplication(cmd *cobra.Command, args []string) error { } } else { // set output dir path to app name + timestamp - outputDirName := common.AppName + "_" + timestamp + outputDirName := app.Name + "_" + timestamp var err error // outputDir will be in current working directory outputDir, err = util.AbsPath(outputDirName) @@ -186,16 +186,16 @@ func initializeApplication(cmd *cobra.Command, args []string) error { } else { // log to file // open log file in current directory var err error - gLogFile, err = os.OpenFile(common.AppName+".log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) // #nosec G302 + gLogFile, err = os.OpenFile(app.Name+".log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) // #nosec G302 if err != nil { fmt.Printf("Error: failed to open log file: %v\n", err) os.Exit(1) } slog.SetDefault(slog.New(slog.NewTextHandler(gLogFile, &logOpts))) } - slog.Info("Starting up", slog.String("app", common.AppName), slog.String("version", gVersion), slog.Int("PID", os.Getpid()), slog.String("arguments", strings.Join(os.Args, " "))) + slog.Info("Starting up", slog.String("app", app.Name), slog.String("version", gVersion), slog.Int("PID", os.Getpid()), slog.String("arguments", strings.Join(os.Args, " "))) // creat local temp directory - localTempDir, err := os.MkdirTemp(os.TempDir(), fmt.Sprintf("%s.tmp.", common.AppName)) + localTempDir, err := os.MkdirTemp(os.TempDir(), fmt.Sprintf("%s.tmp.", app.Name)) if err != nil { fmt.Printf("Error: failed to create temp dir: %v\n", err) os.Exit(1) @@ -208,8 +208,8 @@ func initializeApplication(cmd *cobra.Command, args []string) error { cmd.Parent().SetContext( context.WithValue( context.Background(), - common.AppContext{}, - common.AppContext{ + app.Context{}, + app.Context{ Timestamp: timestamp, OutputDir: outputDir, LocalTempDir: localTempDir, @@ -240,7 +240,7 @@ func initializeApplication(cmd *cobra.Command, args []string) error { if err != nil { slog.Error(err.Error()) } else if updateAvailable { - fmt.Fprintf(os.Stderr, "A new version (%s) of %s is available!\nPlease run '%s update' to update to the latest version.\n\n", latestManifest.Version, common.AppName, common.AppName) + fmt.Fprintf(os.Stderr, "A new version (%s) of %s is available!\nPlease run '%s update' to update to the latest version.\n\n", latestManifest.Version, app.Name, app.Name) } else { slog.Debug("No updates available") } @@ -258,9 +258,9 @@ func terminateApplication(cmd *cobra.Command, args []string) error { ctx = cmd.Parent().Context() } if ctx != nil { - ctxValue := ctx.Value(common.AppContext{}) + ctxValue := ctx.Value(app.Context{}) if ctxValue != nil { - if appContext, ok := ctxValue.(common.AppContext); ok { + if appContext, ok := ctxValue.(app.Context); ok { // clean up temp directory if debug flag is not set if appContext.LocalTempDir != "" && !flagDebug { err := os.RemoveAll(appContext.LocalTempDir) @@ -268,7 +268,7 @@ func terminateApplication(cmd *cobra.Command, args []string) error { slog.Error("error cleaning up temp directory", slog.String("tempDir", appContext.LocalTempDir), slog.String("error", err.Error())) } } - slog.Info("Shutting down", slog.String("app", common.AppName), slog.String("version", gVersion), slog.Int("PID", os.Getpid()), slog.String("arguments", strings.Join(os.Args, " "))) + slog.Info("Shutting down", slog.String("app", app.Name), slog.String("version", gVersion), slog.Int("PID", os.Getpid()), slog.String("arguments", strings.Join(os.Args, " "))) if gLogFile != nil { err := gLogFile.Close() if err != nil { @@ -332,7 
+332,7 @@ var updateCmd = &cobra.Command{ if !onIntelNetwork() { return fmt.Errorf("update command is only available on the Intel network") } - appContext := cmd.Parent().Context().Value(common.AppContext{}).(common.AppContext) + appContext := cmd.Parent().Context().Value(app.Context{}).(app.Context) localTempDir := appContext.LocalTempDir updateAvailable, latestManifest, err := checkForUpdates(gVersion) if err != nil { @@ -340,7 +340,7 @@ var updateCmd = &cobra.Command{ fmt.Printf("Error: update check failed: %v\n", err) return err } else if updateAvailable { - fmt.Printf("Updating %s to version %s...\n", common.AppName, latestManifest.Version) + fmt.Printf("Updating %s to version %s...\n", app.Name, latestManifest.Version) err = updateApp(latestManifest, localTempDir) if err != nil { slog.Error("Failed to update application", slog.String("error", err.Error())) @@ -349,7 +349,7 @@ var updateCmd = &cobra.Command{ } } else { slog.Info("No updates available") - fmt.Printf("No updates available for %s.\n", common.AppName) + fmt.Printf("No updates available for %s.\n", app.Name) } return nil }, diff --git a/cmd/telemetry/telemetry.go b/cmd/telemetry/telemetry.go index aaf40106..1833f3eb 100644 --- a/cmd/telemetry/telemetry.go +++ b/cmd/telemetry/telemetry.go @@ -12,10 +12,11 @@ import ( "strconv" "strings" - "perfspect/internal/common" + "perfspect/internal/app" "perfspect/internal/report" "perfspect/internal/script" "perfspect/internal/table" + "perfspect/internal/workflow" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -26,10 +27,10 @@ import ( const cmdName = "telemetry" var examples = []string{ - fmt.Sprintf(" Telemetry from local host: $ %s %s", common.AppName, cmdName), - fmt.Sprintf(" Telemetry from remote target: $ %s %s --target 192.168.1.1 --user fred --key fred_key", common.AppName, cmdName), - fmt.Sprintf(" Memory telemetry for 60 seconds: $ %s %s --memory --duration 60", common.AppName, cmdName), - fmt.Sprintf(" Telemetry from multiple targets: $ %s %s --targets targets.yaml", common.AppName, cmdName), + fmt.Sprintf(" Telemetry from local host: $ %s %s", app.Name, cmdName), + fmt.Sprintf(" Telemetry from remote target: $ %s %s --target 192.168.1.1 --user fred --key fred_key", app.Name, cmdName), + fmt.Sprintf(" Memory telemetry for 60 seconds: $ %s %s --memory --duration 60", app.Name, cmdName), + fmt.Sprintf(" Telemetry from multiple targets: $ %s %s --targets targets.yaml", app.Name, cmdName), } var Cmd = &cobra.Command{ @@ -95,7 +96,7 @@ const ( var telemetrySummaryTableName = "Telemetry Summary" -var categories = []common.Category{ +var categories = []app.Category{ {FlagName: flagCPUName, FlagVar: &flagCPU, DefaultValue: false, Help: "monitor cpu utilization", Tables: []table.TableDefinition{tableDefinitions[CPUUtilizationTelemetryTableName], tableDefinitions[UtilizationCategoriesTelemetryTableName]}}, {FlagName: flagIPCName, FlagVar: &flagIPC, DefaultValue: false, Help: "monitor IPC", Tables: []table.TableDefinition{tableDefinitions[IPCTelemetryTableName]}}, {FlagName: flagC6Name, FlagVar: &flagC6, DefaultValue: false, Help: "monitor C6 residency", Tables: []table.TableDefinition{tableDefinitions[C6TelemetryTableName]}}, @@ -119,16 +120,16 @@ func init() { for _, cat := range categories { Cmd.Flags().BoolVar(cat.FlagVar, cat.FlagName, cat.DefaultValue, cat.Help) } - Cmd.Flags().StringVar(&common.FlagInput, common.FlagInputName, "", "") + Cmd.Flags().StringVar(&app.FlagInput, app.FlagInputName, "", "") Cmd.Flags().BoolVar(&flagAll, flagAllName, true, "") - 
Cmd.Flags().StringSliceVar(&common.FlagFormat, common.FlagFormatName, []string{report.FormatAll}, "") + Cmd.Flags().StringSliceVar(&app.FlagFormat, app.FlagFormatName, []string{report.FormatAll}, "") Cmd.Flags().IntVar(&flagDuration, flagDurationName, 0, "") Cmd.Flags().IntVar(&flagInterval, flagIntervalName, 2, "") Cmd.Flags().IntVar(&flagInstrMixPid, flagInstrMixPidName, 0, "") Cmd.Flags().IntVar(&flagInstrMixFrequency, flagInstrMixFrequencyName, instrmixFrequencyDefaultSystemWide, "") Cmd.Flags().BoolVar(&flagNoSystemSummary, flagNoSystemSummaryName, false, "") - common.AddTargetFlags(Cmd) + workflow.AddTargetFlags(Cmd) Cmd.SetUsageFunc(usageFunc) } @@ -158,27 +159,27 @@ func usageFunc(cmd *cobra.Command) error { return nil } -func getFlagGroups() []common.FlagGroup { - var groups []common.FlagGroup - flags := []common.Flag{ +func getFlagGroups() []app.FlagGroup { + var groups []app.FlagGroup + flags := []app.Flag{ { Name: flagAllName, Help: "collect telemetry for all categories", }, } for _, cat := range categories { - flags = append(flags, common.Flag{ + flags = append(flags, app.Flag{ Name: cat.FlagName, Help: cat.Help, }) } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Categories", Flags: flags, }) - flags = []common.Flag{ + flags = []app.Flag{ { - Name: common.FlagFormatName, + Name: app.FlagFormatName, Help: fmt.Sprintf("choose output format(s) from: %s", strings.Join(append([]string{report.FormatAll}, report.FormatOptions...), ", ")), }, { @@ -202,18 +203,18 @@ func getFlagGroups() []common.FlagGroup { Help: "do not include system summary table in report", }, } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Other Options", Flags: flags, }) - groups = append(groups, common.GetTargetFlagGroup()) - flags = []common.Flag{ + groups = append(groups, workflow.GetTargetFlagGroup()) + flags = []app.Flag{ { - Name: common.FlagInputName, + Name: app.FlagInputName, Help: "\".raw\" file, or directory containing \".raw\" files. Will skip data collection and use raw data for reports.", }, } - groups = append(groups, common.FlagGroup{ + groups = append(groups, app.FlagGroup{ GroupName: "Advanced Options", Flags: flags, }) @@ -231,29 +232,29 @@ func validateFlags(cmd *cobra.Command, args []string) error { } } // validate format options - for _, format := range common.FlagFormat { + for _, format := range app.FlagFormat { formatOptions := []string{report.FormatAll} formatOptions = append(formatOptions, report.FormatOptions...) 
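[Editor's illustrative aside, not part of this patch: the hunks above show the pattern this refactor preserves, where an app.Category ties one boolean flag to the tables it enables, the flag is registered in init(), and runCmd() later appends the selected categories' tables. The sketch below mirrors that pattern under assumptions; the "power" category name, the "Power" table, and the main() harness are hypothetical and exist only for illustration.]

package main

import (
	"fmt"

	"github.com/spf13/cobra"

	"perfspect/internal/app"
	"perfspect/internal/table"
)

func main() {
	// Hypothetical category: one flag gating one table, mirroring cmd/telemetry.
	var flagPower bool
	power := app.Category{
		FlagName:     "power",
		FlagVar:      &flagPower,
		DefaultValue: false,
		Help:         "monitor power",
		Tables:       []table.TableDefinition{{Name: "Power"}},
	}
	cmd := &cobra.Command{Use: "example"}
	// init(): register the category's flag on the command
	cmd.Flags().BoolVar(power.FlagVar, power.FlagName, power.DefaultValue, power.Help)
	// simulate the user passing --power on the command line
	_ = cmd.Flags().Set(power.FlagName, "true")
	// runCmd(): collect only the tables for selected categories
	var tables []table.TableDefinition
	if *power.FlagVar {
		tables = append(tables, power.Tables...)
	}
	fmt.Printf("selected %d table(s)\n", len(tables))
}

[End of aside; the patch continues below.]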
if !slices.Contains(formatOptions, format) { - return common.FlagValidationError(cmd, fmt.Sprintf("format options are: %s", strings.Join(formatOptions, ", "))) + return workflow.FlagValidationError(cmd, fmt.Sprintf("format options are: %s", strings.Join(formatOptions, ", "))) } } if flagInterval < 1 { - return common.FlagValidationError(cmd, "interval must be 1 or greater") + return workflow.FlagValidationError(cmd, "interval must be 1 or greater") } if flagDuration < 0 { - return common.FlagValidationError(cmd, "duration must be 0 or greater") + return workflow.FlagValidationError(cmd, "duration must be 0 or greater") } if flagInstrMixFrequency < 100000 { // 100,000 instructions is the minimum frequency - return common.FlagValidationError(cmd, "instruction mix frequency must be 100,000 or greater to limit overhead") + return workflow.FlagValidationError(cmd, "instruction mix frequency must be 100,000 or greater to limit overhead") } // warn if instruction mix frequency is low when collecting system wide if flagInstrMix && flagInstrMixPid == 0 && flagInstrMixFrequency < instrmixFrequencyDefaultSystemWide { slog.Warn("instruction mix frequency is set to a value lower than default for system wide collection, consider using a higher frequency to limit collection overhead", slog.Int("frequency", flagInstrMixFrequency)) } // common target flags - if err := common.ValidateTargetFlags(cmd); err != nil { - return common.FlagValidationError(cmd, err.Error()) + if err := workflow.ValidateTargetFlags(cmd); err != nil { + return workflow.FlagValidationError(cmd, err.Error()) } return nil } @@ -262,7 +263,7 @@ func runCmd(cmd *cobra.Command, args []string) error { var tables []table.TableDefinition // add system summary table if not disabled if !flagNoSystemSummary { - tables = append(tables, common.TableDefinitions[common.SystemSummaryTableName]) + tables = append(tables, app.TableDefinitions[app.SystemSummaryTableName]) } // add category tables for _, cat := range categories { @@ -293,16 +294,16 @@ func runCmd(cmd *cobra.Command, args []string) error { tables = append(tables, tableDefinitions[PDUTelemetryTableName]) } // include telemetry summary table if all telemetry options are selected - var summaryFunc common.SummaryFunc + var summaryFunc app.SummaryFunc if flagAll { summaryFunc = summaryFromTableValues } // include insights table if all categories are selected - var insightsFunc common.InsightsFunc + var insightsFunc app.InsightsFunc if flagAll { - insightsFunc = common.DefaultInsightsFunc + insightsFunc = workflow.DefaultInsightsFunc } - reportingCommand := common.ReportingCommand{ + reportingCommand := workflow.ReportingCommand{ Cmd: cmd, ReportNamePost: "telem", ScriptParams: map[string]string{ diff --git a/cmd/telemetry/telemetry_tables.go b/cmd/telemetry/telemetry_tables.go index 9076bc78..1cce8228 100644 --- a/cmd/telemetry/telemetry_tables.go +++ b/cmd/telemetry/telemetry_tables.go @@ -7,8 +7,8 @@ import ( "encoding/csv" "fmt" "log/slog" - "perfspect/internal/common" "perfspect/internal/cpus" + "perfspect/internal/extract" "perfspect/internal/script" "perfspect/internal/table" "regexp" @@ -367,7 +367,7 @@ func powerTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.F fields := []table.Field{ {Name: "Time"}, } - packageRows, err := common.TurbostatPackageRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"PkgWatt", "RAMWatt"}) + packageRows, err := extract.TurbostatPackageRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"PkgWatt", 
"RAMWatt"}) if err != nil { slog.Warn(err.Error()) return []table.Field{} @@ -400,12 +400,12 @@ func temperatureTelemetryTableValues(outputs map[string]script.ScriptOutput) []t {Name: "Time"}, {Name: "Core (Avg.)"}, } - platformRows, err := common.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"CoreTmp"}) + platformRows, err := extract.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"CoreTmp"}) if err != nil { slog.Warn(err.Error()) // not all systems report core temperature, e.g., cloud VMs return []table.Field{} } - packageRows, err := common.TurbostatPackageRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"PkgTmp"}) + packageRows, err := extract.TurbostatPackageRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"PkgTmp"}) if err != nil { // not an error, just means no package rows (package temperature) slog.Warn(err.Error()) @@ -440,12 +440,12 @@ func frequencyTelemetryTableValues(outputs map[string]script.ScriptOutput) []tab {Name: "Time"}, {Name: "Core (Avg.)"}, } - platformRows, err := common.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"Bzy_MHz"}) + platformRows, err := extract.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"Bzy_MHz"}) if err != nil { slog.Warn(err.Error()) return []table.Field{} } - packageRows, err := common.TurbostatPackageRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"UncMHz"}) + packageRows, err := extract.TurbostatPackageRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"UncMHz"}) if err != nil { // not an error, just means no package rows (uncore frequency) slog.Warn(err.Error()) @@ -480,7 +480,7 @@ func ipcTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Fie {Name: "Time"}, {Name: "Core (Avg.)"}, } - platformRows, err := common.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"IPC"}) + platformRows, err := extract.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"IPC"}) if err != nil { slog.Warn(err.Error()) return []table.Field{} @@ -505,7 +505,7 @@ func c6TelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Fiel {Name: "Package (Avg.)"}, {Name: "Core (Avg.)"}, } - platformRows, err := common.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"C6%", "CPU%c6"}) + platformRows, err := extract.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"C6%", "CPU%c6"}) if err != nil { slog.Warn(err.Error()) return []table.Field{} diff --git a/go.mod b/go.mod index 51077d5a..ba7a51e2 100644 --- a/go.mod +++ b/go.mod @@ -3,14 +3,16 @@ module perfspect go 1.25 replace ( - perfspect/internal/common => ./internal/common - perfspect/internal/cpudb => ./internal/cpudb + perfspect/internal/app => ./internal/app + perfspect/internal/cpus => ./internal/cpus + perfspect/internal/extract => ./internal/extract perfspect/internal/progress => ./internal/progress perfspect/internal/report => ./internal/report perfspect/internal/script => ./internal/script perfspect/internal/table => ./internal/table perfspect/internal/target => ./internal/target perfspect/internal/util => ./internal/util + perfspect/internal/workflow => ./internal/workflow ) require ( diff --git a/internal/app/app.go b/internal/app/app.go new file mode 100644 index 00000000..d1902713 --- /dev/null +++ b/internal/app/app.go @@ -0,0 
+1,82 @@ +// Package app defines application-wide types, constants, and context +// that are shared across multiple commands. +package app + +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +import ( + "os" + "path/filepath" + "perfspect/internal/script" + "perfspect/internal/table" +) + +// Name is the name of the application executable. +var Name = filepath.Base(os.Args[0]) + +// Context represents the application context that can be accessed from all commands. +type Context struct { + Timestamp string // Timestamp is the timestamp when the application was started. + OutputDir string // OutputDir is the directory where the application will write output files. + LocalTempDir string // LocalTempDir is the temp directory on the local host (created by the application). + LogFilePath string // LogFilePath is the path to the log file. + TargetTempRoot string // TargetTempRoot is the path to a directory on the target host where the application can create temporary directories. + Version string // Version is the version of the application. + Debug bool // Debug is true if the application is running in debug mode. +} + +// Table name constants used across multiple commands. +const ( + TableNameInsights = "Insights" + TableNamePerfspect = "PerfSpect" +) + +// Flag names for input and format flags used by reporting commands. +const ( + FlagInputName = "input" + FlagFormatName = "format" +) + +// Global flag variables for reporting commands. +var ( + FlagInput string + FlagFormat []string +) + +// Category represents a configuration category with associated tables and flags. +type Category struct { + FlagName string + Tables []table.TableDefinition + FlagVar *bool + DefaultValue bool + Help string +} + +// Flag names for flags defined in the root command, but sometimes used in other commands. +const ( + FlagDebugName = "debug" + FlagSyslogName = "syslog" + FlagLogStdOutName = "log-stdout" + FlagOutputDirName = "output" + FlagTargetTempRootName = "tempdir" + FlagNoCheckUpdateName = "noupdate" +) + +// Flag represents a command-line flag with its name and help text. +type Flag struct { + Name string + Help string +} + +// FlagGroup represents a group of related flags with a group name. +type FlagGroup struct { + GroupName string + Flags []Flag +} + +// SummaryFunc is a function type for generating summary table values from processed tables. +type SummaryFunc func([]table.TableValues, map[string]script.ScriptOutput) table.TableValues + +// InsightsFunc is a function type for generating insights from processed tables. +type InsightsFunc SummaryFunc diff --git a/internal/app/app_tables.go b/internal/app/app_tables.go new file mode 100644 index 00000000..44a41f88 --- /dev/null +++ b/internal/app/app_tables.go @@ -0,0 +1,82 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package app + +// This file contains common table definitions used across multiple commands. + +import ( + "strings" + + "perfspect/internal/extract" + "perfspect/internal/script" + "perfspect/internal/table" +) + +// SystemSummaryTableName is the name of the system summary table. +const SystemSummaryTableName = "System Summary" + +// TableDefinitions contains table definitions used across multiple commands. 
+var TableDefinitions = map[string]table.TableDefinition{ + SystemSummaryTableName: { + Name: SystemSummaryTableName, + MenuLabel: SystemSummaryTableName, + HasRows: false, + ScriptNames: []string{ + script.HostnameScriptName, + script.DateScriptName, + script.LscpuScriptName, + script.LscpuCacheScriptName, + script.LspciBitsScriptName, + script.LspciDevicesScriptName, + script.SpecCoreFrequenciesScriptName, + script.MeminfoScriptName, + script.NicInfoScriptName, + script.DiskInfoScriptName, + script.UnameScriptName, + script.EtcReleaseScriptName, + script.PackagePowerLimitName, + script.EpbScriptName, + script.ScalingDriverScriptName, + script.ScalingGovernorScriptName, + script.CstatesScriptName, + script.ElcScriptName, + script.ArmImplementerScriptName, + script.ArmPartScriptName, + script.ArmDmidecodePartScriptName, + script.DmidecodeScriptName, + }, + FieldsFunc: BriefSummaryTableValues}, +} + +// BriefSummaryTableValues returns the field values for the system summary table. +func BriefSummaryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + memory := extract.InstalledMemoryFromOutput(outputs) + if memory == "" { + memory = extract.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemTotal:\s*(.+?)$`) + } + return []table.Field{ + {Name: "Host Name", Values: []string{strings.TrimSpace(outputs[script.HostnameScriptName].Stdout)}}, + {Name: "Time", Values: []string{strings.TrimSpace(outputs[script.DateScriptName].Stdout)}}, + {Name: "CPU Model", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name:\s*(.+)$`)}}, + {Name: "Microarchitecture", Values: []string{extract.UarchFromOutput(outputs)}}, + {Name: "TDP", Values: []string{extract.TDPFromOutput(outputs)}}, + {Name: "Sockets", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, + {Name: "Cores per Socket", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, + {Name: "Hyperthreading", Values: []string{extract.HyperthreadingFromOutput(outputs)}}, + {Name: "CPUs", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(s\):\s*(.+)$`)}}, + {Name: "NUMA Nodes", Values: []string{extract.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`)}}, + {Name: "Scaling Driver", Values: []string{strings.TrimSpace(outputs[script.ScalingDriverScriptName].Stdout)}}, + {Name: "Scaling Governor", Values: []string{strings.TrimSpace(outputs[script.ScalingGovernorScriptName].Stdout)}}, + {Name: "C-states", Values: []string{extract.CstatesSummaryFromOutput(outputs)}}, + {Name: "Maximum Frequency", Values: []string{extract.MaxFrequencyFromOutput(outputs)}, Description: "The highest speed a single core can reach with Turbo Boost."}, + {Name: "All-core Maximum Frequency", Values: []string{extract.AllCoreMaxFrequencyFromOutput(outputs)}, Description: "The highest speed all cores can reach simultaneously with Turbo Boost."}, + {Name: "Energy Performance Bias", Values: []string{extract.EPBFromOutput(outputs)}}, + {Name: "Efficiency Latency Control", Values: []string{extract.ELCSummaryFromOutput(outputs)}}, + {Name: "Memory", Values: []string{memory}}, + {Name: "NIC", Values: []string{extract.NICSummaryFromOutput(outputs)}}, + {Name: "Disk", Values: []string{extract.DiskSummaryFromOutput(outputs)}}, + {Name: "OS", Values: []string{extract.OperatingSystemFromOutput(outputs)}}, + 
{Name: "Kernel", Values: []string{extract.ValFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`)}}, + } +} diff --git a/internal/common/common.go b/internal/common/common.go deleted file mode 100644 index 6dc5f336..00000000 --- a/internal/common/common.go +++ /dev/null @@ -1,755 +0,0 @@ -// Package common defines data structures and functions that are used by multiple -// application commands, e.g., report, telemetry, flame, lock. -package common - -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -import ( - "context" - "errors" - "fmt" - "log/slog" - "os" - "os/exec" - "os/signal" - "path/filepath" - "perfspect/internal/progress" - "perfspect/internal/report" - "perfspect/internal/script" - "perfspect/internal/table" - "perfspect/internal/target" - "perfspect/internal/util" - "strings" - "syscall" - "time" - - "slices" - - "github.com/spf13/cobra" -) - -// Flag names for flags defined in the root command, but sometimes used in other commands. -const ( - FlagDebugName = "debug" - FlagSyslogName = "syslog" - FlagLogStdOutName = "log-stdout" - FlagOutputDirName = "output" - FlagTargetTempRootName = "tempdir" - FlagNoCheckUpdateName = "noupdate" -) - -var AppName = filepath.Base(os.Args[0]) - -// AppContext represents the application context that can be accessed from all commands. -type AppContext struct { - Timestamp string // Timestamp is the timestamp when the application was started. - OutputDir string // OutputDir is the directory where the application will write output files. - LocalTempDir string // LocalTempDir is the temp directory on the local host (created by the application). - LogFilePath string // LogFilePath is the path to the log file. - TargetTempRoot string // TargetTempRoot is the path to a directory on the target host where the application can create temporary directories. - Version string // Version is the version of the application. - Debug bool // Debug is true if the application is running in debug mode. -} - -type Flag struct { - Name string - Help string -} -type FlagGroup struct { - GroupName string - Flags []Flag -} - -type TargetScriptOutputs struct { - TargetName string - ScriptOutputs map[string]script.ScriptOutput - Tables []table.TableDefinition -} - -func (tso *TargetScriptOutputs) GetScriptOutputs() map[string]script.ScriptOutput { - return tso.ScriptOutputs -} - -const ( - TableNameInsights = "Insights" - TableNamePerfspect = "PerfSpect" -) - -type Category struct { - FlagName string - Tables []table.TableDefinition - FlagVar *bool - DefaultValue bool - Help string -} - -var ( - FlagInput string - FlagFormat []string -) - -const ( - FlagInputName = "input" - FlagFormatName = "format" -) - -type SummaryFunc func([]table.TableValues, map[string]script.ScriptOutput) table.TableValues -type InsightsFunc SummaryFunc -type AdhocFunc func(AppContext, map[string]script.ScriptOutput, target.Target, progress.MultiSpinnerUpdateFunc) error - -type ReportingCommand struct { - Cmd *cobra.Command - ReportNamePost string - Tables []table.TableDefinition - ScriptParams map[string]string - SummaryFunc SummaryFunc - SummaryTableName string // e.g., the benchmark or telemetry summary table - SummaryBeforeTableName string // the name of the table that the summary table should be placed before in the report - InsightsFunc InsightsFunc - AdhocFunc AdhocFunc - SystemSummaryTableName string // Optional: Only affects xlsx format reports. 
If set, the table with this name will be used as the "Brief" sheet in the xlsx report. If empty or unset, no "Brief" sheet is generated. -} - -// Run is the common flow/logic for all reporting commands, i.e., 'report', 'telemetry', 'flame', 'lock' -// The individual commands populate the ReportingCommand struct with the details specific to the command -// and then call this Run function. -func (rc *ReportingCommand) Run() error { - // appContext is the application context that holds common data and resources. - appContext := rc.Cmd.Parent().Context().Value(AppContext{}).(AppContext) - timestamp := appContext.Timestamp - localTempDir := appContext.LocalTempDir - outputDir := appContext.OutputDir - logFilePath := appContext.LogFilePath - // create output directory - err := util.CreateDirectoryIfNotExists(outputDir, 0755) // #nosec G301 - if err != nil { - err = fmt.Errorf("failed to create output directory: %w", err) - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - slog.Error(err.Error()) - rc.Cmd.SilenceUsage = true - return err - } - - var myTargets []target.Target - var orderedTargetScriptOutputs []TargetScriptOutputs - if FlagInput != "" { - var err error - orderedTargetScriptOutputs, err = outputsFromInput(rc.Tables, rc.SummaryTableName) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - slog.Error(err.Error()) - rc.Cmd.SilenceUsage = true - return err - } - } else { - // get the targets - var targetErrs []error - var err error - myTargets, targetErrs, err = GetTargets(rc.Cmd, elevatedPrivilegesRequired(rc.Tables), false, localTempDir) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - slog.Error(err.Error()) - rc.Cmd.SilenceUsage = true - return err - } - // schedule the cleanup of the temporary directory on each target (if not debugging) - if rc.Cmd.Parent().PersistentFlags().Lookup("debug").Value.String() != "true" { - for _, myTarget := range myTargets { - if myTarget.GetTempDirectory() != "" { - deferTarget := myTarget // create a new variable to capture the current value - defer func(deferTarget target.Target) { - err := deferTarget.RemoveTempDirectory() - if err != nil { - slog.Error("error removing target temporary directory", slog.String("error", err.Error())) - } - }(deferTarget) - } - } - } - // setup and start the progress indicator - multiSpinner := progress.NewMultiSpinner() - for _, target := range myTargets { - err := multiSpinner.AddSpinner(target.GetName()) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - slog.Error(err.Error()) - rc.Cmd.SilenceUsage = true - return err - } - } - multiSpinner.Start() - // remove targets that had errors - var indicesToRemove []int - for i := range targetErrs { - if targetErrs[i] != nil { - _ = multiSpinner.Status(myTargets[i].GetName(), fmt.Sprintf("Error: %v", targetErrs[i])) - indicesToRemove = append(indicesToRemove, i) - } - } - for i := len(indicesToRemove) - 1; i >= 0; i-- { - myTargets = slices.Delete(myTargets, indicesToRemove[i], indicesToRemove[i]+1) - } - // set up signal handler to help with cleaning up child processes on ctrl-c/SIGINT or SIGTERM - configureSignalHandler(myTargets, multiSpinner.Status) - // collect data from targets - orderedTargetScriptOutputs, err = outputsFromTargets(rc.Cmd, myTargets, rc.Tables, rc.ScriptParams, multiSpinner.Status, localTempDir) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - slog.Error(err.Error()) - rc.Cmd.SilenceUsage = true - return err - } - // stop the progress indicator - multiSpinner.Finish() - fmt.Println() - // exit 
with error if no targets remain - if len(myTargets) == 0 { - err := fmt.Errorf("no successful targets found") - slog.Error(err.Error()) - rc.Cmd.SilenceUsage = true - return err - } - } - // create the raw report before processing the data, so that we can save the raw data even if there is an error while processing - var rawReports []string - rawReports, err = rc.createRawReports(appContext, orderedTargetScriptOutputs) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - slog.Error(err.Error()) - rc.Cmd.SilenceUsage = true - return err - } - // check report formats - formats := FlagFormat - if slices.Contains(formats, report.FormatAll) { - formats = report.FormatOptions - } - // process the collected data and create the requested report(s) - reportFilePaths, err := rc.createReports(appContext, orderedTargetScriptOutputs, formats) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - slog.Error(err.Error()) - rc.Cmd.SilenceUsage = true - return err - } - // if we are debugging, create a tgz archive with the raw reports, formatted reports, and log file - if appContext.Debug { - archiveFiles := append(reportFilePaths, rawReports...) - if len(archiveFiles) > 0 { - if logFilePath != "" { - archiveFiles = append(archiveFiles, logFilePath) - } - err := util.CreateFlatTGZ(archiveFiles, filepath.Join(outputDir, AppName+"_"+timestamp+".tgz")) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - slog.Error(err.Error()) - rc.Cmd.SilenceUsage = true - return err - } - } - } - if len(reportFilePaths) > 0 { - fmt.Println("Report files:") - } - for _, reportFilePath := range reportFilePaths { - fmt.Printf(" %s\n", reportFilePath) - } - // lastly, run any adhoc actions - if rc.AdhocFunc != nil { - fmt.Println() - // setup and start the progress indicator - multiSpinner := progress.NewMultiSpinner() - for _, target := range myTargets { - err := multiSpinner.AddSpinner(target.GetName()) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - slog.Error(err.Error()) - rc.Cmd.SilenceUsage = true - return err - } - } - multiSpinner.Start() - adhocErrorChannel := make(chan error) - for i, t := range myTargets { - go func(target target.Target, i int) { - err := rc.AdhocFunc(appContext, orderedTargetScriptOutputs[i].ScriptOutputs, target, multiSpinner.Status) - adhocErrorChannel <- err - }(t, i) - } - // wait for all adhoc actions to complete, errors were reported by the AdhocFunc - for range myTargets { - <-adhocErrorChannel - } - // stop the progress indicator - multiSpinner.Finish() - fmt.Println() - } - return nil -} - -func signalProcessOnTarget(t target.Target, pidStr string, sigStr string) error { - var cmd *exec.Cmd - // prepend "-" to the signal string if not already present - if !strings.HasPrefix(sigStr, "-") { - sigStr = "-" + sigStr - } - if !t.IsSuperUser() && t.CanElevatePrivileges() { - cmd = exec.Command("sudo", "kill", sigStr, pidStr) - } else { - cmd = exec.Command("kill", sigStr, pidStr) - } - _, _, _, err := t.RunCommandEx(cmd, 5, false, true) // #nosec G204 - return err -} - -// configureSignalHandler sets up a signal handler to catch SIGINT and SIGTERM -// -// When perfspect receives ctrl-c while in the shell, the shell propagates the -// signal to all our children. 
But when perfspect is run in the background or disowned and -// then receives SIGINT, e.g., from a script, we need to send the signal to our children -// -// When running scripts using the controller.sh script, we need to send the signal to the -// controller.sh script on each target so that it can clean up its child processes. This is -// because the controller.sh script is run in its own process group and does not receive the -// signal when perfspect receives it. -// -// Parameters: -// - myTargets: The list of targets to send the signal to. -// - statusFunc: A function to update the status of the progress indicator. -func configureSignalHandler(myTargets []target.Target, statusFunc progress.MultiSpinnerUpdateFunc) { - sigChannel := make(chan os.Signal, 1) - signal.Notify(sigChannel, syscall.SIGINT, syscall.SIGTERM) - go func() { - sig := <-sigChannel - slog.Debug("received signal", slog.String("signal", sig.String())) - // The controller.sh script is run in its own process group, so we need to send the signal - // directly to the PID of the controller. For every target, look for the primary_collection_script - // PID file and send SIGINT to it. - // The controller script is run in its own process group, so we need to send the signal - // directly to the PID of the controller. For every target, look for the controller - // PID file and send SIGINT to it. - for _, t := range myTargets { - if statusFunc != nil { - _ = statusFunc(t.GetName(), "Signal received, cleaning up...") - } - pidFilePath := filepath.Join(t.GetTempDirectory(), script.ControllerPIDFileName) - stdout, _, exitcode, err := t.RunCommandEx(exec.Command("cat", pidFilePath), 5, false, true) // #nosec G204 - if err != nil { - slog.Error("error retrieving target controller PID", slog.String("target", t.GetName()), slog.String("error", err.Error())) - } - if exitcode == 0 { - pidStr := strings.TrimSpace(stdout) - err = signalProcessOnTarget(t, pidStr, "SIGINT") - if err != nil { - slog.Error("error sending SIGINT signal to target controller", slog.String("target", t.GetName()), slog.String("error", err.Error())) - } - } - } - // now wait until all controller scripts have exited - slog.Debug("waiting for controller scripts to exit") - for _, t := range myTargets { - // create a per-target timeout context - targetTimeout := 10 * time.Second - ctx, cancel := context.WithTimeout(context.Background(), targetTimeout) - timedOut := false - pidFilePath := filepath.Join(t.GetTempDirectory(), script.ControllerPIDFileName) - for { - // read the pid file - stdout, _, exitcode, err := t.RunCommandEx(exec.Command("cat", pidFilePath), 5, false, true) // #nosec G204 - if err != nil || exitcode != 0 { - // pid file doesn't exist - break - } - pidStr := strings.TrimSpace(stdout) - // determine if the process still exists - _, _, exitcode, err = t.RunCommandEx(exec.Command("ps", "-p", pidStr), 5, false, true) // #nosec G204 - if err != nil || exitcode != 0 { - break // process no longer exists, script has exited - } - // check for timeout - select { - case <-ctx.Done(): - timedOut = true - default: - } - if timedOut { - if statusFunc != nil { - _ = statusFunc(t.GetName(), "cleanup timeout exceeded, sending kill signal") - } - slog.Warn("signal handler cleanup timeout exceeded for target, sending SIGKILL", slog.String("target", t.GetName())) - err = signalProcessOnTarget(t, pidStr, "SIGKILL") - if err != nil { - slog.Error("error sending SIGKILL signal to target controller", slog.String("target", t.GetName()), slog.String("error", err.Error())) - 
} - break - } - // sleep for a short time before checking again - time.Sleep(500 * time.Millisecond) - } - cancel() - } - - // send SIGINT to perfspect's children - err := util.SignalChildren(syscall.SIGINT) - if err != nil { - slog.Error("error sending signal to children", slog.String("error", err.Error())) - } - }() -} - -// DefaultInsightsFunc returns the insights table values from the table values -func DefaultInsightsFunc(allTableValues []table.TableValues, scriptOutputs map[string]script.ScriptOutput) table.TableValues { - insightsTableValues := table.TableValues{ - TableDefinition: table.TableDefinition{ - Name: TableNameInsights, - HasRows: true, - MenuLabel: TableNameInsights, - }, - Fields: []table.Field{ - {Name: "Recommendation", Values: []string{}}, - {Name: "Justification", Values: []string{}}, - }, - } - for _, tableValues := range allTableValues { - for _, insight := range tableValues.Insights { - insightsTableValues.Fields[0].Values = append(insightsTableValues.Fields[0].Values, insight.Recommendation) - insightsTableValues.Fields[1].Values = append(insightsTableValues.Fields[1].Values, insight.Justification) - } - } - return insightsTableValues -} - -// FlagValidationError is used to report an error with a flag -func FlagValidationError(cmd *cobra.Command, msg string) error { - err := errors.New(msg) - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - fmt.Fprintf(os.Stderr, "See '%s --help' for usage details.\n", cmd.CommandPath()) - cmd.SilenceUsage = true - return err -} - -// createRawReports creates the raw report(s) from the collected data -// returns the list of report files creates or an error if the report creation failed. -func (rc *ReportingCommand) createRawReports(appContext AppContext, orderedTargetScriptOutputs []TargetScriptOutputs) ([]string, error) { - var reports []string - for _, targetScriptOutputs := range orderedTargetScriptOutputs { - reportBytes, err := report.CreateRawReport(rc.Tables, targetScriptOutputs.ScriptOutputs, targetScriptOutputs.TargetName) - if err != nil { - err = fmt.Errorf("failed to create raw report: %w", err) - return reports, err - } - post := "" - if rc.ReportNamePost != "" { - post = "_" + rc.ReportNamePost - } - reportFilename := fmt.Sprintf("%s%s.%s", targetScriptOutputs.TargetName, post, "raw") - reportPath := filepath.Join(appContext.OutputDir, reportFilename) - if err = writeReport(reportBytes, reportPath); err != nil { - err = fmt.Errorf("failed to write report: %w", err) - return reports, err - } - reports = append(reports, reportPath) - } - return reports, nil -} - -// writeReport writes the report bytes to the specified path. 
-func writeReport(reportBytes []byte, reportPath string) error { - err := os.WriteFile(reportPath, reportBytes, 0644) // #nosec G306 - if err != nil { - err = fmt.Errorf("failed to write report file: %v", err) - fmt.Fprintln(os.Stderr, err) - slog.Error(err.Error()) - return err - } - return nil -} - -// createReports processes the collected data and creates the requested report(s) -func (rc *ReportingCommand) createReports(appContext AppContext, orderedTargetScriptOutputs []TargetScriptOutputs, formats []string) ([]string, error) { - reportFilePaths := []string{} - allTargetsTableValues := make([][]table.TableValues, 0) - for _, targetScriptOutputs := range orderedTargetScriptOutputs { - // process the tables, i.e., get field values from script output - allTableValues, err := table.ProcessTables(targetScriptOutputs.Tables, targetScriptOutputs.ScriptOutputs) - if err != nil { - err = fmt.Errorf("failed to process collected data: %w", err) - return nil, err - } - // special case - the summary table is built from the post-processed data, i.e., table values - if rc.SummaryFunc != nil { - summaryTableValues := rc.SummaryFunc(allTableValues, targetScriptOutputs.ScriptOutputs) - // insert the summary table before the table specified by SummaryBeforeTableName, otherwise append it at the end - summaryBeforeTableFound := false - if rc.SummaryBeforeTableName != "" { - for i, tableValues := range allTableValues { - if tableValues.TableDefinition.Name == rc.SummaryBeforeTableName { - summaryBeforeTableFound = true - // insert the summary table before this table - allTableValues = append(allTableValues[:i], append([]table.TableValues{summaryTableValues}, allTableValues[i:]...)...) - break - } - } - } - if !summaryBeforeTableFound { - // append the summary table at the end - allTableValues = append(allTableValues, summaryTableValues) - } - } - // special case - add tableValues for Insights - if rc.InsightsFunc != nil { - insightsTableValues := rc.InsightsFunc(allTableValues, targetScriptOutputs.ScriptOutputs) - allTableValues = append(allTableValues, insightsTableValues) - } - // special case - add tableValues for the application version - allTableValues = append(allTableValues, table.TableValues{ - TableDefinition: table.TableDefinition{ - Name: TableNamePerfspect, - }, - Fields: []table.Field{ - {Name: "Version", Values: []string{appContext.Version}}, - {Name: "Args", Values: []string{strings.Join(os.Args, " ")}}, - {Name: "OutputDir", Values: []string{appContext.OutputDir}}, - }, - }) - // create the report(s) - for _, format := range formats { - reportBytes, err := report.Create(format, allTableValues, targetScriptOutputs.TargetName, rc.SystemSummaryTableName) - if err != nil { - err = fmt.Errorf("failed to create report: %w", err) - return nil, err - } - if len(formats) == 1 && format == report.FormatTxt { - fmt.Printf("%s:\n", targetScriptOutputs.TargetName) - fmt.Print(string(reportBytes)) - } - post := "" - if rc.ReportNamePost != "" { - post = "_" + rc.ReportNamePost - } - reportFilename := fmt.Sprintf("%s%s.%s", targetScriptOutputs.TargetName, post, format) - reportPath := filepath.Join(appContext.OutputDir, reportFilename) - if err = writeReport(reportBytes, reportPath); err != nil { - err = fmt.Errorf("failed to write report: %w", err) - return nil, err - } - reportFilePaths = append(reportFilePaths, reportPath) - } - // keep all the targets table values for combined reports - allTargetsTableValues = append(allTargetsTableValues, allTableValues) - } - if len(allTargetsTableValues) > 1 && 
len(orderedTargetScriptOutputs) > 1 { - // list of target names for the combined report - // - only those that we received output from - targetNames := make([]string, 0) - for _, targetScriptOutputs := range orderedTargetScriptOutputs { - targetNames = append(targetNames, targetScriptOutputs.TargetName) - } - // merge table names from all targets maintaining the order of the tables - mergedTableNames := util.MergeOrderedUnique(extractTableNamesFromValues(allTargetsTableValues)) - multiTargetFormats := []string{report.FormatHtml, report.FormatXlsx} - for _, format := range multiTargetFormats { - if !slices.Contains(formats, format) { - continue - } - reportBytes, err := report.CreateMultiTarget(format, allTargetsTableValues, targetNames, mergedTableNames, rc.SummaryTableName) - if err != nil { - err = fmt.Errorf("failed to create multi-target %s report: %w", format, err) - return nil, err - } - reportFilename := fmt.Sprintf("%s.%s", "all_hosts", format) - reportPath := filepath.Join(appContext.OutputDir, reportFilename) - if err = writeReport(reportBytes, reportPath); err != nil { - err = fmt.Errorf("failed to write multi-target %s report: %w", format, err) - return nil, err - } - reportFilePaths = append(reportFilePaths, reportPath) - } - } - return reportFilePaths, nil -} - -// extractTableNamesFromValues extracts the table names from the processed table values for each target. -// It returns a slice of slices, where each inner slice contains the table names for a target. -func extractTableNamesFromValues(allTargetsTableValues [][]table.TableValues) [][]string { - targetTableNames := make([][]string, 0, len(allTargetsTableValues)) - for _, tableValues := range allTargetsTableValues { - names := make([]string, 0, len(tableValues)) - for _, tv := range tableValues { - names = append(names, tv.TableDefinition.Name) - } - targetTableNames = append(targetTableNames, names) - } - return targetTableNames -} - -func findTableByName(tables []table.TableDefinition, name string) (*table.TableDefinition, error) { - for _, tbl := range tables { - if tbl.Name == name { - return &tbl, nil - } - } - return nil, fmt.Errorf("table [%s] not found", name) -} - -// outputsFromInput reads the raw file(s) and returns the data in the order of the raw files -func outputsFromInput(tables []table.TableDefinition, summaryTableName string) ([]TargetScriptOutputs, error) { - orderedTargetScriptOutputs := []TargetScriptOutputs{} - includedTables := []table.TableDefinition{} - // read the raw file(s) as JSON - rawReports, err := report.ReadRawReports(FlagInput) - if err != nil { - err = fmt.Errorf("failed to read raw file(s): %w", err) - return nil, err - } - for _, rawReport := range rawReports { - for _, tableName := range rawReport.TableNames { // just in case someone tries to use the raw files that were collected with a different set of categories - // filter out tables that we add after processing - if tableName == TableNameInsights || tableName == TableNamePerfspect || tableName == summaryTableName { - continue - } - includedTable, err := findTableByName(tables, tableName) - if err != nil { - slog.Warn("table from raw report not found in current tables", slog.String("table", tableName), slog.String("target", rawReport.TargetName)) - continue - } - includedTables = append(includedTables, *includedTable) - } - orderedTargetScriptOutputs = append(orderedTargetScriptOutputs, TargetScriptOutputs{TargetName: rawReport.TargetName, ScriptOutputs: rawReport.ScriptOutputs, Tables: includedTables}) - } - return 
orderedTargetScriptOutputs, nil -} - -// outputsFromTargets runs the scripts on the targets and returns the data in the order of the targets -func outputsFromTargets(cmd *cobra.Command, myTargets []target.Target, tables []table.TableDefinition, scriptParams map[string]string, statusUpdate progress.MultiSpinnerUpdateFunc, localTempDir string) ([]TargetScriptOutputs, error) { - orderedTargetScriptOutputs := []TargetScriptOutputs{} - channelTargetScriptOutputs := make(chan TargetScriptOutputs) - channelError := make(chan error) - // create the list of tables and associated scripts for each target - targetTables := [][]table.TableDefinition{} - targetScriptNames := [][]string{} - for targetIdx, target := range myTargets { - targetTables = append(targetTables, []table.TableDefinition{}) - targetScriptNames = append(targetScriptNames, []string{}) - for _, tbl := range tables { - if isTableForTarget(tbl, target, localTempDir) { - // add table to list of tables to collect - targetTables[targetIdx] = append(targetTables[targetIdx], tbl) - // add scripts to list of scripts to run - for _, scriptName := range tbl.ScriptNames { - targetScriptNames[targetIdx] = util.UniqueAppend(targetScriptNames[targetIdx], scriptName) - } - } else { - slog.Debug("table not supported for target", slog.String("table", tbl.Name), slog.String("target", target.GetName())) - } - } - } - // run the scripts on the targets - for targetIdx, target := range myTargets { - scriptsToRunOnTarget := []script.ScriptDefinition{} - for _, scriptName := range targetScriptNames[targetIdx] { - script := script.GetParameterizedScriptByName(scriptName, scriptParams) - scriptsToRunOnTarget = append(scriptsToRunOnTarget, script) - } - // run the selected scripts on the target - ctrlCToStop := cmd.Name() == "telemetry" || cmd.Name() == "flamegraph" - go collectOnTarget(target, scriptsToRunOnTarget, localTempDir, scriptParams["Duration"], ctrlCToStop, channelTargetScriptOutputs, channelError, statusUpdate) - } - // wait for scripts to run on all targets - var allTargetScriptOutputs []TargetScriptOutputs - for range myTargets { - select { - case scriptOutputs := <-channelTargetScriptOutputs: - allTargetScriptOutputs = append(allTargetScriptOutputs, scriptOutputs) - case err := <-channelError: - slog.Error(err.Error()) - } - } - // allTargetScriptOutputs is in the order of data collection completion - // reorder to match order of myTargets - for targetIdx, target := range myTargets { - for _, targetScriptOutputs := range allTargetScriptOutputs { - if targetScriptOutputs.TargetName == target.GetName() { - targetScriptOutputs.Tables = targetTables[targetIdx] - orderedTargetScriptOutputs = append(orderedTargetScriptOutputs, targetScriptOutputs) - break - } - } - } - return orderedTargetScriptOutputs, nil -} - -// isTableForTarget checks if the given table is applicable for the specified target -func isTableForTarget(tbl table.TableDefinition, t target.Target, localTempDir string) bool { - if len(tbl.Architectures) > 0 { - architecture, err := t.GetArchitecture() - if err != nil { - slog.Error("failed to get architecture for target", slog.String("target", t.GetName()), slog.String("error", err.Error())) - return false - } - if !slices.Contains(tbl.Architectures, architecture) { - return false - } - } - if len(tbl.Vendors) > 0 { - vendor, err := GetTargetVendor(t) - if err != nil { - slog.Error("failed to get vendor for target", slog.String("target", t.GetName()), slog.String("error", err.Error())) - return false - } - if !slices.Contains(tbl.Vendors, 
vendor) { - return false - } - } - if len(tbl.MicroArchitectures) > 0 { - uarch, err := GetTargetMicroArchitecture(t, localTempDir, false) - if err != nil { - slog.Error("failed to get microarchitecture for target", slog.String("target", t.GetName()), slog.String("error", err.Error())) - } - shortUarch := strings.Split(uarch, "_")[0] // handle EMR_XCC, etc. - shortUarch = strings.Split(shortUarch, "-")[0] // handle GNR-D - shortUarch = strings.Split(shortUarch, " ")[0] // handle Turin (Zen 5) - if !slices.Contains(tbl.MicroArchitectures, uarch) && !slices.Contains(tbl.MicroArchitectures, shortUarch) { - return false - } - } - return true -} - -// elevatedPrivilegesRequired returns true if any of the scripts needed for the tables require elevated privileges -func elevatedPrivilegesRequired(tables []table.TableDefinition) bool { - for _, tbl := range tables { - for _, scriptName := range tbl.ScriptNames { - script := script.GetScriptByName(scriptName) - if script.Superuser { - return true - } - } - } - return false -} - -// collectOnTarget runs the scripts on the target and sends the results to the appropriate channels -func collectOnTarget(myTarget target.Target, scriptsToRun []script.ScriptDefinition, localTempDir string, duration string, ctrlCToStop bool, channelTargetScriptOutputs chan TargetScriptOutputs, channelError chan error, statusUpdate progress.MultiSpinnerUpdateFunc) { - // run the scripts on the target - status := "collecting data" - if ctrlCToStop && duration == "0" { - status += ", press Ctrl+c to stop" - } else if duration != "0" && duration != "" { - status += fmt.Sprintf(" for %s seconds", duration) - } - scriptOutputs, err := RunScripts(myTarget, scriptsToRun, true, localTempDir, statusUpdate, status, false) - if err != nil { - if statusUpdate != nil { - _ = statusUpdate(myTarget.GetName(), fmt.Sprintf("error collecting data: %v", err)) - } - err = fmt.Errorf("error running data collection scripts on %s: %v", myTarget.GetName(), err) - channelError <- err - return - } - if statusUpdate != nil { - _ = statusUpdate(myTarget.GetName(), "collection complete") - } - channelTargetScriptOutputs <- TargetScriptOutputs{TargetName: myTarget.GetName(), ScriptOutputs: scriptOutputs} -} diff --git a/internal/common/table_defs.go b/internal/common/table_defs.go deleted file mode 100644 index f2daef4b..00000000 --- a/internal/common/table_defs.go +++ /dev/null @@ -1,75 +0,0 @@ -package common - -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -import ( - "perfspect/internal/script" - "perfspect/internal/table" - "strings" -) - -const SystemSummaryTableName = "System Summary" - -var TableDefinitions = map[string]table.TableDefinition{ - SystemSummaryTableName: { - Name: SystemSummaryTableName, - MenuLabel: SystemSummaryTableName, - HasRows: false, - ScriptNames: []string{ - script.HostnameScriptName, - script.DateScriptName, - script.LscpuScriptName, - script.LscpuCacheScriptName, - script.LspciBitsScriptName, - script.LspciDevicesScriptName, - script.SpecCoreFrequenciesScriptName, - script.MeminfoScriptName, - script.NicInfoScriptName, - script.DiskInfoScriptName, - script.UnameScriptName, - script.EtcReleaseScriptName, - script.PackagePowerLimitName, - script.EpbScriptName, - script.ScalingDriverScriptName, - script.ScalingGovernorScriptName, - script.CstatesScriptName, - script.ElcScriptName, - script.ArmImplementerScriptName, - script.ArmPartScriptName, - script.ArmDmidecodePartScriptName, - script.DmidecodeScriptName, - }, - FieldsFunc: 
briefSummaryTableValues}, -} - -func briefSummaryTableValues(outputs map[string]script.ScriptOutput) []table.Field { - memory := InstalledMemoryFromOutput(outputs) // Dmidecode, try this first - if memory == "" { - memory = ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemTotal:\s*(.+?)$`) // Meminfo as fallback - } - return []table.Field{ - {Name: "Host Name", Values: []string{strings.TrimSpace(outputs[script.HostnameScriptName].Stdout)}}, // Hostname - {Name: "Time", Values: []string{strings.TrimSpace(outputs[script.DateScriptName].Stdout)}}, // Date - {Name: "CPU Model", Values: []string{ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name:\s*(.+)$`)}}, // Lscpu - {Name: "Microarchitecture", Values: []string{UarchFromOutput(outputs)}}, // Lscpu, LspciBits, LspciDevices - {Name: "TDP", Values: []string{TDPFromOutput(outputs)}}, // PackagePowerLimit - {Name: "Sockets", Values: []string{ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, // Lscpu - {Name: "Cores per Socket", Values: []string{ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, // Lscpu - {Name: "Hyperthreading", Values: []string{HyperthreadingFromOutput(outputs)}}, // Lscpu, LspciBits, LspciDevices - {Name: "CPUs", Values: []string{ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(s\):\s*(.+)$`)}}, // Lscpu - {Name: "NUMA Nodes", Values: []string{ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`)}}, // Lscpu - {Name: "Scaling Driver", Values: []string{strings.TrimSpace(outputs[script.ScalingDriverScriptName].Stdout)}}, // ScalingDriver - {Name: "Scaling Governor", Values: []string{strings.TrimSpace(outputs[script.ScalingGovernorScriptName].Stdout)}}, // ScalingGovernor - {Name: "C-states", Values: []string{CstatesSummaryFromOutput(outputs)}}, // Cstates - {Name: "Maximum Frequency", Values: []string{MaxFrequencyFromOutput(outputs)}, Description: "The highest speed a single core can reach with Turbo Boost."}, // MaximumFrequency, SpecCoreFrequencies, - {Name: "All-core Maximum Frequency", Values: []string{AllCoreMaxFrequencyFromOutput(outputs)}, Description: "The highest speed all cores can reach simultaneously with Turbo Boost."}, // Lscpu, LspciBits, LspciDevices, SpecCoreFrequencies - {Name: "Energy Performance Bias", Values: []string{EPBFromOutput(outputs)}}, // EpbSource, EpbBIOS, EpbOS - {Name: "Efficiency Latency Control", Values: []string{ELCSummaryFromOutput(outputs)}}, // Elc - {Name: "Memory", Values: []string{memory}}, // Dmidecode,Meminfo - {Name: "NIC", Values: []string{NICSummaryFromOutput(outputs)}}, // Lshw, NicInfo - {Name: "Disk", Values: []string{DiskSummaryFromOutput(outputs)}}, // DiskInfo, Hdparm - {Name: "OS", Values: []string{OperatingSystemFromOutput(outputs)}}, // EtcRelease - {Name: "Kernel", Values: []string{ValFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`)}}, // Uname - } -} diff --git a/internal/common/table_helpers.go b/internal/common/table_helpers.go deleted file mode 100644 index 85f9c6f9..00000000 --- a/internal/common/table_helpers.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -// table_helpers.go contains base helper functions that are used to extract values from the output of the scripts. 
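[Editor's illustrative aside, not part of this patch: the file deleted below held the base extraction helpers that this refactor moves to internal/extract (see the extract.ValFromRegexSubmatch calls in app_tables.go above). The self-contained sketch below copies the core helper's logic locally to show what it does; the lscpu-style sample text is made up, and the local function name is hypothetical, not the package API.]

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// valFromRegexSubmatch is a local copy of the helper's logic: return the first
// capture group from the first line that matches, or "" when nothing matches.
func valFromRegexSubmatch(output string, regex string) string {
	re := regexp.MustCompile(regex)
	for _, line := range strings.Split(output, "\n") {
		match := re.FindStringSubmatch(strings.TrimSpace(line))
		if len(match) > 1 {
			return match[1]
		}
	}
	return ""
}

func main() {
	lscpu := "CPU(s): 8\nModel name: Example CPU"
	fmt.Println(valFromRegexSubmatch(lscpu, `^CPU\(s\):\s*(.+)$`))      // prints: 8
	fmt.Println(valFromRegexSubmatch(lscpu, `^Model name:\s*(.+)$`))    // prints: Example CPU
}

[End of aside; the patch continues below.]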
- -package common - -import ( - "fmt" - "log/slog" - "perfspect/internal/cpus" - "perfspect/internal/script" - "perfspect/internal/util" - "regexp" - "strconv" - "strings" -) - -// ValFromRegexSubmatch searches for a regex pattern in the given output string and returns the first captured group. -// If no match is found, an empty string is returned. -func ValFromRegexSubmatch(output string, regex string) string { - re := regexp.MustCompile(regex) - for line := range strings.SplitSeq(output, "\n") { - match := re.FindStringSubmatch(strings.TrimSpace(line)) - if len(match) > 1 { - return match[1] - } - } - return "" -} - -// ValsFromRegexSubmatch extracts the captured groups from each line in the output -// that matches the given regular expression. -// It returns a slice of strings containing the captured values. -func ValsFromRegexSubmatch(output string, regex string) []string { - var vals []string - re := regexp.MustCompile(regex) - for line := range strings.SplitSeq(output, "\n") { - match := re.FindStringSubmatch(strings.TrimSpace(line)) - if len(match) > 1 { - vals = append(vals, match[1]) - } - } - return vals -} - -// ValsArrayFromRegexSubmatch returns all matches for all capture groups in regex -func ValsArrayFromRegexSubmatch(output string, regex string) (vals [][]string) { - re := regexp.MustCompile(regex) - for line := range strings.SplitSeq(output, "\n") { - match := re.FindStringSubmatch(line) - if len(match) > 1 { - vals = append(vals, match[1:]) - } - } - return -} - -// ValFromDmiDecodeRegexSubmatch extracts a value from the DMI decode output using a regular expression. -// It takes the DMI decode output, the DMI type, and the regular expression as input parameters. -// It returns the extracted value as a string. -func ValFromDmiDecodeRegexSubmatch(dmiDecodeOutput string, dmiType string, regex string) string { - return ValFromRegexSubmatch(GetDmiDecodeType(dmiDecodeOutput, dmiType), regex) -} - -func ValsArrayFromDmiDecodeRegexSubmatch(dmiDecodeOutput string, dmiType string, regexes ...string) (vals [][]string) { - var res []*regexp.Regexp - for _, r := range regexes { - re := regexp.MustCompile(r) - res = append(res, re) - } - for _, entry := range GetDmiDecodeEntries(dmiDecodeOutput, dmiType) { - row := make([]string, len(res)) - for _, line := range entry { - for i, re := range res { - match := re.FindStringSubmatch(strings.TrimSpace(line)) - if len(match) > 1 { - row[i] = match[1] - } - } - } - vals = append(vals, row) - } - return -} - -// GetDmiDecodeType extracts the lines from the given `dmiDecodeOutput` that belong to the specified `dmiType`. -func GetDmiDecodeType(dmiDecodeOutput string, dmiType string) string { - var lines []string - start := false - for line := range strings.SplitSeq(dmiDecodeOutput, "\n") { - if start && strings.HasPrefix(line, "Handle ") { - start = false - } - if strings.Contains(line, "DMI type "+dmiType+",") { - start = true - } - if start { - lines = append(lines, line) - } - } - return strings.Join(lines, "\n") -} - -// GetDmiDecodeEntries extracts the entries from the given `dmiDecodeOutput` that belong to the specified `dmiType`. 
-func GetDmiDecodeEntries(dmiDecodeOutput string, dmiType string) (entries [][]string) { - lines := strings.Split(dmiDecodeOutput, "\n") - var entry []string - typeMatch := false - for _, line := range lines { - if strings.HasPrefix(line, "Handle ") { - if strings.Contains(line, "DMI type "+dmiType+",") { - // type match - typeMatch = true - entry = []string{} - } else { - // not a type match - typeMatch = false - } - } - if !typeMatch { - continue - } - if line == "" { - // end of type match entry - entries = append(entries, entry) - } else { - // a line in the entry - entry = append(entry, line) - } - } - return -} - -// GetSectionsFromOutput parses output into sections, where the section name -// is the key in a map and the section content is the value -// sections are delimited by lines of the form ##########
<section name> ##########
-// example:
-// ########## <section name> ##########
-// <content>
-// <content>
-// ########## <section name> ##########
-// <content>
-// -// returns a map of section name to section content -// if the output is empty or contains no section headers, returns an empty map -// if a section contains no content, the value for that section is an empty string -func GetSectionsFromOutput(output string) map[string]string { - sections := make(map[string]string) - re := regexp.MustCompile(`^########## (.+?) ##########$`) - var sectionName string - for line := range strings.SplitSeq(output, "\n") { - // check if the line is a section header - match := re.FindStringSubmatch(line) - if match != nil { - // if the section name isn't in the map yet, add it - if _, ok := sections[match[1]]; !ok { - sections[match[1]] = "" - } - // save the section name - sectionName = match[1] - continue - } - if sectionName != "" { - sections[sectionName] += line + "\n" - } - } - return sections -} - -// SectionValueFromOutput returns the content of a section from the output -// if the section doesn't exist, returns an empty string -// if the section exists but has no content, returns an empty string -func SectionValueFromOutput(output string, sectionName string) string { - sections := GetSectionsFromOutput(output) - if len(sections) == 0 { - slog.Warn("no sections in output") - return "" - } - if _, ok := sections[sectionName]; !ok { - slog.Warn("section not found in output", slog.String("section", sectionName)) - return "" - } - if sections[sectionName] == "" { - slog.Warn("No content for section:", slog.String("section", sectionName)) - return "" - } - return sections[sectionName] -} - -// UarchFromOutput returns the microarchitecture of the CPU or an empty string, if no match is found. -func UarchFromOutput(outputs map[string]script.ScriptOutput) string { - family := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) - model := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) - stepping := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) - capid4 := ValFromRegexSubmatch(outputs[script.LspciBitsScriptName].Stdout, `^([0-9a-fA-F]+)`) - devices := ValFromRegexSubmatch(outputs[script.LspciDevicesScriptName].Stdout, `^([0-9]+)`) - implementer := strings.TrimSpace(outputs[script.ArmImplementerScriptName].Stdout) - part := strings.TrimSpace(outputs[script.ArmPartScriptName].Stdout) - dmidecodePart := strings.TrimSpace(outputs[script.ArmDmidecodePartScriptName].Stdout) - cpu, err := cpus.GetCPU(cpus.NewCPUIdentifier(family, model, stepping, capid4, devices, implementer, part, dmidecodePart, "")) - if err != nil { - slog.Error("error getting CPU characteristics", slog.String("error", err.Error())) - return "" - } - return cpu.MicroArchitecture -} - -func HyperthreadingFromOutput(outputs map[string]script.ScriptOutput) string { - family := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) - model := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) - stepping := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) - implementer := strings.TrimSpace(outputs[script.ArmImplementerScriptName].Stdout) - part := strings.TrimSpace(outputs[script.ArmPartScriptName].Stdout) - dmidecodePart := strings.TrimSpace(outputs[script.ArmDmidecodePartScriptName].Stdout) - sockets := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) - coresPerSocket := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`) - 
cpuCount := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(.*:\s*(.+?)$`) - onlineCpus := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^On-line CPU\(s\) list:\s*(.+)$`) - threadsPerCore := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Thread\(s\) per core:\s*(.+)$`) - - numCPUs, err := strconv.Atoi(cpuCount) // logical CPUs - if err != nil { - slog.Error("error parsing cpus from lscpu") - return "" - } - onlineCpusList, err := util.SelectiveIntRangeToIntList(onlineCpus) // logical online CPUs - numOnlineCpus := len(onlineCpusList) - if err != nil { - slog.Error("error parsing online cpus from lscpu") - numOnlineCpus = 0 // set to 0 to indicate parsing failed, will use numCPUs instead - } - numThreadsPerCore, err := strconv.Atoi(threadsPerCore) // logical threads per core - if err != nil { - slog.Error("error parsing threads per core from lscpu") - numThreadsPerCore = 0 - } - numSockets, err := strconv.Atoi(sockets) - if err != nil { - slog.Error("error parsing sockets from lscpu") - return "" - } - numCoresPerSocket, err := strconv.Atoi(coresPerSocket) // physical cores - if err != nil { - slog.Error("error parsing cores per sockets from lscpu") - return "" - } - cpu, err := cpus.GetCPU(cpus.NewCPUIdentifier(family, model, stepping, "", "", implementer, part, dmidecodePart, "")) - if err != nil { - slog.Warn("error getting CPU characteristics", slog.String("error", err.Error())) - return "" - } - if numOnlineCpus > 0 && numOnlineCpus < numCPUs { - // if online CPUs list is available, use it to determine the number of CPUs - // supersedes lscpu output of numCPUs which counts CPUs on the system, not online CPUs - numCPUs = numOnlineCpus - } - if cpu.LogicalThreadCount < 2 { - return "N/A" - } else if numThreadsPerCore == 1 { - // if threads per core is 1, hyperthreading is disabled - return "Disabled" - } else if numThreadsPerCore >= 2 { - // if threads per core is greater than or equal to 2, hyperthreading is enabled - return "Enabled" - } else if numCPUs > numCoresPerSocket*numSockets { - // if the threads per core attribute is not available, we can still check if hyperthreading is enabled - // by checking if the number of logical CPUs is greater than the number of physical cores - return "Enabled" - } else { - return "Disabled" - } -} - -func OperatingSystemFromOutput(outputs map[string]script.ScriptOutput) string { - os := ValFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^PRETTY_NAME=\"(.+?)\"`) - centos := ValFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^(CentOS Linux release .*)`) - if centos != "" { - os = centos - } - return os -} - -func TDPFromOutput(outputs map[string]script.ScriptOutput) string { - msrHex := strings.TrimSpace(outputs[script.PackagePowerLimitName].Stdout) - msr, err := strconv.ParseInt(msrHex, 16, 0) - if err != nil || msr == 0 { - return "" - } - return fmt.Sprint(msr/8) + "W" -} - -const ( - BankLocatorIdx = iota - LocatorIdx - ManufacturerIdx - PartIdx - SerialIdx - SizeIdx - TypeIdx - DetailIdx - SpeedIdx - RankIdx - ConfiguredSpeedIdx - DerivedSocketIdx - DerivedChannelIdx - DerivedSlotIdx -) - -func DimmInfoFromDmiDecode(dmiDecodeOutput string) [][]string { - return ValsArrayFromDmiDecodeRegexSubmatch( - dmiDecodeOutput, - "17", - `^Bank Locator:\s*(.+?)$`, - `^Locator:\s*(.+?)$`, - `^Manufacturer:\s*(.+?)$`, - `^Part Number:\s*(.+?)\s*$`, - `^Serial Number:\s*(.+?)\s*$`, - `^Size:\s*(.+?)$`, - `^Type:\s*(.+?)$`, - `^Type Detail:\s*(.+?)$`, - `^Speed:\s*(.+?)$`, - 
`^Rank:\s*(.+?)$`, - `^Configured.*Speed:\s*(.+?)$`, - ) -} - -func InstalledMemoryFromOutput(outputs map[string]script.ScriptOutput) string { - dimmInfo := DimmInfoFromDmiDecode(outputs[script.DmidecodeScriptName].Stdout) - dimmTypeCount := make(map[string]int) - for _, dimm := range dimmInfo { - dimmKey := dimm[TypeIdx] + ":" + dimm[SizeIdx] + ":" + dimm[SpeedIdx] + ":" + dimm[ConfiguredSpeedIdx] - if count, ok := dimmTypeCount[dimmKey]; ok { - dimmTypeCount[dimmKey] = count + 1 - } else { - dimmTypeCount[dimmKey] = 1 - } - } - var summaries []string - re := regexp.MustCompile(`(\d+)\s*(\w*)`) - for dimmKey, count := range dimmTypeCount { - fields := strings.Split(dimmKey, ":") - match := re.FindStringSubmatch(fields[1]) // size field - if match != nil { - size, err := strconv.Atoi(match[1]) - if err != nil { - slog.Warn("Don't recognize DIMM size format.", slog.String("field", fields[1])) - return "" - } - sum := count * size - unit := match[2] - dimmType := fields[0] - speed := strings.ReplaceAll(fields[2], " ", "") - configuredSpeed := strings.ReplaceAll(fields[3], " ", "") - summary := fmt.Sprintf("%d%s (%dx%d%s %s %s [%s])", sum, unit, count, size, unit, dimmType, speed, configuredSpeed) - summaries = append(summaries, summary) - } - } - return strings.Join(summaries, "; ") -} diff --git a/cmd/report/accelerator.go b/internal/extract/accelerator.go similarity index 72% rename from cmd/report/accelerator.go rename to internal/extract/accelerator.go index f9ca55b0..c6bb582e 100644 --- a/cmd/report/accelerator.go +++ b/internal/extract/accelerator.go @@ -1,7 +1,7 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package report +package extract import ( "fmt" @@ -15,6 +15,7 @@ import ( // references: // https://pci-ids.ucw.cz/read/PC/8086 +// AcceleratorDefinition represents an Intel accelerator device. type AcceleratorDefinition struct { MfgID string DevID string @@ -23,7 +24,8 @@ type AcceleratorDefinition struct { Description string } -var acceleratorDefinitions = []AcceleratorDefinition{ +// AcceleratorDefinitions contains all known Intel accelerator definitions. +var AcceleratorDefinitions = []AcceleratorDefinition{ { MfgID: "8086", DevID: "(2710|2714)", @@ -68,18 +70,20 @@ var acceleratorDefinitions = []AcceleratorDefinition{ }, } -func acceleratorNames() []string { +// AcceleratorNames returns the short names of all accelerators. +func AcceleratorNames() []string { var names []string - for _, accel := range acceleratorDefinitions { + for _, accel := range AcceleratorDefinitions { names = append(names, accel.Name) } return names } -func acceleratorCountsFromOutput(outputs map[string]script.ScriptOutput) []string { +// AcceleratorCountsFromOutput returns the count of each accelerator type from lshw output. +func AcceleratorCountsFromOutput(outputs map[string]script.ScriptOutput) []string { var counts []string lshw := outputs[script.LshwScriptName].Stdout - for _, accel := range acceleratorDefinitions { + for _, accel := range AcceleratorDefinitions { regex := fmt.Sprintf("%s:%s", accel.MfgID, accel.DevID) re := regexp.MustCompile(regex) count := len(re.FindAllString(lshw, -1)) @@ -88,9 +92,10 @@ func acceleratorCountsFromOutput(outputs map[string]script.ScriptOutput) []strin return counts } -func acceleratorWorkQueuesFromOutput(outputs map[string]script.ScriptOutput) []string { +// AcceleratorWorkQueuesFromOutput returns the work queues for IAA and DSA accelerators. 
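A rough sketch of how AcceleratorCountsFromOutput (above) matches devices: each definition's MfgID and DevID are joined into a pattern such as 8086:(2710|2714) and counted against the lshw output. Not part of the patch; the lshw fragment below is fabricated for illustration:

    // Illustrative only: count one accelerator's vendor:device pattern in lshw text.
    lshw := "pci@0000:6a:01.0  8086:2710\npci@0000:e7:01.0  8086:2714"
    re := regexp.MustCompile("8086:" + "(2710|2714)") // MfgID:DevID from the first definition above
    count := len(re.FindAllString(lshw, -1))          // 2
    _ = count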
+func AcceleratorWorkQueuesFromOutput(outputs map[string]script.ScriptOutput) []string { var queues []string - for _, accel := range acceleratorDefinitions { + for _, accel := range AcceleratorDefinitions { if accel.Name == "IAA" || accel.Name == "DSA" { var scriptName string if accel.Name == "IAA" { @@ -119,26 +124,29 @@ func acceleratorWorkQueuesFromOutput(outputs map[string]script.ScriptOutput) []s return queues } -func acceleratorFullNamesFromYaml() []string { +// AcceleratorFullNames returns the full names of all accelerators. +func AcceleratorFullNames() []string { var fullNames []string - for _, accel := range acceleratorDefinitions { + for _, accel := range AcceleratorDefinitions { fullNames = append(fullNames, accel.FullName) } return fullNames } -func acceleratorDescriptionsFromYaml() []string { +// AcceleratorDescriptions returns the descriptions of all accelerators. +func AcceleratorDescriptions() []string { var descriptions []string - for _, accel := range acceleratorDefinitions { + for _, accel := range AcceleratorDefinitions { descriptions = append(descriptions, accel.Description) } return descriptions } -func acceleratorSummaryFromOutput(outputs map[string]script.ScriptOutput) string { +// AcceleratorSummaryFromOutput returns a summary string of accelerator counts. +func AcceleratorSummaryFromOutput(outputs map[string]script.ScriptOutput) string { var summary []string - accelerators := acceleratorNames() - counts := acceleratorCountsFromOutput(outputs) + accelerators := AcceleratorNames() + counts := AcceleratorCountsFromOutput(outputs) for i, name := range accelerators { if strings.Contains(name, "chipset") { // skip "QAT (on chipset)" in this table continue diff --git a/internal/common/cache.go b/internal/extract/cache.go similarity index 85% rename from internal/common/cache.go rename to internal/extract/cache.go index fb7ccd6d..e778217c 100644 --- a/internal/common/cache.go +++ b/internal/extract/cache.go @@ -1,16 +1,17 @@ -package common - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package extract + import ( "fmt" "log/slog" + "strconv" + "strings" + "perfspect/internal/cpus" "perfspect/internal/script" "perfspect/internal/util" - "strconv" - "strings" ) // GetL3MSRMB returns the L3 cache size per cache instance (per socket on Intel) and total in MB from MSR. @@ -106,13 +107,7 @@ func L3FromOutput(outputs map[string]script.ScriptOutput) string { } // L3PerCoreFromOutput calculates the amount of L3 cache (in MiB) available per core -// based on the provided script outputs. It first checks if the host is virtualized, -// in which case it returns an empty string since the calculation is not applicable. -// It parses the number of cores per socket and the number of sockets from the lscpu -// output. It attempts to retrieve the total L3 cache size using MSR data, falling -// back to parsing lscpu output if necessary. The result is formatted as a string -// with up to three decimal places, followed by " MiB". If any required data cannot -// be parsed, it logs an error and returns an empty string. +// based on the provided script outputs. 
func L3PerCoreFromOutput(outputs map[string]script.ScriptOutput) string { virtualization := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Virtualization.*:\s*(.+?)$`) if virtualization == "full" { @@ -159,7 +154,8 @@ func FormatCacheSizeMB(size float64) string { return fmt.Sprintf("%sM", val) } -type lscpuCacheEntry struct { +// LscpuCacheEntry represents a cache entry from lscpu output. +type LscpuCacheEntry struct { Name string OneSize string AllSize string @@ -172,13 +168,7 @@ type lscpuCacheEntry struct { } // ParseLscpuCacheOutput parses the output of `lscpu -C` (text/tabular) -// Example output: -// NAME ONE-SIZE ALL-SIZE WAYS TYPE LEVEL SETS PHY-LINE COHERENCY-SIZE -// L1d 48K 8.1M 12 Data 1 64 1 64 -// L1i 64K 10.8M 16 Instruction 1 64 1 64 -// L2 2M 344M 16 Unified 2 2048 1 64 -// L3 336M 672M 16 Unified 3 344064 1 64 -func ParseLscpuCacheOutput(LscpuCacheOutput string) (map[string]lscpuCacheEntry, error) { +func ParseLscpuCacheOutput(LscpuCacheOutput string) (map[string]LscpuCacheEntry, error) { trimmed := strings.TrimSpace(LscpuCacheOutput) if trimmed == "" { slog.Warn("lscpu cache output is empty") @@ -198,7 +188,7 @@ func ParseLscpuCacheOutput(LscpuCacheOutput string) (map[string]lscpuCacheEntry, for i, h := range headerCols { idx[strings.ToLower(strings.TrimSpace(h))] = i } - out := map[string]lscpuCacheEntry{} + out := map[string]LscpuCacheEntry{} for _, line := range lines[1:] { l := strings.TrimSpace(line) if l == "" { @@ -208,7 +198,7 @@ func ParseLscpuCacheOutput(LscpuCacheOutput string) (map[string]lscpuCacheEntry, if len(cols) < 4 { continue } - entry := lscpuCacheEntry{} + entry := LscpuCacheEntry{} if i, ok := idx["name"]; ok && i < len(cols) { entry.Name = cols[i] } @@ -244,13 +234,12 @@ func ParseLscpuCacheOutput(LscpuCacheOutput string) (map[string]lscpuCacheEntry, return out, nil } -// L1l2CacheSizeFromLscpuCache extracts the data cache size from the provided lscpuCacheEntry. -func L1l2CacheSizeFromLscpuCache(entry lscpuCacheEntry) string { +// L1l2CacheSizeFromLscpuCache extracts the data cache size from the provided LscpuCacheEntry. +func L1l2CacheSizeFromLscpuCache(entry LscpuCacheEntry) string { return entry.OneSize } // parseCacheSizeToMB parses a cache size string (e.g., "32K", "2M", "1G") and converts it to megabytes. -// The input string can have optional "B" suffix and supports K, M, G units. func parseCacheSizeToMB(sizeString, fieldName string) (float64, error) { if sizeString == "" { return 0, fmt.Errorf("%s is empty", fieldName) @@ -279,12 +268,12 @@ func parseCacheSizeToMB(sizeString, fieldName string) (float64, error) { return sizeVal * multiplier, nil } -// l3CacheTotalSizeFromLscpuCacheMB extracts the total L3 cache size in megabytes from the provided lscpuCacheEntry. -func l3CacheTotalSizeFromLscpuCacheMB(entry lscpuCacheEntry) (float64, error) { +// l3CacheTotalSizeFromLscpuCacheMB extracts the total L3 cache size in megabytes from the provided LscpuCacheEntry. +func l3CacheTotalSizeFromLscpuCacheMB(entry LscpuCacheEntry) (float64, error) { return parseCacheSizeToMB(entry.AllSize, "L3 cache all-size") } -// l3CacheInstanceSizeFromLscpuCacheMB extracts the L3 cache instance size in megabytes from the provided lscpuCacheEntry. -func l3CacheInstanceSizeFromLscpuCacheMB(entry lscpuCacheEntry) (float64, error) { +// l3CacheInstanceSizeFromLscpuCacheMB extracts the L3 cache instance size in megabytes from the provided LscpuCacheEntry. 
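A brief usage sketch of the lscpu cache parsing and size-conversion helpers in this file, not part of the patch; it assumes entries are keyed by cache name, as the tests in this patch suggest:

    // Illustrative only: parse a small `lscpu -C` table and read the L3 sizes.
    out := "NAME ONE-SIZE ALL-SIZE WAYS TYPE LEVEL SETS PHY-LINE COHERENCY-SIZE\n" +
        "L3 336M 672M 16 Unified 3 344064 1 64"
    caches, err := ParseLscpuCacheOutput(out)
    if err == nil {
        instanceMB, _ := l3CacheInstanceSizeFromLscpuCacheMB(caches["L3"]) // 336
        totalMB, _ := l3CacheTotalSizeFromLscpuCacheMB(caches["L3"])       // 672
        _, _ = instanceMB, totalMB
    }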
+func l3CacheInstanceSizeFromLscpuCacheMB(entry LscpuCacheEntry) (float64, error) { return parseCacheSizeToMB(entry.OneSize, "L3 cache one-size") } diff --git a/internal/common/cache_test.go b/internal/extract/cache_test.go similarity index 96% rename from internal/common/cache_test.go rename to internal/extract/cache_test.go index a446c745..16b6642b 100644 --- a/internal/common/cache_test.go +++ b/internal/extract/cache_test.go @@ -1,4 +1,4 @@ -package common +package extract // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause @@ -52,14 +52,14 @@ func TestParseLscpuCacheOutput(t *testing.T) { input string expectedError bool expectedLength int - expectedL3 lscpuCacheEntry + expectedL3 LscpuCacheEntry }{ { name: "Typical table output", input: "NAME ONE-SIZE ALL-SIZE WAYS TYPE LEVEL SETS PHY-LINE COHERENCY-SIZE\nL1d 48K 8.1M 12 Data 1 64 1 64\nL1i 64K 10.8M 16 Instruction 1 64 1 64\nL2 2M 344M 16 Unified 2 2048 1 64\nL3 336M 672M 16 Unified 3 344064 1 64", expectedError: false, expectedLength: 4, - expectedL3: lscpuCacheEntry{ + expectedL3: LscpuCacheEntry{ Name: "L3", OneSize: "336M", AllSize: "672M", @@ -76,7 +76,7 @@ func TestParseLscpuCacheOutput(t *testing.T) { input: "NAME ONE-SIZE ALL-SIZE WAYS TYPE LEVEL\nL3 320M 640M 20 Unified 3", expectedError: false, expectedLength: 1, - expectedL3: lscpuCacheEntry{ + expectedL3: LscpuCacheEntry{ Name: "L3", OneSize: "320M", AllSize: "640M", diff --git a/internal/extract/cpu.go b/internal/extract/cpu.go new file mode 100644 index 00000000..bb6347f3 --- /dev/null +++ b/internal/extract/cpu.go @@ -0,0 +1,258 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package extract + +import ( + "fmt" + "log/slog" + "strconv" + "strings" + + "perfspect/internal/cpus" + "perfspect/internal/script" + "perfspect/internal/util" +) + +// NumaCPUListFromOutput returns the NUMA node CPU list from lscpu output. +func NumaCPUListFromOutput(outputs map[string]script.ScriptOutput) string { + nodeCPUs := ValsFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node[0-9] CPU\(.*:\s*(.+?)$`) + return strings.Join(nodeCPUs, " :: ") +} + +// PPINsFromOutput returns the unique PPINs from MSR output. +func PPINsFromOutput(outputs map[string]script.ScriptOutput) string { + uniquePpins := []string{} + for line := range strings.SplitSeq(outputs[script.PPINName].Stdout, "\n") { + parts := strings.Split(line, ":") + if len(parts) < 2 { + continue + } + ppin := strings.TrimSpace(parts[1]) + found := false + for _, p := range uniquePpins { + if string(p) == ppin { + found = true + break + } + } + if !found && ppin != "" { + uniquePpins = append(uniquePpins, ppin) + } + } + return strings.Join(uniquePpins, ", ") +} + +// ChannelsFromOutput returns the number of memory channels from CPU characteristics. 
+func ChannelsFromOutput(outputs map[string]script.ScriptOutput) string { + family := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) + model := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) + stepping := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) + capid4 := ValFromRegexSubmatch(outputs[script.LspciBitsScriptName].Stdout, `^([0-9a-fA-F]+)`) + devices := ValFromRegexSubmatch(outputs[script.LspciDevicesScriptName].Stdout, `^([0-9]+)`) + implementer := strings.TrimSpace(outputs[script.ArmImplementerScriptName].Stdout) + part := strings.TrimSpace(outputs[script.ArmPartScriptName].Stdout) + dmidecodePart := strings.TrimSpace(outputs[script.ArmDmidecodePartScriptName].Stdout) + cpu, err := cpus.GetCPU(cpus.NewCPUIdentifier(family, model, stepping, capid4, devices, implementer, part, dmidecodePart, "")) + if err != nil { + slog.Error("error getting CPU characteristics", slog.String("error", err.Error())) + return "" + } + return fmt.Sprintf("%d", cpu.MemoryChannelCount) +} + +// TurboEnabledFromOutput returns the turbo/boost status. +func TurboEnabledFromOutput(outputs map[string]script.ScriptOutput) string { + vendor := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Vendor ID:\s*(.+)$`) + switch vendor { + case cpus.IntelVendor: + val := ValFromRegexSubmatch(outputs[script.CpuidScriptName].Stdout, `^Intel Turbo Boost Technology\s*= (.+?)$`) + if val == "true" { + return "Enabled" + } + if val == "false" { + return "Disabled" + } + return "" // unknown value + case cpus.AMDVendor: + val := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Frequency boost.*:\s*(.+?)$`) + if val != "" { + return val + " (AMD Frequency Boost)" + } + } + return "" +} + +// ChaCountFromOutput returns the CHA count from MSR output. +func ChaCountFromOutput(outputs map[string]script.ScriptOutput) string { + // output is the result of three rdmsr calls + // - client cha count + // - cha count + // - spr cha count + // stop when we find a non-zero value + // note: rdmsr writes to stderr on error so we will likely have fewer than 3 lines in stdout + for hexCount := range strings.SplitSeq(outputs[script.ChaCountScriptName].Stdout, "\n") { + if hexCount != "" && hexCount != "0" { + count, err := strconv.ParseInt(hexCount, 16, 64) + if err == nil { + return fmt.Sprintf("%d", count) + } + } + } + return "" +} + +// NumaBalancingFromOutput returns the NUMA balancing status. +func NumaBalancingFromOutput(outputs map[string]script.ScriptOutput) string { + if strings.Contains(outputs[script.NumaBalancingScriptName].Stdout, "1") { + return "Enabled" + } else if strings.Contains(outputs[script.NumaBalancingScriptName].Stdout, "0") { + return "Disabled" + } + return "" +} + +// ClusteringModeFromOutput returns the clustering mode based on microarchitecture and NUMA configuration. 
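To make the mapping below concrete, a worked example of the arithmetic ClusteringModeFromOutput performs; the socket and node counts are hypothetical:

    // Illustrative only: 2 sockets reporting 4 NUMA nodes on a GNR X2 part.
    socketCount, nodeCount := 2, 4
    nodesPerSocket := nodeCount / socketCount // 2 -> "SNC 2" per the switch below; 1 would mean "UMA 4 (Quad)"
    _ = nodesPerSocket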
+func ClusteringModeFromOutput(outputs map[string]script.ScriptOutput) string { + uarch := UarchFromOutput(outputs) + sockets := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) + nodes := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`) + if uarch == "" || sockets == "" || nodes == "" { + return "" + } + socketCount, err := strconv.Atoi(sockets) + if err != nil { + slog.Error("failed to parse socket count", slog.String("error", err.Error())) + return "" + } + nodeCount, err := strconv.Atoi(nodes) + if err != nil { + slog.Error("failed to parse node count", slog.String("error", err.Error())) + return "" + } + if nodeCount == 0 || socketCount == 0 { + slog.Error("node count or socket count is zero") + return "" + } + nodesPerSocket := nodeCount / socketCount + switch uarch { + case cpus.UarchGNR_X1: + return "All2All" + case cpus.UarchGNR_X2: + switch nodesPerSocket { + case 1: + return "UMA 4 (Quad)" + case 2: + return "SNC 2" + } + case cpus.UarchGNR_X3: + switch nodesPerSocket { + case 1: + return "UMA 6 (Hex)" + case 3: + return "SNC 3" + } + case cpus.UarchSRF_SP: + return "UMA 2 (Hemi)" + case cpus.UarchSRF_AP: + switch nodesPerSocket { + case 1: + return "UMA 4 (Quad)" + case 2: + return "SNC 2" + } + case cpus.UarchCWF: + switch nodesPerSocket { + case 1: + return "UMA 6 (Hex)" + case 3: + return "SNC 3" + } + } + return "" +} + +// HyperthreadingFromOutput returns the hyperthreading status from script outputs. +func HyperthreadingFromOutput(outputs map[string]script.ScriptOutput) string { + family := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) + model := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) + stepping := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) + implementer := strings.TrimSpace(outputs[script.ArmImplementerScriptName].Stdout) + part := strings.TrimSpace(outputs[script.ArmPartScriptName].Stdout) + dmidecodePart := strings.TrimSpace(outputs[script.ArmDmidecodePartScriptName].Stdout) + sockets := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) + coresPerSocket := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`) + cpuCount := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(.*:\s*(.+?)$`) + onlineCpus := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^On-line CPU\(s\) list:\s*(.+)$`) + threadsPerCore := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Thread\(s\) per core:\s*(.+)$`) + + numCPUs, err := strconv.Atoi(cpuCount) // logical CPUs + if err != nil { + slog.Error("error parsing cpus from lscpu") + return "" + } + onlineCpusList, err := util.SelectiveIntRangeToIntList(onlineCpus) // logical online CPUs + numOnlineCpus := len(onlineCpusList) + if err != nil { + slog.Error("error parsing online cpus from lscpu") + numOnlineCpus = 0 // set to 0 to indicate parsing failed, will use numCPUs instead + } + numThreadsPerCore, err := strconv.Atoi(threadsPerCore) // logical threads per core + if err != nil { + slog.Error("error parsing threads per core from lscpu") + numThreadsPerCore = 0 + } + numSockets, err := strconv.Atoi(sockets) + if err != nil { + slog.Error("error parsing sockets from lscpu") + return "" + } + numCoresPerSocket, err := strconv.Atoi(coresPerSocket) // physical cores + if err != nil { + slog.Error("error parsing cores per sockets from 
lscpu") + return "" + } + cpu, err := cpus.GetCPU(cpus.NewCPUIdentifier(family, model, stepping, "", "", implementer, part, dmidecodePart, "")) + if err != nil { + slog.Warn("error getting CPU characteristics", slog.String("error", err.Error())) + return "" + } + if numOnlineCpus > 0 && numOnlineCpus < numCPUs { + // if online CPUs list is available, use it to determine the number of CPUs + // supersedes lscpu output of numCPUs which counts CPUs on the system, not online CPUs + numCPUs = numOnlineCpus + } + if cpu.LogicalThreadCount < 2 { + return "N/A" + } else if numThreadsPerCore == 1 { + // if threads per core is 1, hyperthreading is disabled + return "Disabled" + } else if numThreadsPerCore >= 2 { + // if threads per core is greater than or equal to 2, hyperthreading is enabled + return "Enabled" + } else if numCPUs > numCoresPerSocket*numSockets { + // if the threads per core attribute is not available, we can still check if hyperthreading is enabled + // by checking if the number of logical CPUs is greater than the number of physical cores + return "Enabled" + } else { + return "Disabled" + } +} + +// UarchFromOutput returns the microarchitecture of the CPU or an empty string, if no match is found. +func UarchFromOutput(outputs map[string]script.ScriptOutput) string { + family := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) + model := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) + stepping := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) + capid4 := ValFromRegexSubmatch(outputs[script.LspciBitsScriptName].Stdout, `^([0-9a-fA-F]+)`) + devices := ValFromRegexSubmatch(outputs[script.LspciDevicesScriptName].Stdout, `^([0-9]+)`) + implementer := strings.TrimSpace(outputs[script.ArmImplementerScriptName].Stdout) + part := strings.TrimSpace(outputs[script.ArmPartScriptName].Stdout) + dmidecodePart := strings.TrimSpace(outputs[script.ArmDmidecodePartScriptName].Stdout) + cpu, err := cpus.GetCPU(cpus.NewCPUIdentifier(family, model, stepping, capid4, devices, implementer, part, dmidecodePart, "")) + if err != nil { + slog.Error("error getting CPU characteristics", slog.String("error", err.Error())) + return "" + } + return cpu.MicroArchitecture +} diff --git a/internal/common/table_helpers_test.go b/internal/extract/cpu_test.go similarity index 65% rename from internal/common/table_helpers_test.go rename to internal/extract/cpu_test.go index bd7237a9..3a2bcb9e 100644 --- a/internal/common/table_helpers_test.go +++ b/internal/extract/cpu_test.go @@ -1,161 +1,12 @@ -package common - -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause +package extract import ( "perfspect/internal/script" - "reflect" "testing" ) -func TestGetSectionsFromOutput(t *testing.T) { - tests := []struct { - name string - output string - want map[string]string - }{ - { - name: "Valid sections with content", - output: `########## Section A ########## -Content A1 -Content A2 -########## Section B ########## -Content B1 -Content B2 -########## Section C ########## -Content C1`, - want: map[string]string{ - "Section A": "Content A1\nContent A2\n", - "Section B": "Content B1\nContent B2\n", - "Section C": "Content C1\n", - }, - }, - { - name: "Valid sections with empty content", - output: `########## Section A ########## -########## Section B ########## -########## Section C ##########`, - want: map[string]string{ - "Section A": "", - "Section B": "", - "Section C": "", - }, - }, - 
{ - name: "No sections", - output: "No section headers here", - want: map[string]string{}, - }, - { - name: "Empty output", - output: ``, - want: map[string]string{}, - }, - { - name: "Empty lines in output", - output: "\n\n\n", - want: map[string]string{}, - }, - { - name: "Section with trailing newlines", - output: `########## Section A ########## - -Content A1 - -########## Section B ########## -Content B1`, - want: map[string]string{ - "Section A": "\nContent A1\n\n", - "Section B": "Content B1\n", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := GetSectionsFromOutput(tt.output) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("getSectionsFromOutput() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestSectionValueFromOutput(t *testing.T) { - tests := []struct { - name string - output string - sectionName string - want string - }{ - { - name: "Section A exists with content", - output: `########## Section A ########## -Content A1 -Content A2 -########## Section B ########## -Content B1 -Content B2`, - sectionName: "Section A", - want: "Content A1\nContent A2\n", - }, - { - name: "Section B exists with content", - output: `########## Section A ########## -Content A1 -Content A2 -########## Section B ########## -Content B1 -Content B2`, - sectionName: "Section B", - want: "Content B1\nContent B2\n", - }, - { - name: "Section exists with no content", - output: `########## Section A ########## -########## Section B ########## -Content B1`, - sectionName: "Section A", - want: "", - }, - { - name: "Section does not exist", - output: `########## Section A ########## -Content A1 -########## Section B ########## -Content B1`, - sectionName: "Section C", - want: "", - }, - { - name: "Empty output", - output: "", - sectionName: "Section A", - want: "", - }, - { - name: "Section with trailing newlines", - output: `########## Section A ########## - -Content A1 - -########## Section B ########## -Content B1`, - sectionName: "Section A", - want: "\nContent A1\n\n", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := SectionValueFromOutput(tt.output, tt.sectionName) - if got != tt.want { - t.Errorf("sectionValueFromOutput() = %v, want %v", got, tt.want) - } - }) - } -} +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause func TestHyperthreadingFromOutput(t *testing.T) { tests := []struct { diff --git a/cmd/report/dimm.go b/internal/extract/dimm.go similarity index 68% rename from cmd/report/dimm.go rename to internal/extract/dimm.go index c9140e70..d57f3631 100644 --- a/cmd/report/dimm.go +++ b/internal/extract/dimm.go @@ -1,29 +1,90 @@ -package report - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package extract + import ( "fmt" "log/slog" - "perfspect/internal/common" - "perfspect/internal/script" "regexp" "strconv" "strings" + + "perfspect/internal/script" +) + +// DerivedDIMMFields holds derived socket, channel, and slot information for a DIMM. +type DerivedDIMMFields struct { + Socket string + Channel string + Slot string +} + +// DIMM info field indices +const ( + BankLocatorIdx = iota + LocatorIdx + ManufacturerIdx + PartIdx + SerialIdx + SizeIdx + TypeIdx + DetailIdx + SpeedIdx + RankIdx + ConfiguredSpeedIdx + DerivedSocketIdx + DerivedChannelIdx + DerivedSlotIdx ) -func populatedChannelsFromOutput(outputs map[string]script.ScriptOutput) string { +// InstalledMemoryFromOutput returns a summary of installed memory from script outputs. 
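For reference, the summary string the function below produces for a hypothetical population of 16 x 32 GB DDR5 DIMMs rated 4800 MT/s and configured at 4400 MT/s; values are invented for illustration and not part of the patch:

    // Illustrative only: same format string as InstalledMemoryFromOutput below.
    summary := fmt.Sprintf("%d%s (%dx%d%s %s %s [%s])", 512, "GB", 16, 32, "GB", "DDR5", "4800MT/s", "4400MT/s")
    // summary == "512GB (16x32GB DDR5 4800MT/s [4400MT/s])"
    _ = summary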
+func InstalledMemoryFromOutput(outputs map[string]script.ScriptOutput) string { + dimmInfo := DimmInfoFromDmiDecode(outputs[script.DmidecodeScriptName].Stdout) + dimmTypeCount := make(map[string]int) + for _, dimm := range dimmInfo { + dimmKey := dimm[TypeIdx] + ":" + dimm[SizeIdx] + ":" + dimm[SpeedIdx] + ":" + dimm[ConfiguredSpeedIdx] + if count, ok := dimmTypeCount[dimmKey]; ok { + dimmTypeCount[dimmKey] = count + 1 + } else { + dimmTypeCount[dimmKey] = 1 + } + } + var summaries []string + re := regexp.MustCompile(`(\d+)\s*(\w*)`) + for dimmKey, count := range dimmTypeCount { + fields := strings.Split(dimmKey, ":") + match := re.FindStringSubmatch(fields[1]) // size field + if match != nil { + size, err := strconv.Atoi(match[1]) + if err != nil { + slog.Warn("Don't recognize DIMM size format.", slog.String("field", fields[1])) + return "" + } + sum := count * size + unit := match[2] + dimmType := fields[0] + speed := strings.ReplaceAll(fields[2], " ", "") + configuredSpeed := strings.ReplaceAll(fields[3], " ", "") + summary := fmt.Sprintf("%d%s (%dx%d%s %s %s [%s])", sum, unit, count, size, unit, dimmType, speed, configuredSpeed) + summaries = append(summaries, summary) + } + } + return strings.Join(summaries, "; ") +} + +// PopulatedChannelsFromOutput returns the number of populated memory channels. +func PopulatedChannelsFromOutput(outputs map[string]script.ScriptOutput) string { channelsMap := make(map[string]bool) - dimmInfo := common.DimmInfoFromDmiDecode(outputs[script.DmidecodeScriptName].Stdout) - derivedDimmFields := derivedDimmsFieldFromOutput(outputs) + dimmInfo := DimmInfoFromDmiDecode(outputs[script.DmidecodeScriptName].Stdout) + derivedDimmFields := DerivedDimmsFieldFromOutput(outputs) if len(derivedDimmFields) != len(dimmInfo) { slog.Warn("derivedDimmFields and dimmInfo have different lengths", slog.Int("derivedDimmFields", len(derivedDimmFields)), slog.Int("dimmInfo", len(dimmInfo))) return "" } for i, dimm := range dimmInfo { - if !strings.Contains(dimm[common.SizeIdx], "No") { - channelsMap[derivedDimmFields[i].socket+","+derivedDimmFields[i].channel] = true + if !strings.Contains(dimm[SizeIdx], "No") { + channelsMap[derivedDimmFields[i].Socket+","+derivedDimmFields[i].Channel] = true } } if len(channelsMap) > 0 { @@ -32,24 +93,18 @@ func populatedChannelsFromOutput(outputs map[string]script.ScriptOutput) string return "" } -type derivedFields struct { - socket string - channel string - slot string -} - -// derivedDimmsFieldFromOutput returns a slice of derived fields from the output of a script. -func derivedDimmsFieldFromOutput(outputs map[string]script.ScriptOutput) []derivedFields { - dimmInfo := common.DimmInfoFromDmiDecode(outputs[script.DmidecodeScriptName].Stdout) - var derivedFields []derivedFields +// DerivedDimmsFieldFromOutput returns a slice of derived fields from the output of a script. 
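A worked example of the Dell locator rule applied by deriveDIMMInfoDell further below, assuming 6 channels per socket as in the A1-A12/B1-B12 comment; the locator values are hypothetical:

    // Illustrative only: Locator "A8" -> socket 'A'-'A' = 0; 8 > 6, so slot = 1
    // and channel = 8 - (6 + 1) = 1, matching "A2 and A8 are channel 1".
    channel := 8 - (6 + 1)
    _ = channel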
+func DerivedDimmsFieldFromOutput(outputs map[string]script.ScriptOutput) []DerivedDIMMFields { + dimmInfo := DimmInfoFromDmiDecode(outputs[script.DmidecodeScriptName].Stdout) + var derivedFields []DerivedDIMMFields var err error - channels := channelsFromOutput(outputs) + channels := ChannelsFromOutput(outputs) numChannels, err := strconv.Atoi(channels) if err != nil || numChannels == 0 { return nil } - platformVendor := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "0", `Vendor:\s*(.*)`) - numSockets, err := strconv.Atoi(common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(.*:\s*(.+?)$`)) + platformVendor := ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "0", `Vendor:\s*(.*)`) + numSockets, err := strconv.Atoi(ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(.*:\s*(.+?)$`)) if err != nil || numSockets == 0 { return nil } @@ -86,15 +141,15 @@ func derivedDimmsFieldFromOutput(outputs map[string]script.ScriptOutput) []deriv * "Bank Locator" for all DIMMs is "Not Specified" and "Locator" is A1-A12 and B1-B12. * A1 and A7 are channel 0, A2 and A8 are channel 1, etc. */ -func deriveDIMMInfoDell(dimms [][]string, channelsPerSocket int) ([]derivedFields, error) { - derivedFields := make([]derivedFields, len(dimms)) +func deriveDIMMInfoDell(dimms [][]string, channelsPerSocket int) ([]DerivedDIMMFields, error) { + derivedFields := make([]DerivedDIMMFields, len(dimms)) re := regexp.MustCompile(`([ABCD])([1-9]\d*)`) for i, dimm := range dimms { - if !strings.Contains(dimm[common.BankLocatorIdx], "Not Specified") { + if !strings.Contains(dimm[BankLocatorIdx], "Not Specified") { err := fmt.Errorf("doesn't conform to expected Dell Bank Locator format") return nil, err } - match := re.FindStringSubmatch(dimm[common.LocatorIdx]) + match := re.FindStringSubmatch(dimm[LocatorIdx]) if match == nil { err := fmt.Errorf("doesn't conform to expected Dell Locator format") return nil, err @@ -108,18 +163,18 @@ func deriveDIMMInfoDell(dimms [][]string, channelsPerSocket int) ([]derivedField } // Socket // A = 0, B = 1, C = 2, D = 3 - derivedFields[i].socket = fmt.Sprintf("%d", int(alpha[0])-int('A')) + derivedFields[i].Socket = fmt.Sprintf("%d", int(alpha[0])-int('A')) // Slot if numeric <= channelsPerSocket { - derivedFields[i].slot = "0" + derivedFields[i].Slot = "0" } else { - derivedFields[i].slot = "1" + derivedFields[i].Slot = "1" } // Channel if numeric <= channelsPerSocket { - derivedFields[i].channel = fmt.Sprintf("%d", numeric-1) + derivedFields[i].Channel = fmt.Sprintf("%d", numeric-1) } else { - derivedFields[i].channel = fmt.Sprintf("%d", numeric-(channelsPerSocket+1)) + derivedFields[i].Channel = fmt.Sprintf("%d", numeric-(channelsPerSocket+1)) } } return derivedFields, nil @@ -146,16 +201,16 @@ func deriveDIMMInfoDell(dimms [][]string, channelsPerSocket int) ([]derivedField * NODE 7 CPU1 Channel7 DIMM0 * NODE 7 CPU1 Channel7 DIMM1 */ -func deriveDIMMInfoEC2(dimms [][]string, channelsPerSocket int) ([]derivedFields, error) { - derivedFields := make([]derivedFields, len(dimms)) +func deriveDIMMInfoEC2(dimms [][]string, channelsPerSocket int) ([]DerivedDIMMFields, error) { + derivedFields := make([]DerivedDIMMFields, len(dimms)) c5bankLocRe := regexp.MustCompile(`NODE\s+([1-9])`) c5locRe := regexp.MustCompile(`DIMM_(.)(.)`) c6ibankLocRe := regexp.MustCompile(`NODE\s+(\d+)`) c6ilocRe := regexp.MustCompile(`CPU(\d+)\s+Channel(\d+)\s+DIMM(\d+)`) for i, dimm := range dimms { // try c5.metal format - 
bankLocMatch := c5bankLocRe.FindStringSubmatch(dimm[common.BankLocatorIdx]) - locMatch := c5locRe.FindStringSubmatch(dimm[common.LocatorIdx]) + bankLocMatch := c5bankLocRe.FindStringSubmatch(dimm[BankLocatorIdx]) + locMatch := c5locRe.FindStringSubmatch(dimm[LocatorIdx]) if locMatch != nil && bankLocMatch != nil { var socket, channel, slot int socket, _ = strconv.Atoi(bankLocMatch[1]) @@ -169,22 +224,22 @@ func deriveDIMMInfoEC2(dimms [][]string, channelsPerSocket int) ([]derivedFields return nil, err } slot, _ = strconv.Atoi(locMatch[2]) - derivedFields[i].socket = fmt.Sprintf("%d", socket) - derivedFields[i].channel = fmt.Sprintf("%d", channel) - derivedFields[i].slot = fmt.Sprintf("%d", slot) + derivedFields[i].Socket = fmt.Sprintf("%d", socket) + derivedFields[i].Channel = fmt.Sprintf("%d", channel) + derivedFields[i].Slot = fmt.Sprintf("%d", slot) continue } // try c6i.metal format - bankLocMatch = c6ibankLocRe.FindStringSubmatch(dimm[common.BankLocatorIdx]) - locMatch = c6ilocRe.FindStringSubmatch(dimm[common.LocatorIdx]) + bankLocMatch = c6ibankLocRe.FindStringSubmatch(dimm[BankLocatorIdx]) + locMatch = c6ilocRe.FindStringSubmatch(dimm[LocatorIdx]) if locMatch != nil && bankLocMatch != nil { var socket, channel, slot int socket, _ = strconv.Atoi(locMatch[1]) channel, _ = strconv.Atoi(locMatch[2]) slot, _ = strconv.Atoi(locMatch[3]) - derivedFields[i].socket = fmt.Sprintf("%d", socket) - derivedFields[i].channel = fmt.Sprintf("%d", channel) - derivedFields[i].slot = fmt.Sprintf("%d", slot) + derivedFields[i].Socket = fmt.Sprintf("%d", socket) + derivedFields[i].Channel = fmt.Sprintf("%d", channel) + derivedFields[i].Slot = fmt.Sprintf("%d", slot) continue } err := fmt.Errorf("doesn't conform to expected EC2 format") @@ -197,18 +252,18 @@ func deriveDIMMInfoEC2(dimms [][]string, channelsPerSocket int) ([]derivedFields * Locator field has these: PROC 1 DIMM 1, PROC 1 DIMM 2, etc... 
* DIMM/slot numbering on board follows logic shown below */ -func deriveDIMMInfoHPE(dimms [][]string, numSockets int, channelsPerSocket int) ([]derivedFields, error) { - derivedFields := make([]derivedFields, len(dimms)) +func deriveDIMMInfoHPE(dimms [][]string, numSockets int, channelsPerSocket int) ([]DerivedDIMMFields, error) { + derivedFields := make([]DerivedDIMMFields, len(dimms)) slotsPerChannel := len(dimms) / (numSockets * channelsPerSocket) re := regexp.MustCompile(`PROC ([1-9]\d*) DIMM ([1-9]\d*)`) for i, dimm := range dimms { - if !strings.Contains(dimm[common.BankLocatorIdx], "Not Specified") { - err := fmt.Errorf("doesn't conform to expected HPE Bank Locator format: %s", dimm[common.BankLocatorIdx]) + if !strings.Contains(dimm[BankLocatorIdx], "Not Specified") { + err := fmt.Errorf("doesn't conform to expected HPE Bank Locator format: %s", dimm[BankLocatorIdx]) return nil, err } - match := re.FindStringSubmatch(dimm[common.LocatorIdx]) + match := re.FindStringSubmatch(dimm[LocatorIdx]) if match == nil { - err := fmt.Errorf("doesn't conform to expected HPE Locator format: %s", dimm[common.LocatorIdx]) + err := fmt.Errorf("doesn't conform to expected HPE Locator format: %s", dimm[LocatorIdx]) return nil, err } socket, err := strconv.Atoi(match[1]) @@ -217,21 +272,21 @@ func deriveDIMMInfoHPE(dimms [][]string, numSockets int, channelsPerSocket int) return nil, err } socket -= 1 - derivedFields[i].socket = fmt.Sprintf("%d", socket) + derivedFields[i].Socket = fmt.Sprintf("%d", socket) dimmNum, err := strconv.Atoi(match[2]) if err != nil { err := fmt.Errorf("failed to parse DIMM number: %s", match[2]) return nil, err } channel := (dimmNum - 1) / slotsPerChannel - derivedFields[i].channel = fmt.Sprintf("%d", channel) + derivedFields[i].Channel = fmt.Sprintf("%d", channel) var slot int if (dimmNum < channelsPerSocket && dimmNum%2 != 0) || (dimmNum > channelsPerSocket && dimmNum%2 == 0) { slot = 0 } else { slot = 1 } - derivedFields[i].slot = fmt.Sprintf("%d", slot) + derivedFields[i].Slot = fmt.Sprintf("%d", slot) } return derivedFields, nil } @@ -242,51 +297,51 @@ This method is inherently unreliable/incomplete as each OEM can set these fields as they see fit. Returns None when there's no match. 
*/ -func getDIMMSocketSlot(dimmType DIMMType, reBankLoc *regexp.Regexp, reLoc *regexp.Regexp, bankLocator string, locator string) (socket int, slot int, err error) { +func getDIMMSocketSlot(dimmType dimmType, reBankLoc *regexp.Regexp, reLoc *regexp.Regexp, bankLocator string, locator string) (socket int, slot int, err error) { switch dimmType { - case DIMMType0: + case dimmType0: match := reLoc.FindStringSubmatch(locator) if match != nil { socket, _ = strconv.Atoi(match[1]) slot, _ = strconv.Atoi(match[3]) } return - case DIMMType1: + case dimmType1: match := reLoc.FindStringSubmatch(locator) if match != nil { socket, _ = strconv.Atoi(match[1]) slot, _ = strconv.Atoi(match[3]) return } - case DIMMType2: + case dimmType2: match := reLoc.FindStringSubmatch(locator) if match != nil { socket, _ = strconv.Atoi(match[1]) slot, _ = strconv.Atoi(match[3]) return } - case DIMMType3: + case dimmType3: match := reBankLoc.FindStringSubmatch(bankLocator) if match != nil { socket, _ = strconv.Atoi(match[1]) slot, _ = strconv.Atoi(match[3]) return } - case DIMMType4: + case dimmType4: match := reBankLoc.FindStringSubmatch(bankLocator) if match != nil { socket, _ = strconv.Atoi(match[1]) slot, _ = strconv.Atoi(match[4]) return } - case DIMMType5: + case dimmType5: match := reBankLoc.FindStringSubmatch(bankLocator) if match != nil { socket, _ = strconv.Atoi(match[1]) slot, _ = strconv.Atoi(match[3]) return } - case DIMMType6: + case dimmType6: match := reLoc.FindStringSubmatch(locator) if match != nil { socket, _ = strconv.Atoi(match[1]) @@ -295,7 +350,7 @@ func getDIMMSocketSlot(dimmType DIMMType, reBankLoc *regexp.Regexp, reLoc *regex slot -= 1 return } - case DIMMType7: + case dimmType7: match := reLoc.FindStringSubmatch(locator) if match != nil { socket, _ = strconv.Atoi(match[1]) @@ -303,7 +358,7 @@ func getDIMMSocketSlot(dimmType DIMMType, reBankLoc *regexp.Regexp, reLoc *regex slot -= 1 return } - case DIMMType8: + case dimmType8: match := reBankLoc.FindStringSubmatch(bankLocator) if match != nil { match2 := reLoc.FindStringSubmatch(locator) @@ -315,28 +370,28 @@ func getDIMMSocketSlot(dimmType DIMMType, reBankLoc *regexp.Regexp, reLoc *regex return } } - case DIMMType9: + case dimmType9: match := reLoc.FindStringSubmatch(locator) if match != nil { socket, _ = strconv.Atoi(match[1]) slot, _ = strconv.Atoi(match[2]) return } - case DIMMType10: + case dimmType10: match := reBankLoc.FindStringSubmatch(bankLocator) if match != nil { socket = 0 slot, _ = strconv.Atoi(match[2]) return } - case DIMMType11: + case dimmType11: match := reLoc.FindStringSubmatch(locator) if match != nil { socket = 0 slot, _ = strconv.Atoi(match[2]) return } - case DIMMType12: + case dimmType12: match := reLoc.FindStringSubmatch(locator) if match != nil { socket, _ = strconv.Atoi(match[1]) @@ -345,7 +400,7 @@ func getDIMMSocketSlot(dimmType DIMMType, reBankLoc *regexp.Regexp, reLoc *regex slot = slot - 1 return } - case DIMMType13: + case dimmType13: match := reLoc.FindStringSubmatch(locator) if match != nil { socket, _ = strconv.Atoi(match[1]) @@ -353,14 +408,14 @@ func getDIMMSocketSlot(dimmType DIMMType, reBankLoc *regexp.Regexp, reLoc *regex slot = slot - 1 return } - case DIMMType14: + case dimmType14: match := reLoc.FindStringSubmatch(locator) if match != nil { socket, _ = strconv.Atoi(match[1]) slot = 0 return } - case DIMMType15: + case dimmType15: match := reLoc.FindStringSubmatch(locator) if match != nil { socket, _ = strconv.Atoi(match[1]) @@ -372,50 +427,50 @@ func getDIMMSocketSlot(dimmType DIMMType, reBankLoc 
*regexp.Regexp, reLoc *regex return } -type DIMMType int +type dimmType int const ( - DIMMTypeUNKNOWN = -1 - DIMMType0 DIMMType = iota - DIMMType1 - DIMMType2 - DIMMType3 - DIMMType4 - DIMMType5 - DIMMType6 - DIMMType7 - DIMMType8 - DIMMType9 - DIMMType10 - DIMMType11 - DIMMType12 - DIMMType13 - DIMMType14 - DIMMType15 + dimmTypeUNKNOWN = -1 + dimmType0 dimmType = iota + dimmType1 + dimmType2 + dimmType3 + dimmType4 + dimmType5 + dimmType6 + dimmType7 + dimmType8 + dimmType9 + dimmType10 + dimmType11 + dimmType12 + dimmType13 + dimmType14 + dimmType15 ) -func getDIMMParseInfo(bankLocator string, locator string) (dimmType DIMMType, reBankLoc *regexp.Regexp, reLoc *regexp.Regexp) { - dimmType = DIMMTypeUNKNOWN +func getDIMMParseInfo(bankLocator string, locator string) (dt dimmType, reBankLoc *regexp.Regexp, reLoc *regexp.Regexp) { + dt = dimmTypeUNKNOWN // Inspur ICX 2s system // Needs to be before next regex pattern to differentiate reLoc = regexp.MustCompile(`CPU([0-9])_C([0-9])D([0-9])`) if reLoc.FindStringSubmatch(locator) != nil { - dimmType = DIMMType0 + dt = dimmType0 return } reLoc = regexp.MustCompile(`CPU([0-9])_([A-Z])([0-9])`) if reLoc.FindStringSubmatch(locator) != nil { - dimmType = DIMMType1 + dt = dimmType1 return } reLoc = regexp.MustCompile(`CPU([0-9])_MC._DIMM_([A-Z])([0-9])`) if reLoc.FindStringSubmatch(locator) != nil { - dimmType = DIMMType2 + dt = dimmType2 return } reBankLoc = regexp.MustCompile(`NODE ([0-9]) CHANNEL ([0-9]) DIMM ([0-9])`) if reBankLoc.FindStringSubmatch(bankLocator) != nil { - dimmType = DIMMType3 + dt = dimmType3 return } /* Added for SuperMicro X13DET-B (SPR). Must be before Type4 because Type4 matches, but data in BankLoc is invalid. @@ -429,17 +484,17 @@ func getDIMMParseInfo(bankLocator string, locator string) (dimmType DIMMType, re */ reLoc = regexp.MustCompile(`P([1,2])-DIMM([A-L])([1,2])`) if reLoc.FindStringSubmatch(locator) != nil { - dimmType = DIMMType12 + dt = dimmType12 return } reBankLoc = regexp.MustCompile(`P([0-9])_Node([0-9])_Channel([0-9])_Dimm([0-9])`) if reBankLoc.FindStringSubmatch(bankLocator) != nil { - dimmType = DIMMType4 + dt = dimmType4 return } reBankLoc = regexp.MustCompile(`_Node([0-9])_Channel([0-9])_Dimm([0-9])`) if reBankLoc.FindStringSubmatch(bankLocator) != nil { - dimmType = DIMMType5 + dt = dimmType5 return } /* SKX SDP @@ -450,7 +505,7 @@ func getDIMMParseInfo(bankLocator string, locator string) (dimmType DIMMType, re if reLoc.FindStringSubmatch(locator) != nil { reBankLoc = regexp.MustCompile(`NODE ([1-8])`) if reBankLoc.FindStringSubmatch(bankLocator) != nil { - dimmType = DIMMType6 + dt = dimmType6 return } } @@ -462,7 +517,7 @@ func getDIMMParseInfo(bankLocator string, locator string) (dimmType DIMMType, re if reLoc.FindStringSubmatch(locator) != nil { reBankLoc = regexp.MustCompile(`NODE ([0-9]+)`) if reBankLoc.FindStringSubmatch(bankLocator) != nil { - dimmType = DIMMType7 + dt = dimmType7 return } } @@ -470,7 +525,7 @@ func getDIMMParseInfo(bankLocator string, locator string) (dimmType DIMMType, re if reBankLoc.FindStringSubmatch(bankLocator) != nil { reLoc = regexp.MustCompile(`DIMM_([A-Z])([1-9]\d*)`) if reLoc.FindStringSubmatch(locator) != nil { - dimmType = DIMMType8 + dt = dimmType8 return } } @@ -483,7 +538,7 @@ func getDIMMParseInfo(bankLocator string, locator string) (dimmType DIMMType, re */ reLoc = regexp.MustCompile(`DIMM_P([0-1])_[A-Z]([0-1])`) if reLoc.FindStringSubmatch(locator) != nil { - dimmType = DIMMType9 + dt = dimmType9 return } /* my NUC @@ -492,7 +547,7 @@ func 
getDIMMParseInfo(bankLocator string, locator string) (dimmType DIMMType, re */ reBankLoc = regexp.MustCompile(`CHANNEL ([A-D]) DIMM([0-9])`) if reBankLoc.FindStringSubmatch(bankLocator) != nil { - dimmType = DIMMType10 + dt = dimmType10 return } /* Alder Lake Client Desktop @@ -501,7 +556,7 @@ func getDIMMParseInfo(bankLocator string, locator string) (dimmType DIMMType, re */ reLoc = regexp.MustCompile(`Controller([0-1]).*DIMM([0-1])`) if reLoc.FindStringSubmatch(locator) != nil { - dimmType = DIMMType11 + dt = dimmType11 return } /* BIRCHSTREAM @@ -515,7 +570,7 @@ func getDIMMParseInfo(bankLocator string, locator string) (dimmType DIMMType, re */ reLoc = regexp.MustCompile(`CPU([\d])_DIMM_([A-H])([1-2])`) if reLoc.FindStringSubmatch(locator) != nil { - dimmType = DIMMType13 + dt = dimmType13 return } /* BIRCHSTREAM GRANITE RAPIDS AP/X3 @@ -529,7 +584,7 @@ func getDIMMParseInfo(bankLocator string, locator string) (dimmType DIMMType, re */ reLoc = regexp.MustCompile(`CPU([\d])_DIMM_([A-L])`) if reLoc.FindStringSubmatch(locator) != nil { - dimmType = DIMMType14 + dt = dimmType14 return } /* FOREST CITY PLATFORM FOR SRF AND GNR @@ -543,31 +598,31 @@ func getDIMMParseInfo(bankLocator string, locator string) (dimmType DIMMType, re */ reLoc = regexp.MustCompile(`CPU([\d]) CH([0-7])/D([0-1])`) if reLoc.FindStringSubmatch(locator) != nil { - dimmType = DIMMType15 + dt = dimmType15 return } return } -func deriveDIMMInfoOther(dimms [][]string, channelsPerSocket int) ([]derivedFields, error) { - derivedFields := make([]derivedFields, len(dimms)) +func deriveDIMMInfoOther(dimms [][]string, channelsPerSocket int) ([]DerivedDIMMFields, error) { + derivedFields := make([]DerivedDIMMFields, len(dimms)) previousSocket, channel := -1, 0 if len(dimms) == 0 { err := fmt.Errorf("no DIMMs") return nil, err } - if len(dimms[0]) <= max(common.BankLocatorIdx, common.LocatorIdx) { + if len(dimms[0]) <= max(BankLocatorIdx, LocatorIdx) { err := fmt.Errorf("DIMM data has insufficient fields") return nil, err } - dimmType, reBankLoc, reLoc := getDIMMParseInfo(dimms[0][common.BankLocatorIdx], dimms[0][common.LocatorIdx]) - if dimmType == DIMMTypeUNKNOWN { + dt, reBankLoc, reLoc := getDIMMParseInfo(dimms[0][BankLocatorIdx], dimms[0][LocatorIdx]) + if dt == dimmTypeUNKNOWN { err := fmt.Errorf("unknown DIMM identification format") return nil, err } for i, dimm := range dimms { var socket, slot int - socket, slot, err := getDIMMSocketSlot(dimmType, reBankLoc, reLoc, dimm[common.BankLocatorIdx], dimm[common.LocatorIdx]) + socket, slot, err := getDIMMSocketSlot(dt, reBankLoc, reLoc, dimm[BankLocatorIdx], dimm[LocatorIdx]) if err != nil { slog.Info("Couldn't extract socket and slot from DIMM info", slog.String("error", err.Error())) return nil, nil @@ -583,9 +638,28 @@ func deriveDIMMInfoOther(dimms [][]string, channelsPerSocket int) ([]derivedFiel return nil, err } previousSocket = socket - derivedFields[i].socket = fmt.Sprintf("%d", socket) - derivedFields[i].channel = fmt.Sprintf("%d", channel) - derivedFields[i].slot = fmt.Sprintf("%d", slot) + derivedFields[i].Socket = fmt.Sprintf("%d", socket) + derivedFields[i].Channel = fmt.Sprintf("%d", channel) + derivedFields[i].Slot = fmt.Sprintf("%d", slot) } return derivedFields, nil } + +// DimmInfoFromDmiDecode extracts DIMM information from DMI decode output. 
+func DimmInfoFromDmiDecode(dmiDecodeOutput string) [][]string { + return ValsArrayFromDmiDecodeRegexSubmatch( + dmiDecodeOutput, + "17", + `^Bank Locator:\s*(.+?)$`, + `^Locator:\s*(.+?)$`, + `^Manufacturer:\s*(.+?)$`, + `^Part Number:\s*(.+?)\s*$`, + `^Serial Number:\s*(.+?)\s*$`, + `^Size:\s*(.+?)$`, + `^Type:\s*(.+?)$`, + `^Type Detail:\s*(.+?)$`, + `^Speed:\s*(.+?)$`, + `^Rank:\s*(.+?)$`, + `^Configured.*Speed:\s*(.+?)$`, + ) +} diff --git a/internal/extract/extract.go b/internal/extract/extract.go new file mode 100644 index 00000000..c828bd56 --- /dev/null +++ b/internal/extract/extract.go @@ -0,0 +1,174 @@ +// Package extract provides helper functions for extracting values from script outputs +// to populate table fields for reports. +package extract + +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +import ( + "log/slog" + "regexp" + "strings" +) + +// ValFromRegexSubmatch searches for a regex pattern in the given output string and returns the first captured group. +// If no match is found, an empty string is returned. +func ValFromRegexSubmatch(output string, regex string) string { + re := regexp.MustCompile(regex) + for line := range strings.SplitSeq(output, "\n") { + match := re.FindStringSubmatch(strings.TrimSpace(line)) + if len(match) > 1 { + return match[1] + } + } + return "" +} + +// ValsFromRegexSubmatch extracts the captured groups from each line in the output +// that matches the given regular expression. +// It returns a slice of strings containing the captured values. +func ValsFromRegexSubmatch(output string, regex string) []string { + var vals []string + re := regexp.MustCompile(regex) + for line := range strings.SplitSeq(output, "\n") { + match := re.FindStringSubmatch(strings.TrimSpace(line)) + if len(match) > 1 { + vals = append(vals, match[1]) + } + } + return vals +} + +// ValsArrayFromRegexSubmatch returns all matches for all capture groups in regex +func ValsArrayFromRegexSubmatch(output string, regex string) (vals [][]string) { + re := regexp.MustCompile(regex) + for line := range strings.SplitSeq(output, "\n") { + match := re.FindStringSubmatch(line) + if len(match) > 1 { + vals = append(vals, match[1:]) + } + } + return +} + +// ValFromDmiDecodeRegexSubmatch extracts a value from the DMI decode output using a regular expression. +// It takes the DMI decode output, the DMI type, and the regular expression as input parameters. +// It returns the extracted value as a string. +func ValFromDmiDecodeRegexSubmatch(dmiDecodeOutput string, dmiType string, regex string) string { + return ValFromRegexSubmatch(GetDmiDecodeType(dmiDecodeOutput, dmiType), regex) +} + +// ValsArrayFromDmiDecodeRegexSubmatch extracts multiple values from DMI decode entries. +func ValsArrayFromDmiDecodeRegexSubmatch(dmiDecodeOutput string, dmiType string, regexes ...string) (vals [][]string) { + var res []*regexp.Regexp + for _, r := range regexes { + re := regexp.MustCompile(r) + res = append(res, re) + } + for _, entry := range GetDmiDecodeEntries(dmiDecodeOutput, dmiType) { + row := make([]string, len(res)) + for _, line := range entry { + for i, re := range res { + match := re.FindStringSubmatch(strings.TrimSpace(line)) + if len(match) > 1 { + row[i] = match[1] + } + } + } + vals = append(vals, row) + } + return +} + +// GetDmiDecodeType extracts the lines from the given `dmiDecodeOutput` that belong to the specified `dmiType`. 
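A short usage sketch of the DMI decode helpers above (ValFromDmiDecodeRegexSubmatch calls GetDmiDecodeType internally); not part of the patch, and the dmidecode fragment is fabricated:

    // Illustrative only: pull the BIOS vendor out of the DMI type 0 section.
    dmi := "Handle 0x0000, DMI type 0, 26 bytes\nBIOS Information\n\tVendor: ExampleVendor\n\tVersion: 1.0\n"
    vendor := ValFromDmiDecodeRegexSubmatch(dmi, "0", `Vendor:\s*(.*)`) // "ExampleVendor"
    _ = vendor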
+func GetDmiDecodeType(dmiDecodeOutput string, dmiType string) string { + var lines []string + start := false + for line := range strings.SplitSeq(dmiDecodeOutput, "\n") { + if start && strings.HasPrefix(line, "Handle ") { + start = false + } + if strings.Contains(line, "DMI type "+dmiType+",") { + start = true + } + if start { + lines = append(lines, line) + } + } + return strings.Join(lines, "\n") +} + +// GetDmiDecodeEntries extracts the entries from the given `dmiDecodeOutput` that belong to the specified `dmiType`. +func GetDmiDecodeEntries(dmiDecodeOutput string, dmiType string) (entries [][]string) { + lines := strings.Split(dmiDecodeOutput, "\n") + var entry []string + typeMatch := false + for _, line := range lines { + if strings.HasPrefix(line, "Handle ") { + if strings.Contains(line, "DMI type "+dmiType+",") { + // type match + typeMatch = true + entry = []string{} + } else { + // not a type match + typeMatch = false + } + } + if !typeMatch { + continue + } + if line == "" { + // end of type match entry + entries = append(entries, entry) + } else { + // a line in the entry + entry = append(entry, line) + } + } + return +} + +// GetSectionsFromOutput parses output into sections, where the section name +// is the key in a map and the section content is the value +// sections are delimited by lines of the form ##########
########## +func GetSectionsFromOutput(output string) map[string]string { + sections := make(map[string]string) + re := regexp.MustCompile(`^########## (.+?) ##########$`) + var sectionName string + for line := range strings.SplitSeq(output, "\n") { + // check if the line is a section header + match := re.FindStringSubmatch(line) + if match != nil { + // if the section name isn't in the map yet, add it + if _, ok := sections[match[1]]; !ok { + sections[match[1]] = "" + } + // save the section name + sectionName = match[1] + continue + } + if sectionName != "" { + sections[sectionName] += line + "\n" + } + } + return sections +} + +// SectionValueFromOutput returns the content of a section from the output +// if the section doesn't exist, returns an empty string +func SectionValueFromOutput(output string, sectionName string) string { + sections := GetSectionsFromOutput(output) + if len(sections) == 0 { + slog.Warn("no sections in output") + return "" + } + if _, ok := sections[sectionName]; !ok { + slog.Warn("section not found in output", slog.String("section", sectionName)) + return "" + } + if sections[sectionName] == "" { + slog.Warn("No content for section:", slog.String("section", sectionName)) + return "" + } + return sections[sectionName] +} diff --git a/internal/extract/extract_test.go b/internal/extract/extract_test.go new file mode 100644 index 00000000..4afbacd5 --- /dev/null +++ b/internal/extract/extract_test.go @@ -0,0 +1,157 @@ +package extract + +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +import ( + "reflect" + "testing" +) + +func TestGetSectionsFromOutput(t *testing.T) { + tests := []struct { + name string + output string + want map[string]string + }{ + { + name: "Valid sections with content", + output: `########## Section A ########## +Content A1 +Content A2 +########## Section B ########## +Content B1 +Content B2 +########## Section C ########## +Content C1`, + want: map[string]string{ + "Section A": "Content A1\nContent A2\n", + "Section B": "Content B1\nContent B2\n", + "Section C": "Content C1\n", + }, + }, + { + name: "Valid sections with empty content", + output: `########## Section A ########## +########## Section B ########## +########## Section C ##########`, + want: map[string]string{ + "Section A": "", + "Section B": "", + "Section C": "", + }, + }, + { + name: "No sections", + output: "No section headers here", + want: map[string]string{}, + }, + { + name: "Empty output", + output: ``, + want: map[string]string{}, + }, + { + name: "Empty lines in output", + output: "\n\n\n", + want: map[string]string{}, + }, + { + name: "Section with trailing newlines", + output: `########## Section A ########## + +Content A1 + +########## Section B ########## +Content B1`, + want: map[string]string{ + "Section A": "\nContent A1\n\n", + "Section B": "Content B1\n", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := GetSectionsFromOutput(tt.output) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("getSectionsFromOutput() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestSectionValueFromOutput(t *testing.T) { + tests := []struct { + name string + output string + sectionName string + want string + }{ + { + name: "Section A exists with content", + output: `########## Section A ########## +Content A1 +Content A2 +########## Section B ########## +Content B1 +Content B2`, + sectionName: "Section A", + want: "Content A1\nContent A2\n", + }, + { + name: "Section B exists with content", + 
output: `########## Section A ########## +Content A1 +Content A2 +########## Section B ########## +Content B1 +Content B2`, + sectionName: "Section B", + want: "Content B1\nContent B2\n", + }, + { + name: "Section exists with no content", + output: `########## Section A ########## +########## Section B ########## +Content B1`, + sectionName: "Section A", + want: "", + }, + { + name: "Section does not exist", + output: `########## Section A ########## +Content A1 +########## Section B ########## +Content B1`, + sectionName: "Section C", + want: "", + }, + { + name: "Empty output", + output: "", + sectionName: "Section A", + want: "", + }, + { + name: "Section with trailing newlines", + output: `########## Section A ########## + +Content A1 + +########## Section B ########## +Content B1`, + sectionName: "Section A", + want: "\nContent A1\n\n", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := SectionValueFromOutput(tt.output, tt.sectionName) + if got != tt.want { + t.Errorf("sectionValueFromOutput() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/common/frequency.go b/internal/extract/frequency.go similarity index 92% rename from internal/common/frequency.go rename to internal/extract/frequency.go index 848368fd..f9c4a824 100644 --- a/internal/common/frequency.go +++ b/internal/extract/frequency.go @@ -1,19 +1,20 @@ -package common +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package extract import ( "fmt" "log/slog" - "perfspect/internal/cpus" - "perfspect/internal/script" - "perfspect/internal/util" "regexp" "slices" "strconv" "strings" -) -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause + "perfspect/internal/cpus" + "perfspect/internal/script" + "perfspect/internal/util" +) // BaseFrequencyFromOutput gets base core frequency // @@ -54,7 +55,7 @@ func BaseFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { return "" } -// getFrequenciesFromHex +// getFrequenciesFromHex converts hex string to frequency list func getFrequenciesFromHex(hex string) ([]int, error) { freqs, err := util.HexToIntList(hex) if err != nil { @@ -65,7 +66,7 @@ func getFrequenciesFromHex(hex string) ([]int, error) { return freqs, nil } -// getBucketSizesFromHex +// getBucketSizesFromHex converts hex string to bucket sizes func getBucketSizesFromHex(hex string) ([]int, error) { bucketSizes, err := util.HexToIntList(hex) if err != nil { @@ -81,7 +82,6 @@ func getBucketSizesFromHex(hex string) ([]int, error) { } // padFrequencies adds items to the frequencies slice until it reaches the desired length. -// The value of the added items is the same as the last item in the original slice. func padFrequencies(freqs []int, desiredLength int) ([]int, error) { if len(freqs) == 0 { return nil, fmt.Errorf("cannot pad empty frequencies slice") @@ -93,24 +93,12 @@ func padFrequencies(freqs []int, desiredLength int) ([]int, error) { } // GetSpecFrequencyBuckets gets the core frequency buckets from the script output -// returns slice of rows -// first row is header -// each row is a slice of strings -// "cores", "cores per die", "sse", "avx2", "avx512", "avx512h", "amx" -// "0-41", "0-20", "3.5", "3.5", "3.3", "3.2", "3.1" -// "42-63", "21-31", "3.5", "3.5", "3.3", "3.2", "3.1" -// "64-85", "32-43", "3.5", "3.5", "3.3", "3.2", "3.1" -// ... 
-// the "cores per die" column is only present for some architectures func GetSpecFrequencyBuckets(outputs map[string]script.ScriptOutput) ([][]string, error) { arch := UarchFromOutput(outputs) if arch == "" { return nil, fmt.Errorf("uarch is required") } out := outputs[script.SpecCoreFrequenciesScriptName].Stdout - // expected script output format, the number of fields may vary: - // "cores sse avx2 avx512 avx512h amx" - // "hex hex hex hex hex hex" if out == "" { return nil, fmt.Errorf("no core frequencies found") } @@ -232,12 +220,6 @@ func GetSpecFrequencyBuckets(outputs map[string]script.ScriptOutput) ([][]string } // ExpandTurboFrequencies expands the turbo frequencies to a list of frequencies -// input is the output of getSpecFrequencyBuckets, e.g.: -// "cores", "cores per die", "sse", "avx2", "avx512", "avx512h", "amx" -// "0-41", "0-20", "3.5", "3.5", "3.3", "3.2", "3.1" -// "42-63", "21-31", "3.5", "3.5", "3.3", "3.2", "3.1" -// ... -// output is the expanded list of the frequencies for the requested ISA func ExpandTurboFrequencies(specFrequencyBuckets [][]string, isa string) ([]string, error) { if len(specFrequencyBuckets) < 2 || len(specFrequencyBuckets[0]) < 2 { return nil, fmt.Errorf("unable to parse core frequency buckets") @@ -284,6 +266,7 @@ func MaxFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { return "" } +// GetSSEFreqsFromBuckets extracts SSE frequencies from frequency buckets. func GetSSEFreqsFromBuckets(buckets [][]string) []string { if len(buckets) < 2 { return nil @@ -309,6 +292,7 @@ func GetSSEFreqsFromBuckets(buckets [][]string) []string { return sse } +// AllCoreMaxFrequencyFromOutput returns the all-core max frequency. func AllCoreMaxFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { specCoreFrequencies, err := GetSpecFrequencyBuckets(outputs) if err != nil { @@ -322,6 +306,7 @@ func AllCoreMaxFrequencyFromOutput(outputs map[string]script.ScriptOutput) strin return sseFreqs[len(sseFreqs)-1] + "GHz" } +// UncoreMinMaxDieFrequencyFromOutput returns uncore min/max frequency for a specific die type. func UncoreMinMaxDieFrequencyFromOutput(maxFreq bool, computeDie bool, outputs map[string]script.ScriptOutput) string { // find the first die that matches requrested die type (compute or I/O) re := regexp.MustCompile(`Read bits \d+:\d+ value (\d+) from TPMI ID .* for entry (\d+) in instance (\d+)`) @@ -379,6 +364,7 @@ func UncoreMinMaxDieFrequencyFromOutput(maxFreq bool, computeDie bool, outputs m return fmt.Sprintf("%.1fGHz", float64(parsed)/10) } +// UncoreMinMaxFrequencyFromOutput returns uncore min/max frequency from MSR. func UncoreMinMaxFrequencyFromOutput(maxFreq bool, outputs map[string]script.ScriptOutput) string { var parsed int64 var err error @@ -402,10 +388,12 @@ func UncoreMinMaxFrequencyFromOutput(maxFreq bool, outputs map[string]script.Scr return fmt.Sprintf("%.1fGHz", float64(parsed)/10) } +// UncoreMinFrequencyFromOutput returns uncore min frequency. func UncoreMinFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { return UncoreMinMaxFrequencyFromOutput(false, outputs) } +// UncoreMaxFrequencyFromOutput returns uncore max frequency. 
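+// A minimal usage sketch: as with the min variant above, the MSR field is in
+// 100 MHz units and is formatted as a string such as "2.5GHz".
+//
+//	maxUncore := UncoreMaxFrequencyFromOutput(outputs)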
func UncoreMaxFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { return UncoreMinMaxFrequencyFromOutput(true, outputs) } diff --git a/internal/common/frequency_test.go b/internal/extract/frequency_test.go similarity index 99% rename from internal/common/frequency_test.go rename to internal/extract/frequency_test.go index 1483a513..8250225f 100644 --- a/internal/common/frequency_test.go +++ b/internal/extract/frequency_test.go @@ -1,4 +1,4 @@ -package common +package extract // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause diff --git a/cmd/report/gpu.go b/internal/extract/gpu.go similarity index 87% rename from cmd/report/gpu.go rename to internal/extract/gpu.go index e507fde3..5b1b5db2 100644 --- a/cmd/report/gpu.go +++ b/internal/extract/gpu.go @@ -1,7 +1,7 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package report +package extract import ( "log/slog" @@ -10,7 +10,6 @@ import ( "strconv" "strings" - "perfspect/internal/common" "perfspect/internal/script" ) @@ -21,13 +20,15 @@ import ( // // The devid field will be interpreted as a regular expression. +// GPUDefinition represents an Intel GPU device definition. type GPUDefinition struct { Model string MfgID string DevID string } -var gpuDefinitions = []GPUDefinition{ +// GPUDefinitions contains all known Intel GPU definitions. +var GPUDefinitions = []GPUDefinition{ { Model: "ATS-P", MfgID: "8086", @@ -135,22 +136,24 @@ var gpuDefinitions = []GPUDefinition{ }, } +// GPU represents a graphics processing unit found in the system. type GPU struct { Manufacturer string Model string PCIID string } -func gpuInfoFromOutput(outputs map[string]script.ScriptOutput) []GPU { +// GPUInfoFromOutput returns GPU information from lshw output. +func GPUInfoFromOutput(outputs map[string]script.ScriptOutput) []GPU { gpus := []GPU{} - gpusLshw := common.ValsArrayFromRegexSubmatch(outputs[script.LshwScriptName].Stdout, `^pci.*?\s+display\s+(\w+).*?\s+\[(\w+):(\w+)]$`) + gpusLshw := ValsArrayFromRegexSubmatch(outputs[script.LshwScriptName].Stdout, `^pci.*?\s+display\s+(\w+).*?\s+\[(\w+):(\w+)]$`) idxMfgName := 0 idxMfgID := 1 idxDevID := 2 for _, gpu := range gpusLshw { // Find GPU in GPU defs, note the model var model string - for _, intelGPU := range gpuDefinitions { + for _, intelGPU := range GPUDefinitions { if gpu[idxMfgID] == intelGPU.MfgID { model = intelGPU.Model break @@ -173,6 +176,7 @@ func gpuInfoFromOutput(outputs map[string]script.ScriptOutput) []GPU { return gpus } +// Gaudi represents an Intel Gaudi accelerator. type Gaudi struct { ModuleID string Microarchitecture string @@ -185,7 +189,8 @@ type Gaudi struct { NUMA string } -func gaudiInfoFromOutput(outputs map[string]script.ScriptOutput) []Gaudi { +// GaudiInfoFromOutput returns Gaudi accelerator information from script output. 
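+// A minimal usage sketch, assuming outputs contains the Gaudi script results:
+//
+//	for _, g := range GaudiInfoFromOutput(outputs) {
+//		fmt.Printf("Gaudi module %s (%s), NUMA node %s\n", g.ModuleID, g.Microarchitecture, g.NUMA)
+//	}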
+func GaudiInfoFromOutput(outputs map[string]script.ScriptOutput) []Gaudi { gaudis := []Gaudi{} for i, line := range strings.Split(outputs[script.GaudiInfoScriptName].Stdout, "\n") { if line == "" || i == 0 { // skip blank lines and header @@ -207,7 +212,7 @@ func gaudiInfoFromOutput(outputs map[string]script.ScriptOutput) []Gaudi { gaudis[i].Microarchitecture = strings.TrimSpace(outputs[script.GaudiArchitectureScriptName].Stdout) } // get NUMA affinity - numaAffinities := common.ValsArrayFromRegexSubmatch(outputs[script.GaudiNumaScriptName].Stdout, `^(\d+)\s+(\d+)\s+$`) + numaAffinities := ValsArrayFromRegexSubmatch(outputs[script.GaudiNumaScriptName].Stdout, `^(\d+)\s+(\d+)\s+$`) if len(numaAffinities) != len(gaudis) { slog.Error("number of gaudis in gaudi info and numa output do not match", slog.Int("gaudis", len(gaudis)), slog.Int("numaAffinities", len(numaAffinities))) return nil @@ -281,8 +286,8 @@ func gaudiInfoFromOutput(outputs map[string]script.ScriptOutput) []Gaudi { return gaudis } -// return all PCI Devices of specified class -func getPCIDevices(class string, outputs map[string]script.ScriptOutput) (devices []map[string]string) { +// GetPCIDevices returns all PCI Devices of specified class from lspci output. +func GetPCIDevices(class string, outputs map[string]script.ScriptOutput) (devices []map[string]string) { device := make(map[string]string) re := regexp.MustCompile(`^(\w+):\s+(.*)$`) for line := range strings.SplitSeq(outputs[script.LspciVmmScriptName].Stdout, "\n") { diff --git a/cmd/report/isa.go b/internal/extract/isa.go similarity index 77% rename from cmd/report/isa.go rename to internal/extract/isa.go index b804a48d..bf41654e 100644 --- a/cmd/report/isa.go +++ b/internal/extract/isa.go @@ -1,20 +1,21 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package report +package extract import ( - "perfspect/internal/common" "perfspect/internal/script" ) +// ISA represents an instruction set architecture extension. type ISA struct { Name string FullName string CPUID string } -var isas = []ISA{ +// ISADefinitions contains all known ISA extension definitions. +var ISADefinitions = []ISA{ {"AES", "Advanced Encryption Standard New Instructions (AES-NI)", "AES instruction"}, {"AMX", "Advanced Matrix Extensions (AMX)", "AMX-BF16: tile bfloat16 support"}, {"AMX-COMPLEX", "AMX-COMPLEX Instruction", "AMX-COMPLEX instructions"}, @@ -39,25 +40,28 @@ var isas = []ISA{ {"WAITPKG", "UMONITOR, UMWAIT, TPAUSE Instructions", "WAITPKG instructions"}, } -func isaFullNames() []string { +// ISAFullNames returns the full names of all ISA extensions. +func ISAFullNames() []string { var names []string - for _, isa := range isas { + for _, isa := range ISADefinitions { names = append(names, isa.FullName) } return names } -func yesIfTrue(val string) string { +// YesIfTrue converts a boolean string to "Yes" or "No". +func YesIfTrue(val string) string { if val == "true" { return "Yes" } return "No" } -func isaSupportedFromOutput(outputs map[string]script.ScriptOutput) []string { +// ISASupportedFromOutput returns ISA support status from cpuid output. 
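+// A minimal usage sketch: the returned slice parallels ISADefinitions (and
+// ISAFullNames()), one "Yes"/"No" entry per extension.
+//
+//	names := ISAFullNames()
+//	supported := ISASupportedFromOutput(outputs)
+//	for i, name := range names {
+//		fmt.Printf("%-55s %s\n", name, supported[i])
+//	}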
+func ISASupportedFromOutput(outputs map[string]script.ScriptOutput) []string { var supported []string - for _, isa := range isas { - oneSupported := yesIfTrue(common.ValFromRegexSubmatch(outputs[script.CpuidScriptName].Stdout, isa.CPUID+`\s*= (.+?)$`)) + for _, isa := range ISADefinitions { + oneSupported := YesIfTrue(ValFromRegexSubmatch(outputs[script.CpuidScriptName].Stdout, isa.CPUID+`\s*= (.+?)$`)) supported = append(supported, oneSupported) } return supported diff --git a/internal/common/nic.go b/internal/extract/nic.go similarity index 83% rename from internal/common/nic.go rename to internal/extract/nic.go index 58ef009d..faecfae9 100644 --- a/internal/common/nic.go +++ b/internal/extract/nic.go @@ -1,7 +1,7 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package common +package extract import ( "fmt" @@ -13,7 +13,8 @@ import ( "perfspect/internal/script" ) -type nicInfo struct { +// NicInfo represents network interface information. +type NicInfo struct { Name string Vendor string VendorID string @@ -42,13 +43,14 @@ type nicInfo struct { RPSCPUs map[string]string } -func ParseNicInfo(scriptOutput string) []nicInfo { - var nics []nicInfo +// ParseNicInfo parses NIC information from script output. +func ParseNicInfo(scriptOutput string) []NicInfo { + var nics []NicInfo for nicOutput := range strings.SplitSeq(scriptOutput, "----------------------------------------") { if strings.TrimSpace(nicOutput) == "" { continue } - var nic nicInfo + var nic NicInfo nic.XPSCPUs = make(map[string]string) nic.RPSCPUs = make(map[string]string) // Map of prefixes to field pointers @@ -131,18 +133,14 @@ func hexBitmapToCPUList(hexBitmap string) string { } // Remove commas to form a single continuous hex string. - // This assumes the comma-separated parts are in big-endian order. fullHexBitmap := strings.ReplaceAll(hexBitmap, ",", "") i := new(big.Int) - // The string is a hex string, so the base is 16. if _, success := i.SetString(fullHexBitmap, 16); !success { - // If parsing fails, it might not be a hex string. Return as is. return hexBitmap } var cpus []string - // Iterate through the bits of the big integer. 
for bit := 0; bit < i.BitLen(); bit++ { if i.Bit(bit) == 1 { cpus = append(cpus, fmt.Sprintf("%d", bit)) @@ -155,56 +153,43 @@ func hexBitmapToCPUList(hexBitmap string) string { } // assignCardAndPort assigns card and port numbers to NICs based on their PCI addresses -func assignCardAndPort(nics []nicInfo) { +func assignCardAndPort(nics []NicInfo) { if len(nics) == 0 { return } - // Map to store card identifiers (domain:bus:device) to card numbers cardMap := make(map[string]int) - // Map to track ports within each card - portMap := make(map[string][]int) // card identifier -> list of indices in nics slice + portMap := make(map[string][]int) cardCounter := 1 - // First pass: identify cards and group NICs by card for i := range nics { if nics[i].Bus == "" { continue } - // PCI address format: domain:bus:device.function (e.g., 0000:32:00.0) - // Extract domain:bus:device as the card identifier parts := strings.Split(nics[i].Bus, ":") if len(parts) != 3 { continue } - // Further split the last part to separate device from function deviceFunc := strings.Split(parts[2], ".") if len(deviceFunc) < 1 { continue } - // Card identifier is domain:bus:device cardID := parts[0] + ":" + parts[1] + ":" + deviceFunc[0] - // Assign card number if not already assigned if _, exists := cardMap[cardID]; !exists { cardMap[cardID] = cardCounter cardCounter++ } - // Add this NIC index to the card's port list portMap[cardID] = append(portMap[cardID], i) } - // Second pass: assign card and port numbers for cardID, nicIndices := range portMap { cardNum := cardMap[cardID] - // Sort NICs within a card by their function number sort.Slice(nicIndices, func(i, j int) bool { - // Extract function numbers funcI := extractFunction(nics[nicIndices[i]].Bus) funcJ := extractFunction(nics[nicIndices[j]].Bus) return funcI < funcJ }) - // Assign port numbers for portNum, nicIdx := range nicIndices { nics[nicIdx].Card = fmt.Sprintf("%d", cardNum) nics[nicIdx].Port = fmt.Sprintf("%d", portNum+1) @@ -212,7 +197,6 @@ func assignCardAndPort(nics []nicInfo) { } } -// extractFunction extracts the function number from a PCI address func extractFunction(busAddr string) int { parts := strings.Split(busAddr, ".") if len(parts) != 2 { @@ -225,6 +209,7 @@ func extractFunction(busAddr string) int { return funcNum } +// NICIrqMappingsFromOutput returns NIC IRQ to CPU affinity mappings. func NICIrqMappingsFromOutput(outputs map[string]script.ScriptOutput) [][]string { nics := ParseNicInfo(outputs[script.NicInfoScriptName].Stdout) if len(nics) == 0 { @@ -233,7 +218,7 @@ func NICIrqMappingsFromOutput(outputs map[string]script.ScriptOutput) [][]string nicIRQMappings := [][]string{} for _, nic := range nics { if nic.CPUAffinity == "" { - continue // skip NICs without CPU affinity + continue } affinities := strings.Split(strings.TrimSuffix(nic.CPUAffinity, ";"), ";") nicIRQMappings = append(nicIRQMappings, []string{nic.Name, strings.Join(affinities, " | ")}) @@ -241,6 +226,7 @@ func NICIrqMappingsFromOutput(outputs map[string]script.ScriptOutput) [][]string return nicIRQMappings } +// NICSummaryFromOutput returns a summary of installed NICs. 
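+// A minimal usage sketch: the result is a single human-readable string built
+// from the parsed NIC list, as consumed by the system summary.
+//
+//	nicSummary := NICSummaryFromOutput(outputs)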
func NICSummaryFromOutput(outputs map[string]script.ScriptOutput) string { nics := ParseNicInfo(outputs[script.NicInfoScriptName].Stdout) if len(nics) == 0 { diff --git a/internal/common/nic_test.go b/internal/extract/nic_test.go similarity index 98% rename from internal/common/nic_test.go rename to internal/extract/nic_test.go index 2e35a191..130a4a23 100644 --- a/internal/common/nic_test.go +++ b/internal/extract/nic_test.go @@ -1,4 +1,4 @@ -package common +package extract // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause @@ -10,12 +10,12 @@ import ( func TestAssignCardAndPort(t *testing.T) { tests := []struct { name string - nics []nicInfo + nics []NicInfo expected map[string]string // map of NIC name to expected "Card / Port" }{ { name: "Two cards with two ports each", - nics: []nicInfo{ + nics: []NicInfo{ {Name: "eth2", Bus: "0000:32:00.0"}, {Name: "eth3", Bus: "0000:32:00.1"}, {Name: "eth0", Bus: "0000:c0:00.0"}, @@ -30,7 +30,7 @@ func TestAssignCardAndPort(t *testing.T) { }, { name: "Single card with four ports", - nics: []nicInfo{ + nics: []NicInfo{ {Name: "eth0", Bus: "0000:19:00.0"}, {Name: "eth1", Bus: "0000:19:00.1"}, {Name: "eth2", Bus: "0000:19:00.2"}, @@ -45,7 +45,7 @@ func TestAssignCardAndPort(t *testing.T) { }, { name: "Three different cards", - nics: []nicInfo{ + nics: []NicInfo{ {Name: "eth0", Bus: "0000:19:00.0"}, {Name: "eth1", Bus: "0000:1a:00.0"}, {Name: "eth2", Bus: "0000:1b:00.0"}, @@ -58,7 +58,7 @@ func TestAssignCardAndPort(t *testing.T) { }, { name: "Empty bus address should not assign card/port", - nics: []nicInfo{ + nics: []NicInfo{ {Name: "eth0", Bus: ""}, }, expected: map[string]string{ @@ -118,7 +118,7 @@ version: 5.1.0-k firmware-version: 0x800009e0 MAC Address: aa:bb:cc:dd:ee:00 NUMA Node: 0 -CPU Affinity: +CPU Affinity: IRQ Balance: Enabled rx-usecs: 1 tx-usecs: 1 @@ -137,7 +137,7 @@ version: 5.1.0-k firmware-version: 0x800009e0 MAC Address: aa:bb:cc:dd:ee:01 NUMA Node: 0 -CPU Affinity: +CPU Affinity: IRQ Balance: Enabled rx-usecs: 1 tx-usecs: 1 @@ -156,7 +156,7 @@ version: K_5.19.0-41-generic_5.1.9 firmware-version: 4.40 0x8001c967 1.3534.0 MAC Address: aa:bb:cc:dd:ee:82 NUMA Node: 1 -CPU Affinity: +CPU Affinity: IRQ Balance: Enabled rx-usecs: 1 tx-usecs: 1 @@ -175,7 +175,7 @@ version: K_5.19.0-41-generic_5.1.9 firmware-version: 4.40 0x8001c967 1.3534.0 MAC Address: aa:bb:cc:dd:ee:83 NUMA Node: 1 -CPU Affinity: +CPU Affinity: IRQ Balance: Enabled rx-usecs: 1 tx-usecs: 1 diff --git a/internal/extract/os.go b/internal/extract/os.go new file mode 100644 index 00000000..cddabebc --- /dev/null +++ b/internal/extract/os.go @@ -0,0 +1,16 @@ +package extract + +import "perfspect/internal/script" + +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +// OperatingSystemFromOutput returns the operating system from script outputs. 
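+// A minimal usage sketch: the value is taken from PRETTY_NAME in the release
+// file, with the CentOS release string taking precedence when present.
+//
+//	osName := OperatingSystemFromOutput(outputs) // e.g., "Ubuntu 24.04 LTS"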
+func OperatingSystemFromOutput(outputs map[string]script.ScriptOutput) string { + os := ValFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^PRETTY_NAME=\"(.+?)\"`) + centos := ValFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^(CentOS Linux release .*)`) + if centos != "" { + os = centos + } + return os +} diff --git a/internal/common/power.go b/internal/extract/power.go similarity index 88% rename from internal/common/power.go rename to internal/extract/power.go index cb08093c..7f0d9983 100644 --- a/internal/common/power.go +++ b/internal/extract/power.go @@ -1,7 +1,7 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package common +package extract import ( "encoding/csv" @@ -15,25 +15,15 @@ import ( ) // EPPFromOutput gets EPP value from script outputs -// IF 0x774[42] is '1' AND 0x774[60] is '0' -// THEN -// -// get EPP from 0x772 (package) -// -// ELSE -// -// get EPP from 0x774 (per core) func EPPFromOutput(outputs map[string]script.ScriptOutput) string { - // if we couldn't get the EPP values, return empty string if outputs[script.EppValidScriptName].Exitcode != 0 || len(outputs[script.EppValidScriptName].Stdout) == 0 || outputs[script.EppPackageControlScriptName].Exitcode != 0 || len(outputs[script.EppPackageControlScriptName].Stdout) == 0 || outputs[script.EppPackageScriptName].Exitcode != 0 || len(outputs[script.EppPackageScriptName].Stdout) == 0 { slog.Warn("EPP scripts failed or produced no output") return "" } - // check if the epp valid bit is set and consistent across all cores var eppValid string - for i, line := range strings.Split(outputs[script.EppValidScriptName].Stdout, "\n") { // MSR 0x774, bit 60 + for i, line := range strings.Split(outputs[script.EppValidScriptName].Stdout, "\n") { if line == "" { continue } @@ -51,9 +41,8 @@ func EPPFromOutput(outputs map[string]script.ScriptOutput) string { return "inconsistent" } } - // check if epp package control bit is set and consistent across all cores var eppPkgCtrl string - for i, line := range strings.Split(outputs[script.EppPackageControlScriptName].Stdout, "\n") { // MSR 0x774, bit 42 + for i, line := range strings.Split(outputs[script.EppPackageControlScriptName].Stdout, "\n") { if line == "" { continue } @@ -72,7 +61,7 @@ func EPPFromOutput(outputs map[string]script.ScriptOutput) string { } } if eppPkgCtrl == "1" && eppValid == "0" { - eppPackage := strings.TrimSpace(outputs[script.EppPackageScriptName].Stdout) // MSR 0x772, bits 24-31 (package) + eppPackage := strings.TrimSpace(outputs[script.EppPackageScriptName].Stdout) msr, err := strconv.ParseInt(eppPackage, 16, 0) if err != nil { slog.Error("failed to parse EPP package value", slog.String("error", err.Error()), slog.String("epp", eppPackage)) @@ -81,7 +70,7 @@ func EPPFromOutput(outputs map[string]script.ScriptOutput) string { return eppValToLabel(int(msr)) } else { var epp string - for i, line := range strings.Split(outputs[script.EppScriptName].Stdout, "\n") { // MSR 0x774, bits 24-31 (per-core) + for i, line := range strings.Split(outputs[script.EppScriptName].Stdout, "\n") { if line == "" { continue } @@ -122,6 +111,7 @@ func EPBFromOutput(outputs map[string]script.ScriptOutput) string { return epbValToLabel(int(msr)) } +// ELCSummaryFromOutput returns a summary of Efficiency Latency Control settings. 
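+// A minimal usage sketch: the summary is derived from the interpreted mode
+// column that ELCFieldValuesFromOutput appends, e.g. "Latency Optimized".
+//
+//	elcSummary := ELCSummaryFromOutput(outputs)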
func ELCSummaryFromOutput(outputs map[string]script.ScriptOutput) string { fieldValues := ELCFieldValuesFromOutput(outputs) if len(fieldValues) < 10 || len(fieldValues[9].Values) == 0 { @@ -136,6 +126,7 @@ func ELCSummaryFromOutput(outputs map[string]script.ScriptOutput) string { return summary } +// C6FromOutput returns the C6 C-state status. func C6FromOutput(outputs map[string]script.ScriptOutput) string { cstatesInfo := CstatesFromOutput(outputs) if cstatesInfo == nil { @@ -149,6 +140,7 @@ func C6FromOutput(outputs map[string]script.ScriptOutput) string { return "" } +// CstatesSummaryFromOutput returns a summary of all C-state statuses. func CstatesSummaryFromOutput(outputs map[string]script.ScriptOutput) string { cstatesInfo := CstatesFromOutput(outputs) if cstatesInfo == nil { @@ -161,11 +153,13 @@ func CstatesSummaryFromOutput(outputs map[string]script.ScriptOutput) string { return strings.Join(summaryParts, ", ") } +// CstateInfo represents a C-state name and status. type CstateInfo struct { Name string Status string } +// CstatesFromOutput extracts C-state information from script outputs. func CstatesFromOutput(outputs map[string]script.ScriptOutput) []CstateInfo { var cstatesInfo []CstateInfo output := outputs[script.CstatesScriptName].Stdout @@ -182,6 +176,7 @@ func CstatesFromOutput(outputs map[string]script.ScriptOutput) []CstateInfo { return cstatesInfo } +// ELCFieldValuesFromOutput extracts Efficiency Latency Control field values. func ELCFieldValuesFromOutput(outputs map[string]script.ScriptOutput) (fieldValues []table.Field) { if outputs[script.ElcScriptName].Stdout == "" { return @@ -194,19 +189,15 @@ func ELCFieldValuesFromOutput(outputs map[string]script.ScriptOutput) (fieldValu if len(rows) < 2 { return } - // first row is headers for fieldNamesIndex, fieldName := range rows[0] { values := []string{} - // value rows for _, row := range rows[1:] { values = append(values, row[fieldNamesIndex]) } fieldValues = append(fieldValues, table.Field{Name: fieldName, Values: values}) } - // let's add an interpretation of the values in an additional column values := []string{} - // value rows for _, row := range rows[1:] { var mode string if len(row) > 7 && row[2] == "IO" { @@ -217,7 +208,7 @@ func ELCFieldValuesFromOutput(outputs map[string]script.ScriptOutput) (fieldValu } else { mode = "Custom" } - } else if len(row) > 5 { // COMPUTE + } else if len(row) > 5 { switch row[5] { case "0": mode = "Latency Optimized" @@ -233,6 +224,16 @@ func ELCFieldValuesFromOutput(outputs map[string]script.ScriptOutput) (fieldValu return } +// TDPFromOutput returns the TDP (Thermal Design Power) from script outputs. 
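+// A minimal sketch of the decoding below: the package power limit MSR holds the
+// limit in 1/8 W units, so a raw value of 0xB18 (2840) is reported as "355W";
+// an empty string is returned when the MSR is unreadable or zero.
+//
+//	tdp := TDPFromOutput(outputs)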
+func TDPFromOutput(outputs map[string]script.ScriptOutput) string { + msrHex := strings.TrimSpace(outputs[script.PackagePowerLimitName].Stdout) + msr, err := strconv.ParseInt(msrHex, 16, 0) + if err != nil || msr == 0 { + return "" + } + return fmt.Sprint(msr/8) + "W" +} + func epbValToLabel(msr int) string { var val string if msr >= 0 && msr <= 3 { diff --git a/internal/common/prefetcher.go b/internal/extract/prefetcher.go similarity index 93% rename from internal/common/prefetcher.go rename to internal/extract/prefetcher.go index 9b9719d5..ef9f5076 100644 --- a/internal/common/prefetcher.go +++ b/internal/extract/prefetcher.go @@ -1,34 +1,27 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package common +package extract import ( "fmt" "log/slog" - "perfspect/internal/cpus" - "perfspect/internal/script" "slices" "strconv" "strings" -) -// prefetchers are enabled when associated bit in msr is 0 - -type PrefetcherDefinition struct { - ShortName string - Description string - Msr int - Bit int - Uarchs []string -} + "perfspect/internal/cpus" + "perfspect/internal/script" +) +// MSR addresses for prefetcher control const ( MsrPrefetchControl = 0x1a4 MsrPrefetchers = 0x6d MsrAtomPrefTuning1 = 0x1320 ) +// Prefetcher short names const ( PrefetcherL2HWName = "L2 HW" PrefetcherL2AdjName = "L2 Adj" @@ -43,6 +36,16 @@ const ( PrefetcherLLCStreamName = "LLC Stream" ) +// PrefetcherDefinition represents a prefetcher configuration. +type PrefetcherDefinition struct { + ShortName string + Description string + Msr int + Bit int + Uarchs []string +} + +// PrefetcherDefinitions contains all known prefetcher definitions. var PrefetcherDefinitions = []PrefetcherDefinition{ { ShortName: PrefetcherL2HWName, @@ -124,7 +127,6 @@ var PrefetcherDefinitions = []PrefetcherDefinition{ } // GetPrefetcherDefByName returns the Prefetcher definition by its short name. -// It returns error if the Prefetcher is not found. func GetPrefetcherDefByName(name string) (PrefetcherDefinition, error) { for _, p := range PrefetcherDefinitions { if p.ShortName == name { @@ -139,6 +141,7 @@ func GetPrefetcherDefinitions() []PrefetcherDefinition { return PrefetcherDefinitions } +// IsPrefetcherEnabled checks if a prefetcher is enabled based on MSR value and bit position. func IsPrefetcherEnabled(msrValue string, bit int) (bool, error) { if msrValue == "" { return false, fmt.Errorf("msrValue is empty") @@ -152,11 +155,11 @@ func IsPrefetcherEnabled(msrValue string, bit int) (bool, error) { return bitMask&msrInt == 0, nil } +// PrefetchersFromOutput extracts prefetcher status from script outputs. func PrefetchersFromOutput(outputs map[string]script.ScriptOutput) [][]string { out := make([][]string, 0) uarch := UarchFromOutput(outputs) if uarch == "" { - // uarch is required return [][]string{} } for _, pf := range PrefetcherDefinitions { @@ -194,10 +197,10 @@ func PrefetchersFromOutput(outputs map[string]script.ScriptOutput) [][]string { return out } +// PrefetchersSummaryFromOutput returns a summary of all prefetcher statuses. 
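+// A minimal sketch of IsPrefetcherEnabled's convention (defined above): a
+// prefetcher is reported as enabled when its control bit in the MSR value is 0.
+//
+//	enabled, _ := IsPrefetcherEnabled("2f", 0) // bit 0 of 0x2f is 1, so enabled is false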
func PrefetchersSummaryFromOutput(outputs map[string]script.ScriptOutput) string { uarch := UarchFromOutput(outputs) if uarch == "" { - // uarch is required return "" } var prefList []string diff --git a/cmd/report/security.go b/internal/extract/security.go similarity index 62% rename from cmd/report/security.go rename to internal/extract/security.go index c815a0b4..b6858409 100644 --- a/cmd/report/security.go +++ b/internal/extract/security.go @@ -1,21 +1,21 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package report +package extract import ( "fmt" "sort" "strings" - "perfspect/internal/common" "perfspect/internal/script" ) -func cveInfoFromOutput(outputs map[string]script.ScriptOutput) [][]string { +// CVEInfoFromOutput returns CVE vulnerability information from spectre-meltdown-checker output. +func CVEInfoFromOutput(outputs map[string]script.ScriptOutput) [][]string { vulns := make(map[string]string) // from spectre-meltdown-checker - for _, pair := range common.ValsArrayFromRegexSubmatch(outputs[script.CveScriptName].Stdout, `(CVE-\d+-\d+): (.+)`) { + for _, pair := range ValsArrayFromRegexSubmatch(outputs[script.CveScriptName].Stdout, `(CVE-\d+-\d+): (.+)`) { vulns[pair[0]] = pair[1] } // sort the vulnerabilities by CVE ID @@ -31,8 +31,9 @@ func cveInfoFromOutput(outputs map[string]script.ScriptOutput) [][]string { return cves } -func cveSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - cves := cveInfoFromOutput(outputs) +// CVESummaryFromOutput returns a summary string of CVE vulnerability status. +func CVESummaryFromOutput(outputs map[string]script.ScriptOutput) string { + cves := CVEInfoFromOutput(outputs) if len(cves) == 0 { return "" } diff --git a/internal/common/storage.go b/internal/extract/storage.go similarity index 53% rename from internal/common/storage.go rename to internal/extract/storage.go index e445a3be..c0df3381 100644 --- a/internal/common/storage.go +++ b/internal/extract/storage.go @@ -1,7 +1,7 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package common +package extract import ( "fmt" @@ -10,9 +10,11 @@ import ( "strings" "perfspect/internal/script" + "perfspect/internal/table" ) -type diskInfo struct { +// DiskInfo represents disk/storage device information. +type DiskInfo struct { Name string Model string Size string @@ -29,8 +31,9 @@ type diskInfo struct { MaxLinkWidth string } -func DiskInfoFromOutput(outputs map[string]script.ScriptOutput) []diskInfo { - diskInfos := []diskInfo{} +// DiskInfoFromOutput extracts disk information from script outputs. +func DiskInfoFromOutput(outputs map[string]script.ScriptOutput) []DiskInfo { + diskInfos := []DiskInfo{} for i, line := range strings.Split(outputs[script.DiskInfoScriptName].Stdout, "\n") { // first line is the header if i == 0 { @@ -66,11 +69,12 @@ func DiskInfoFromOutput(outputs map[string]script.ScriptOutput) []diskInfo { } } } - diskInfos = append(diskInfos, diskInfo{fields[0], fields[1], fields[2], fields[3], fields[4], fields[5], fields[6], fields[7], fields[8], fields[9], fields[10], fields[11], fields[12], fields[13]}) + diskInfos = append(diskInfos, DiskInfo{fields[0], fields[1], fields[2], fields[3], fields[4], fields[5], fields[6], fields[7], fields[8], fields[9], fields[10], fields[11], fields[12], fields[13]}) } return diskInfos } +// DiskSummaryFromOutput returns a summary of installed disks. 
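+// A minimal usage sketch: DiskInfoFromOutput (above) yields one DiskInfo per
+// device, which this function condenses into a single string.
+//
+//	for _, d := range DiskInfoFromOutput(outputs) {
+//		fmt.Printf("%s %s %s\n", d.Name, d.Model, d.Size)
+//	}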
func DiskSummaryFromOutput(outputs map[string]script.ScriptOutput) string { disks := DiskInfoFromOutput(outputs) if len(disks) == 0 { @@ -94,3 +98,51 @@ func DiskSummaryFromOutput(outputs map[string]script.ScriptOutput) string { } return strings.Join(summary, ", ") } + +// FilesystemFieldValuesFromOutput returns filesystem information as table fields. +func FilesystemFieldValuesFromOutput(outputs map[string]script.ScriptOutput) []table.Field { + fieldValues := []table.Field{} + reFindmnt := regexp.MustCompile(`(.*)\s(.*)\s(.*)\s(.*)`) + for i, line := range strings.Split(outputs[script.DfScriptName].Stdout, "\n") { + if line == "" { + continue + } + fields := strings.Fields(line) + // "Mounted On" gets split into two fields, rejoin + if i == 0 && len(fields) >= 2 && fields[len(fields)-2] == "Mounted" && fields[len(fields)-1] == "on" { + fields[len(fields)-2] = "Mounted on" + fields = fields[:len(fields)-1] + for _, field := range fields { + fieldValues = append(fieldValues, table.Field{Name: field, Values: []string{}}) + } + // add an additional field + fieldValues = append(fieldValues, table.Field{Name: "Mount Options", Values: []string{}}) + continue + } + if len(fields) != len(fieldValues)-1 { + slog.Error("unexpected number of fields in df output", slog.String("line", line)) + return nil + } + for i, field := range fields { + fieldValues[i].Values = append(fieldValues[i].Values, field) + } + // get mount options for the current file system + var options string + for i, line := range strings.Split(outputs[script.FindMntScriptName].Stdout, "\n") { + if i == 0 { + continue + } + match := reFindmnt.FindStringSubmatch(line) + if match != nil && len(fields) > 5 { + target := match[1] + source := match[2] + if fields[0] == source && fields[5] == target { + options = match[4] + break + } + } + } + fieldValues[len(fieldValues)-1].Values = append(fieldValues[len(fieldValues)-1].Values, options) + } + return fieldValues +} diff --git a/internal/extract/system.go b/internal/extract/system.go new file mode 100644 index 00000000..bad1e7db --- /dev/null +++ b/internal/extract/system.go @@ -0,0 +1,91 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package extract + +import ( + "fmt" + "strings" + "time" + + "perfspect/internal/cpus" + "perfspect/internal/script" +) + +// SystemSummaryFromOutput returns a formatted system summary string. +func SystemSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + // BASELINE: 1-node, 2x Intel® Xeon® , xx cores, 100W TDP, HT On/Off?, Turbo On/Off?, Total Memory xxx GB (xx slots/ xx GB/ xxxx MHz [run @ xxxx MHz] ), , , , . Test by Intel as of . + template := "1-node, %s, %sx %s, %s cores, %s TDP, %s %s, %s %s, Total Memory %s, BIOS %s, microcode %s, %s, %s, %s, %s. Test by Intel as of %s." 
+ var systemType, socketCount, cpuModel, coreCount, tdp, htLabel, htOnOff, turboLabel, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion, date string + + // system type + systemType = ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + " " + ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) + // socket count + socketCount = ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(\d+)$`) + // CPU model + cpuModel = ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model name:\s*(.+?)$`) + // core count + coreCount = ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(\d+)$`) + // TDP + tdp = TDPFromOutput(outputs) + if tdp == "" { + tdp = "?" + } + vendor := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Vendor ID:\s*(.+)$`) + // hyperthreading + htLabel = "HT" + if vendor == cpus.AMDVendor { + htLabel = "SMT" + } + htOnOff = HyperthreadingFromOutput(outputs) + switch htOnOff { + case "Enabled": + htOnOff = "On" + case "Disabled": + htOnOff = "Off" + case "N/A": + htOnOff = "N/A" + default: + htOnOff = "?" + } + // turbo + turboLabel = "Turbo" + if vendor == cpus.AMDVendor { + turboLabel = "Boost" + } + turboOnOff = TurboEnabledFromOutput(outputs) + if strings.Contains(strings.ToLower(turboOnOff), "enabled") { + turboOnOff = "On" + } else if strings.Contains(strings.ToLower(turboOnOff), "disabled") { + turboOnOff = "Off" + } else { + turboOnOff = "?" + } + // memory + installedMem = InstalledMemoryFromOutput(outputs) + // BIOS + biosVersion = ValFromRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, `^Version:\s*(.+?)$`) + // microcode + uCodeVersion = ValFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`) + // NICs + nics = NICSummaryFromOutput(outputs) + // disks + disks = DiskSummaryFromOutput(outputs) + // OS + operatingSystem = OperatingSystemFromOutput(outputs) + // kernel + kernelVersion = ValFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`) + // date + date = strings.TrimSpace(outputs[script.DateScriptName].Stdout) + // parse date so that we can format it + parsedTime, err := time.Parse("Mon Jan 2 15:04:05 MST 2006", date) // without AM/PM + if err != nil { + parsedTime, err = time.Parse("Mon Jan 2 15:04:05 AM MST 2006", date) // with AM/PM + } + if err == nil { + date = parsedTime.Format("January 2 2006") + } + + // put it all together + return fmt.Sprintf(template, systemType, socketCount, cpuModel, coreCount, tdp, htLabel, htOnOff, turboLabel, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion, date) +} diff --git a/internal/common/turbostat.go b/internal/extract/turbostat.go similarity index 73% rename from internal/common/turbostat.go rename to internal/extract/turbostat.go index 515a4fb0..898c2d6d 100644 --- a/internal/common/turbostat.go +++ b/internal/extract/turbostat.go @@ -1,4 +1,7 @@ -package common +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package extract import ( "fmt" @@ -9,14 +12,7 @@ import ( "time" ) -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - // parseTurbostatOutput parses turbostat output text into a slice of maps. -// Each map represents a row, with column names as keys and values as strings. 
-// Adds a "timestamp" key to each row, if TIME and INTERVAL are included in -// the output by the collection script. -// Only the Summary and Packages rows are returned, i.e., rows for individual cores/CPUs are ignored. func parseTurbostatOutput(output string) ([]map[string]string, error) { var ( headers []string @@ -42,7 +38,6 @@ func parseTurbostatOutput(output string) ([]map[string]string, error) { } if val, found := strings.CutPrefix(line, "TIME:"); found { val = strings.TrimSpace(val) - // Try to parse as HH:MM:SS var err error timestamp, err = time.Parse("15:04:05", val) if err != nil { @@ -52,14 +47,11 @@ func parseTurbostatOutput(output string) ([]map[string]string, error) { timeParsed = true continue } - // parse the fields in the line fields := strings.Fields(line) - // if this is a header line if len(fields) >= 1 && slices.Contains([]string{"package", "die", "node", "core", "cpu"}, strings.ToLower(fields[0])) { if len(headers) == 0 { - headers = fields // first line with a column name is the header + headers = fields } else { - // bump the timestamp to the next interval if timeParsed && interval > 0 { timestamp = timestamp.Add(time.Duration(interval) * time.Second) } @@ -67,16 +59,15 @@ func parseTurbostatOutput(output string) ([]map[string]string, error) { continue } if len(headers) == 0 { - continue // skip data lines before first header + continue } if len(fields) != len(headers) { - continue // skip core lines + continue } row := make(map[string]string, len(headers)) for i, h := range headers { row[h] = fields[i] } - // Add timestamp to row if timeParsed && interval > 0 { row["timestamp"] = timestamp.Format("15:04:05") } @@ -87,9 +78,7 @@ func parseTurbostatOutput(output string) ([]map[string]string, error) { } // TurbostatPlatformRows parses the output of the turbostat script and returns the rows -// for the platform (summary) only, for the specified field names. -// The "platform" rows are those where Package, Die, Core, and CPU are all "-". -// The first column is the sample time, and the rest are the values for the specified fields. +// for the platform (summary) only. func TurbostatPlatformRows(turboStatScriptOutput string, fieldNames []string) ([][]string, error) { if turboStatScriptOutput == "" { return nil, fmt.Errorf("turbostat output is empty") @@ -104,18 +93,16 @@ func TurbostatPlatformRows(turboStatScriptOutput string, fieldNames []string) ([ if len(rows) == 0 { return nil, fmt.Errorf("no platform rows found in turbostat output") } - // filter the rows to the summary rows only var fieldValues [][]string for _, row := range rows { if !isPlatformRow(row) { continue } - // this is a summary row, extract the values for the specified fields - rowValues := make([]string, len(fieldNames)+1) // +1 for the sample time - rowValues[0] = row["timestamp"] // first column is the sample time + rowValues := make([]string, len(fieldNames)+1) + rowValues[0] = row["timestamp"] for i, fieldName := range fieldNames { if value, ok := row[fieldName]; ok { - rowValues[i+1] = value // +1 for the sample time + rowValues[i+1] = value } else { return nil, fmt.Errorf("field %s not found in turbostat output", fieldName) } @@ -129,8 +116,6 @@ func TurbostatPlatformRows(turboStatScriptOutput string, fieldNames []string) ([ return fieldValues, nil } -// isPlatformRow returns true if the row represents a platform (summary) row. 
-// only consider rows where Package, Die, Node, Core, and CPU are "-" (or don't exist), these rows contain the sum of all packages func isPlatformRow(row map[string]string) bool { for _, header := range []string{"Package", "Die", "Node", "Core", "CPU"} { if val, ok := row[header]; ok && val != "-" { @@ -141,8 +126,7 @@ func isPlatformRow(row map[string]string) bool { } // TurbostatPackageRows parses the output of the turbostat script and returns the rows -// for each package, for the specified field names. -// The first column is the sample time, and the rest are the values for the specified fields. +// for each package. func TurbostatPackageRows(turboStatScriptOutput string, fieldNames []string) ([][][]string, error) { if turboStatScriptOutput == "" { return nil, fmt.Errorf("turbostat output is empty") @@ -159,24 +143,21 @@ func TurbostatPackageRows(turboStatScriptOutput string, fieldNames []string) ([] } var packageRows [][][]string for _, row := range rows { - // not all instances of turbostat output include a Package column - // if it is missing assume 1 package, set it to 0 for rows where CPU is 0 if _, ok := row["Package"]; !ok { if row["CPU"] == "0" { row["Package"] = "0" } else { - continue // skip rows that are not package rows + continue } } if !isPackageRow(row) { continue } - // this is a package row, extract the values for the specified fields - rowValues := make([]string, len(fieldNames)+1) // +1 for the sample time - rowValues[0] = row["timestamp"] // first column is the sample time + rowValues := make([]string, len(fieldNames)+1) + rowValues[0] = row["timestamp"] for i, fieldName := range fieldNames { if value, ok := row[fieldName]; ok { - rowValues[i+1] = value // +1 for the sample time + rowValues[i+1] = value } else { return nil, fmt.Errorf("field %s not found in turbostat output", fieldName) } @@ -185,11 +166,9 @@ func TurbostatPackageRows(turboStatScriptOutput string, fieldNames []string) ([] if err != nil { return nil, fmt.Errorf("unable to parse package number: %s", row["Package"]) } - // if we have a new package, start a new package row if len(packageRows) < packageNum+1 { packageRows = append(packageRows, [][]string{rowValues}) } else { - // append to the associated package row packageRows[packageNum] = append(packageRows[packageNum], rowValues) } } @@ -199,8 +178,6 @@ func TurbostatPackageRows(turboStatScriptOutput string, fieldNames []string) ([] return packageRows, nil } -// isPackageRow returns true if the row represents a package row. 
-// only consider rows where Package is not "-" or empty func isPackageRow(row map[string]string) bool { if val, ok := row["Package"]; ok && val != "-" { return true @@ -221,7 +198,6 @@ func MaxTotalPackagePowerFromOutput(turbostatOutput string) string { var maxPower float64 var ignoredFirstReading bool for _, row := range rows { - // only consider rows where CPU, Package, or Core is "-", these rows contain the sum of all packgaes if row["CPU"] != "-" && row["CPU"] != "" || row["Package"] != "-" && row["Package"] != "" || row["Core"] != "-" && row["Core"] != "" { @@ -229,7 +205,6 @@ func MaxTotalPackagePowerFromOutput(turbostatOutput string) string { } if wattStr, ok := row["PkgWatt"]; ok { if !ignoredFirstReading { - // skip the first reading, it is usually not representative of the system state ignoredFirstReading = true continue } @@ -238,7 +213,6 @@ func MaxTotalPackagePowerFromOutput(turbostatOutput string) string { slog.Warn("unable to parse power value", slog.String("value", wattStr), slog.String("error", err.Error())) continue } - // Filter out anomalous high readings. Turbostat sometimes reports very high power values that are not realistic. if watt > 10000 { slog.Warn("ignoring anomalous high power reading", slog.String("value", wattStr)) continue @@ -266,7 +240,6 @@ func MinTotalPackagePowerFromOutput(turbostatOutput string) string { } var minPower float64 for _, row := range rows { - // only consider rows where CPU, Package, or Core is "-", these rows contain the sum of all packgaes if row["CPU"] != "-" && row["CPU"] != "" || row["Package"] != "-" && row["Package"] != "" || row["Core"] != "-" && row["Core"] != "" { @@ -302,7 +275,6 @@ func MaxPackageTemperatureFromOutput(turbostatOutput string) string { var maxTemp float64 var ignoredFirstReading bool for _, row := range rows { - // only consider rows where CPU, Package, or Core is "-", these rows contain the sum of all packgaes if row["CPU"] != "-" && row["CPU"] != "" || row["Package"] != "-" && row["Package"] != "" || row["Core"] != "-" && row["Core"] != "" { @@ -310,7 +282,6 @@ func MaxPackageTemperatureFromOutput(turbostatOutput string) string { } if tempStr, ok := row["PkgTmp"]; ok { if !ignoredFirstReading { - // skip the first reading, it is usually not representative of the system state ignoredFirstReading = true continue } @@ -319,7 +290,6 @@ func MaxPackageTemperatureFromOutput(turbostatOutput string) string { slog.Warn("unable to parse temperature value", slog.String("value", tempStr), slog.String("error", err.Error())) continue } - // Filter out anomalous high readings. Turbostat sometimes reports very high temperature values that are not realistic. 
if temp > 200 { slog.Warn("ignoring anomalous high temperature reading", slog.String("value", tempStr)) continue diff --git a/internal/common/turbostat_test.go b/internal/extract/turbostat_test.go similarity index 99% rename from internal/common/turbostat_test.go rename to internal/extract/turbostat_test.go index 540097f1..b0f85c55 100644 --- a/internal/common/turbostat_test.go +++ b/internal/extract/turbostat_test.go @@ -1,4 +1,4 @@ -package common +package extract import ( "reflect" diff --git a/internal/workflow/collection.go b/internal/workflow/collection.go new file mode 100644 index 00000000..82983d65 --- /dev/null +++ b/internal/workflow/collection.go @@ -0,0 +1,183 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package workflow + +import ( + "fmt" + "log/slog" + "strings" + + "slices" + + "perfspect/internal/app" + "perfspect/internal/progress" + "perfspect/internal/report" + "perfspect/internal/script" + "perfspect/internal/table" + "perfspect/internal/target" + "perfspect/internal/util" + + "github.com/spf13/cobra" +) + +// outputsFromInput reads the raw file(s) and returns the data in the order of the raw files +func outputsFromInput(tables []table.TableDefinition, summaryTableName string) ([]TargetScriptOutputs, error) { + orderedTargetScriptOutputs := []TargetScriptOutputs{} + includedTables := []table.TableDefinition{} + // read the raw file(s) as JSON + rawReports, err := report.ReadRawReports(app.FlagInput) + if err != nil { + err = fmt.Errorf("failed to read raw file(s): %w", err) + return nil, err + } + for _, rawReport := range rawReports { + for _, tableName := range rawReport.TableNames { // just in case someone tries to use the raw files that were collected with a different set of categories + // filter out tables that we add after processing + if tableName == app.TableNameInsights || tableName == app.TableNamePerfspect || tableName == summaryTableName { + continue + } + includedTable, err := findTableByName(tables, tableName) + if err != nil { + slog.Warn("table from raw report not found in current tables", slog.String("table", tableName), slog.String("target", rawReport.TargetName)) + continue + } + includedTables = append(includedTables, *includedTable) + } + orderedTargetScriptOutputs = append(orderedTargetScriptOutputs, TargetScriptOutputs{TargetName: rawReport.TargetName, ScriptOutputs: rawReport.ScriptOutputs, Tables: includedTables}) + } + return orderedTargetScriptOutputs, nil +} + +// outputsFromTargets runs the scripts on the targets and returns the data in the order of the targets +func outputsFromTargets(cmd *cobra.Command, myTargets []target.Target, tables []table.TableDefinition, scriptParams map[string]string, statusUpdate progress.MultiSpinnerUpdateFunc, localTempDir string) ([]TargetScriptOutputs, error) { + orderedTargetScriptOutputs := []TargetScriptOutputs{} + channelTargetScriptOutputs := make(chan TargetScriptOutputs) + channelError := make(chan error) + // create the list of tables and associated scripts for each target + targetTables := [][]table.TableDefinition{} + targetScriptNames := [][]string{} + for targetIdx, target := range myTargets { + targetTables = append(targetTables, []table.TableDefinition{}) + targetScriptNames = append(targetScriptNames, []string{}) + for _, tbl := range tables { + if isTableForTarget(tbl, target, localTempDir) { + // add table to list of tables to collect + targetTables[targetIdx] = append(targetTables[targetIdx], tbl) + // add scripts to list of scripts to run + 
for _, scriptName := range tbl.ScriptNames { + targetScriptNames[targetIdx] = util.UniqueAppend(targetScriptNames[targetIdx], scriptName) + } + } else { + slog.Debug("table not supported for target", slog.String("table", tbl.Name), slog.String("target", target.GetName())) + } + } + } + // run the scripts on the targets + for targetIdx, target := range myTargets { + scriptsToRunOnTarget := []script.ScriptDefinition{} + for _, scriptName := range targetScriptNames[targetIdx] { + script := script.GetParameterizedScriptByName(scriptName, scriptParams) + scriptsToRunOnTarget = append(scriptsToRunOnTarget, script) + } + // run the selected scripts on the target + ctrlCToStop := cmd.Name() == "telemetry" || cmd.Name() == "flamegraph" + go collectOnTarget(target, scriptsToRunOnTarget, localTempDir, scriptParams["Duration"], ctrlCToStop, channelTargetScriptOutputs, channelError, statusUpdate) + } + // wait for scripts to run on all targets + var allTargetScriptOutputs []TargetScriptOutputs + for range myTargets { + select { + case scriptOutputs := <-channelTargetScriptOutputs: + allTargetScriptOutputs = append(allTargetScriptOutputs, scriptOutputs) + case err := <-channelError: + slog.Error(err.Error()) + } + } + // allTargetScriptOutputs is in the order of data collection completion + // reorder to match order of myTargets + for targetIdx, target := range myTargets { + for _, targetScriptOutputs := range allTargetScriptOutputs { + if targetScriptOutputs.TargetName == target.GetName() { + targetScriptOutputs.Tables = targetTables[targetIdx] + orderedTargetScriptOutputs = append(orderedTargetScriptOutputs, targetScriptOutputs) + break + } + } + } + return orderedTargetScriptOutputs, nil +} + +// isTableForTarget checks if the given table is applicable for the specified target +func isTableForTarget(tbl table.TableDefinition, t target.Target, localTempDir string) bool { + if len(tbl.Architectures) > 0 { + architecture, err := t.GetArchitecture() + if err != nil { + slog.Error("failed to get architecture for target", slog.String("target", t.GetName()), slog.String("error", err.Error())) + return false + } + if !slices.Contains(tbl.Architectures, architecture) { + return false + } + } + if len(tbl.Vendors) > 0 { + vendor, err := GetTargetVendor(t) + if err != nil { + slog.Error("failed to get vendor for target", slog.String("target", t.GetName()), slog.String("error", err.Error())) + return false + } + if !slices.Contains(tbl.Vendors, vendor) { + return false + } + } + if len(tbl.MicroArchitectures) > 0 { + uarch, err := GetTargetMicroArchitecture(t, localTempDir, false) + if err != nil { + slog.Error("failed to get microarchitecture for target", slog.String("target", t.GetName()), slog.String("error", err.Error())) + } + shortUarch := strings.Split(uarch, "_")[0] // handle EMR_XCC, etc. 
+ shortUarch = strings.Split(shortUarch, "-")[0] // handle GNR-D + shortUarch = strings.Split(shortUarch, " ")[0] // handle Turin (Zen 5) + if !slices.Contains(tbl.MicroArchitectures, uarch) && !slices.Contains(tbl.MicroArchitectures, shortUarch) { + return false + } + } + return true +} + +// elevatedPrivilegesRequired returns true if any of the scripts needed for the tables require elevated privileges +func elevatedPrivilegesRequired(tables []table.TableDefinition) bool { + for _, tbl := range tables { + for _, scriptName := range tbl.ScriptNames { + script := script.GetScriptByName(scriptName) + if script.Superuser { + return true + } + } + } + return false +} + +// collectOnTarget runs the scripts on the target and sends the results to the appropriate channels +func collectOnTarget(myTarget target.Target, scriptsToRun []script.ScriptDefinition, localTempDir string, duration string, ctrlCToStop bool, channelTargetScriptOutputs chan TargetScriptOutputs, channelError chan error, statusUpdate progress.MultiSpinnerUpdateFunc) { + // run the scripts on the target + status := "collecting data" + if ctrlCToStop && duration == "0" { + status += ", press Ctrl+c to stop" + } else if duration != "0" && duration != "" { + status += fmt.Sprintf(" for %s seconds", duration) + } + scriptOutputs, err := RunScripts(myTarget, scriptsToRun, true, localTempDir, statusUpdate, status, false) + if err != nil { + if statusUpdate != nil { + _ = statusUpdate(myTarget.GetName(), fmt.Sprintf("error collecting data: %v", err)) + } + err = fmt.Errorf("error running data collection scripts on %s: %v", myTarget.GetName(), err) + channelError <- err + return + } + if statusUpdate != nil { + _ = statusUpdate(myTarget.GetName(), "collection complete") + } + channelTargetScriptOutputs <- TargetScriptOutputs{TargetName: myTarget.GetName(), ScriptOutputs: scriptOutputs} +} diff --git a/internal/workflow/reports.go b/internal/workflow/reports.go new file mode 100644 index 00000000..8b7f05e2 --- /dev/null +++ b/internal/workflow/reports.go @@ -0,0 +1,183 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package workflow + +import ( + "fmt" + "log/slog" + "os" + "path/filepath" + "strings" + + "slices" + + "perfspect/internal/app" + "perfspect/internal/report" + "perfspect/internal/table" + "perfspect/internal/util" +) + +// createRawReports creates the raw report(s) from the collected data +// returns the list of report files creates or an error if the report creation failed. +func (rc *ReportingCommand) createRawReports(appContext app.Context, orderedTargetScriptOutputs []TargetScriptOutputs) ([]string, error) { + var reports []string + for _, targetScriptOutputs := range orderedTargetScriptOutputs { + reportBytes, err := report.CreateRawReport(rc.Tables, targetScriptOutputs.ScriptOutputs, targetScriptOutputs.TargetName) + if err != nil { + err = fmt.Errorf("failed to create raw report: %w", err) + return reports, err + } + post := "" + if rc.ReportNamePost != "" { + post = "_" + rc.ReportNamePost + } + reportFilename := fmt.Sprintf("%s%s.%s", targetScriptOutputs.TargetName, post, "raw") + reportPath := filepath.Join(appContext.OutputDir, reportFilename) + if err = writeReport(reportBytes, reportPath); err != nil { + err = fmt.Errorf("failed to write report: %w", err) + return reports, err + } + reports = append(reports, reportPath) + } + return reports, nil +} + +// writeReport writes the report bytes to the specified path. 
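// Editor's note: illustrative aside, not part of this patch. createReports, defined further
// below, positions the summary table in front of the table named by SummaryBeforeTableName
// using the standard append-based insert idiom. In isolation, that idiom (or, on Go 1.21+,
// the equivalent slices.Insert) looks like this:

func insertAt[T any](s []T, i int, v T) []T {
	// the inner append copies s[i:] into a fresh slice before the outer append overwrites it
	return append(s[:i], append([]T{v}, s[i:]...)...)
	// equivalently: return slices.Insert(s, i, v)
}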
+func writeReport(reportBytes []byte, reportPath string) error { + err := os.WriteFile(reportPath, reportBytes, 0644) // #nosec G306 + if err != nil { + err = fmt.Errorf("failed to write report file: %v", err) + fmt.Fprintln(os.Stderr, err) + slog.Error(err.Error()) + return err + } + return nil +} + +// createReports processes the collected data and creates the requested report(s) +func (rc *ReportingCommand) createReports(appContext app.Context, orderedTargetScriptOutputs []TargetScriptOutputs, formats []string) ([]string, error) { + reportFilePaths := []string{} + allTargetsTableValues := make([][]table.TableValues, 0) + for _, targetScriptOutputs := range orderedTargetScriptOutputs { + // process the tables, i.e., get field values from script output + allTableValues, err := table.ProcessTables(targetScriptOutputs.Tables, targetScriptOutputs.ScriptOutputs) + if err != nil { + err = fmt.Errorf("failed to process collected data: %w", err) + return nil, err + } + // special case - the summary table is built from the post-processed data, i.e., table values + if rc.SummaryFunc != nil { + summaryTableValues := rc.SummaryFunc(allTableValues, targetScriptOutputs.ScriptOutputs) + // insert the summary table before the table specified by SummaryBeforeTableName, otherwise append it at the end + summaryBeforeTableFound := false + if rc.SummaryBeforeTableName != "" { + for i, tableValues := range allTableValues { + if tableValues.TableDefinition.Name == rc.SummaryBeforeTableName { + summaryBeforeTableFound = true + // insert the summary table before this table + allTableValues = append(allTableValues[:i], append([]table.TableValues{summaryTableValues}, allTableValues[i:]...)...) + break + } + } + } + if !summaryBeforeTableFound { + // append the summary table at the end + allTableValues = append(allTableValues, summaryTableValues) + } + } + // special case - add tableValues for Insights + if rc.InsightsFunc != nil { + insightsTableValues := rc.InsightsFunc(allTableValues, targetScriptOutputs.ScriptOutputs) + allTableValues = append(allTableValues, insightsTableValues) + } + // special case - add tableValues for the application version + allTableValues = append(allTableValues, table.TableValues{ + TableDefinition: table.TableDefinition{ + Name: app.TableNamePerfspect, + }, + Fields: []table.Field{ + {Name: "Version", Values: []string{appContext.Version}}, + {Name: "Args", Values: []string{strings.Join(os.Args, " ")}}, + {Name: "OutputDir", Values: []string{appContext.OutputDir}}, + }, + }) + // create the report(s) + for _, format := range formats { + reportBytes, err := report.Create(format, allTableValues, targetScriptOutputs.TargetName, rc.SystemSummaryTableName) + if err != nil { + err = fmt.Errorf("failed to create report: %w", err) + return nil, err + } + if len(formats) == 1 && format == report.FormatTxt { + fmt.Printf("%s:\n", targetScriptOutputs.TargetName) + fmt.Print(string(reportBytes)) + } + post := "" + if rc.ReportNamePost != "" { + post = "_" + rc.ReportNamePost + } + reportFilename := fmt.Sprintf("%s%s.%s", targetScriptOutputs.TargetName, post, format) + reportPath := filepath.Join(appContext.OutputDir, reportFilename) + if err = writeReport(reportBytes, reportPath); err != nil { + err = fmt.Errorf("failed to write report: %w", err) + return nil, err + } + reportFilePaths = append(reportFilePaths, reportPath) + } + // keep all the targets table values for combined reports + allTargetsTableValues = append(allTargetsTableValues, allTableValues) + } + if len(allTargetsTableValues) > 1 
&& len(orderedTargetScriptOutputs) > 1 { + // list of target names for the combined report + // - only those that we received output from + targetNames := make([]string, 0) + for _, targetScriptOutputs := range orderedTargetScriptOutputs { + targetNames = append(targetNames, targetScriptOutputs.TargetName) + } + // merge table names from all targets maintaining the order of the tables + mergedTableNames := util.MergeOrderedUnique(extractTableNamesFromValues(allTargetsTableValues)) + multiTargetFormats := []string{report.FormatHtml, report.FormatXlsx} + for _, format := range multiTargetFormats { + if !slices.Contains(formats, format) { + continue + } + reportBytes, err := report.CreateMultiTarget(format, allTargetsTableValues, targetNames, mergedTableNames, rc.SummaryTableName) + if err != nil { + err = fmt.Errorf("failed to create multi-target %s report: %w", format, err) + return nil, err + } + reportFilename := fmt.Sprintf("%s.%s", "all_hosts", format) + reportPath := filepath.Join(appContext.OutputDir, reportFilename) + if err = writeReport(reportBytes, reportPath); err != nil { + err = fmt.Errorf("failed to write multi-target %s report: %w", format, err) + return nil, err + } + reportFilePaths = append(reportFilePaths, reportPath) + } + } + return reportFilePaths, nil +} + +// extractTableNamesFromValues extracts the table names from the processed table values for each target. +// It returns a slice of slices, where each inner slice contains the table names for a target. +func extractTableNamesFromValues(allTargetsTableValues [][]table.TableValues) [][]string { + targetTableNames := make([][]string, 0, len(allTargetsTableValues)) + for _, tableValues := range allTargetsTableValues { + names := make([]string, 0, len(tableValues)) + for _, tv := range tableValues { + names = append(names, tv.TableDefinition.Name) + } + targetTableNames = append(targetTableNames, names) + } + return targetTableNames +} + +func findTableByName(tables []table.TableDefinition, name string) (*table.TableDefinition, error) { + for _, tbl := range tables { + if tbl.Name == name { + return &tbl, nil + } + } + return nil, fmt.Errorf("table [%s] not found", name) +} diff --git a/internal/workflow/signals.go b/internal/workflow/signals.go new file mode 100644 index 00000000..58fb935e --- /dev/null +++ b/internal/workflow/signals.go @@ -0,0 +1,131 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package workflow + +import ( + "context" + "log/slog" + "os" + "os/exec" + "os/signal" + "path/filepath" + "strings" + "syscall" + "time" + + "perfspect/internal/progress" + "perfspect/internal/script" + "perfspect/internal/target" + "perfspect/internal/util" +) + +func signalProcessOnTarget(t target.Target, pidStr string, sigStr string) error { + var cmd *exec.Cmd + // prepend "-" to the signal string if not already present + if !strings.HasPrefix(sigStr, "-") { + sigStr = "-" + sigStr + } + if !t.IsSuperUser() && t.CanElevatePrivileges() { + cmd = exec.Command("sudo", "kill", sigStr, pidStr) + } else { + cmd = exec.Command("kill", sigStr, pidStr) + } + _, _, _, err := t.RunCommandEx(cmd, 5, false, true) // #nosec G204 + return err +} + +// configureSignalHandler sets up a signal handler to catch SIGINT and SIGTERM +// +// When perfspect receives ctrl-c while in the shell, the shell propagates the +// signal to all our children. 
But when perfspect is run in the background or disowned and +// then receives SIGINT, e.g., from a script, we need to send the signal to our children +// +// When running scripts using the controller.sh script, we need to send the signal to the +// controller.sh script on each target so that it can clean up its child processes. This is +// because the controller.sh script is run in its own process group and does not receive the +// signal when perfspect receives it. +// +// Parameters: +// - myTargets: The list of targets to send the signal to. +// - statusFunc: A function to update the status of the progress indicator. +func configureSignalHandler(myTargets []target.Target, statusFunc progress.MultiSpinnerUpdateFunc) { + sigChannel := make(chan os.Signal, 1) + signal.Notify(sigChannel, syscall.SIGINT, syscall.SIGTERM) + go func() { + sig := <-sigChannel + slog.Debug("received signal", slog.String("signal", sig.String())) + // The controller.sh script is run in its own process group, so we need to send the signal + // directly to the PID of the controller. For every target, look for the primary_collection_script + // PID file and send SIGINT to it. + // The controller script is run in its own process group, so we need to send the signal + // directly to the PID of the controller. For every target, look for the controller + // PID file and send SIGINT to it. + for _, t := range myTargets { + if statusFunc != nil { + _ = statusFunc(t.GetName(), "Signal received, cleaning up...") + } + pidFilePath := filepath.Join(t.GetTempDirectory(), script.ControllerPIDFileName) + stdout, _, exitcode, err := t.RunCommandEx(exec.Command("cat", pidFilePath), 5, false, true) // #nosec G204 + if err != nil { + slog.Error("error retrieving target controller PID", slog.String("target", t.GetName()), slog.String("error", err.Error())) + } + if exitcode == 0 { + pidStr := strings.TrimSpace(stdout) + err = signalProcessOnTarget(t, pidStr, "SIGINT") + if err != nil { + slog.Error("error sending SIGINT signal to target controller", slog.String("target", t.GetName()), slog.String("error", err.Error())) + } + } + } + // now wait until all controller scripts have exited + slog.Debug("waiting for controller scripts to exit") + for _, t := range myTargets { + // create a per-target timeout context + targetTimeout := 10 * time.Second + ctx, cancel := context.WithTimeout(context.Background(), targetTimeout) + timedOut := false + pidFilePath := filepath.Join(t.GetTempDirectory(), script.ControllerPIDFileName) + for { + // read the pid file + stdout, _, exitcode, err := t.RunCommandEx(exec.Command("cat", pidFilePath), 5, false, true) // #nosec G204 + if err != nil || exitcode != 0 { + // pid file doesn't exist + break + } + pidStr := strings.TrimSpace(stdout) + // determine if the process still exists + _, _, exitcode, err = t.RunCommandEx(exec.Command("ps", "-p", pidStr), 5, false, true) // #nosec G204 + if err != nil || exitcode != 0 { + break // process no longer exists, script has exited + } + // check for timeout + select { + case <-ctx.Done(): + timedOut = true + default: + } + if timedOut { + if statusFunc != nil { + _ = statusFunc(t.GetName(), "cleanup timeout exceeded, sending kill signal") + } + slog.Warn("signal handler cleanup timeout exceeded for target, sending SIGKILL", slog.String("target", t.GetName())) + err = signalProcessOnTarget(t, pidStr, "SIGKILL") + if err != nil { + slog.Error("error sending SIGKILL signal to target controller", slog.String("target", t.GetName()), slog.String("error", err.Error())) + 
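// Editor's note: illustrative sketch, not part of this patch. The surrounding handler follows
// a common escalation pattern: ask each target's controller script to stop (SIGINT), poll until
// its PID disappears, and fall back to SIGKILL once a per-target deadline passes. Reduced to a
// self-contained form, with processAlive and kill standing in for the RunCommandEx calls above
// (needs "time"):

func waitOrKill(processAlive func() bool, kill func() error, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for processAlive() {
		if time.Now().After(deadline) {
			return kill() // graceful stop took too long; force termination
		}
		time.Sleep(500 * time.Millisecond)
	}
	return nil
}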
} + break + } + // sleep for a short time before checking again + time.Sleep(500 * time.Millisecond) + } + cancel() + } + + // send SIGINT to perfspect's children + err := util.SignalChildren(syscall.SIGINT) + if err != nil { + slog.Error("error sending signal to children", slog.String("error", err.Error())) + } + }() +} diff --git a/internal/common/targets.go b/internal/workflow/targets.go similarity index 94% rename from internal/common/targets.go rename to internal/workflow/targets.go index 2ab11df8..f7134907 100644 --- a/internal/common/targets.go +++ b/internal/workflow/targets.go @@ -1,8 +1,8 @@ -package common - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package workflow + import ( "fmt" "log/slog" @@ -10,11 +10,6 @@ import ( "os/exec" "os/user" "path" - "perfspect/internal/cpus" - "perfspect/internal/progress" - "perfspect/internal/script" - "perfspect/internal/target" - "perfspect/internal/util" "regexp" "runtime" "strconv" @@ -22,6 +17,13 @@ import ( "slices" + "perfspect/internal/app" + "perfspect/internal/cpus" + "perfspect/internal/progress" + "perfspect/internal/script" + "perfspect/internal/target" + "perfspect/internal/util" + "github.com/spf13/cobra" "golang.org/x/term" "gopkg.in/yaml.v2" @@ -45,7 +47,7 @@ const ( FlagTargetKeyName = "key" ) -var targetFlags = []Flag{ +var targetFlags = []app.Flag{ {Name: FlagTargetHostName, Help: "host name or IP address of remote target"}, {Name: FlagTargetPortName, Help: "port for SSH to remote target"}, {Name: FlagTargetUserName, Help: "user name for SSH to remote target"}, @@ -53,6 +55,7 @@ var targetFlags = []Flag{ {Name: FlagTargetsFileName, Help: "file with remote target(s) connection details. See targets.yaml for format."}, } +// AddTargetFlags adds target-related flags to the command. func AddTargetFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&flagTargetHost, FlagTargetHostName, "", targetFlags[0].Help) cmd.Flags().StringVar(&flagTargetPort, FlagTargetPortName, "", targetFlags[1].Help) @@ -63,13 +66,15 @@ func AddTargetFlags(cmd *cobra.Command) { cmd.MarkFlagsMutuallyExclusive(FlagTargetHostName, FlagTargetsFileName) } -func GetTargetFlagGroup() FlagGroup { - return FlagGroup{ +// GetTargetFlagGroup returns the flag group for target options. +func GetTargetFlagGroup() app.FlagGroup { + return app.FlagGroup{ GroupName: "Remote Target Options", Flags: targetFlags, } } +// ValidateTargetFlags validates the target-related flags. func ValidateTargetFlags(cmd *cobra.Command) error { if flagTargetsFile != "" && flagTargetHost != "" { return fmt.Errorf("only one of --%s or --%s can be specified", FlagTargetsFileName, FlagTargetHostName) @@ -175,15 +180,6 @@ func GetTargets(cmd *cobra.Command, needsElevatedPrivileges bool, failIfCantElev } // getSingleTarget returns a target.Target object representing the target host and associated details. -// The function takes the following parameters: -// - cmd: A pointer to the cobra.Command object representing the command. -// - needsElevatedPriviliges: A boolean indicating whether elevated privileges are required. -// - failIfCantElevate: A boolean indicating whether to fail if elevated privileges can't be obtained. -// - localTempDir: A string representing the local temporary directory. -// The function returns the following values: -// - myTarget: A target.Target object representing the target host and associated details. -// - targetError: An error indicating a problem with the target host connection. 
-// - err: An error object indicating any error that occurred during the function execution. func getSingleTarget(cmd *cobra.Command, needsElevatedPrivileges bool, failIfCantElevate bool, localTempDir string) (target.Target, error, error) { targetHost, _ := cmd.Flags().GetString(FlagTargetHostName) targetPort, _ := cmd.Flags().GetString(FlagTargetPortName) @@ -341,7 +337,6 @@ func sanitizeTargetName(targetName string) string { } // getTargetsFromFile reads a targets file and returns a list of target objects. -// It takes the path to the targets file and the local temporary directory as input. func getTargetsFromFile(targetsFilePath string, localTempDir string) (targets []target.Target, targetErrs []error, err error) { var targetsFile targetsFile // read the file into a byte array @@ -402,9 +397,6 @@ func getTargetsFromFile(targetsFilePath string, localTempDir string) (targets [] } // getPassword prompts the user for a password and returns it as a string. -// It takes a prompt string as input and displays the prompt to the user. -// The user's input is hidden as they type, and the entered password is returned as a string. -// If an error occurs while reading the password, it is returned along with an empty string. func getPassword(prompt string) (string, error) { fmt.Fprintf(os.Stderr, "\n%s: ", prompt) pwd, err := term.ReadPassword(0) @@ -429,12 +421,6 @@ func getHostArchitecture() (string, error) { } // fieldFromDfpOutput parses the output of the `df -P ` command and returns the specified field value. -// example output: -// -// Filesystem 1024-blocks Used Available Capacity Mounted on -// /dev/sda2 1858388360 17247372 1747419536 1% / -// -// Returns the value of the specified field from the second line of the output. func fieldFromDfpOutput(dfOutput string, fieldName string) (string, error) { lines := strings.Split(dfOutput, "\n") if len(lines) < 2 { @@ -468,7 +454,6 @@ type mountRecord struct { } // parseMountOutput parses the output of the `mount` command and returns a slice of mountRecord structs. -// e.g., "sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime)" func parseMountOutput(mountOutput string) ([]mountRecord, error) { var mounts []mountRecord for line := range strings.SplitSeq(mountOutput, "\n") { @@ -544,10 +529,12 @@ func isDirNoExec(t target.Target, dir string) (bool, error) { return false, fmt.Errorf("filesystem %s and mount point %s are not found in mount records", filesystem, mountedOn) } +// GetTargetArchitecture returns the architecture of the target. func GetTargetArchitecture(t target.Target) (string, error) { return t.GetArchitecture() } +// GetTargetVendor returns the CPU vendor of the target. func GetTargetVendor(t target.Target) (string, error) { vendor := t.GetVendor() if vendor == "" { @@ -563,6 +550,7 @@ func GetTargetVendor(t target.Target) (string, error) { return vendor, nil } +// GetTargetFamily returns the CPU family of the target. func GetTargetFamily(t target.Target) (string, error) { family := t.GetFamily() if family == "" { @@ -578,6 +566,7 @@ func GetTargetFamily(t target.Target) (string, error) { return family, nil } +// GetTargetModel returns the CPU model of the target. func GetTargetModel(t target.Target) (string, error) { model := t.GetModel() if model == "" { @@ -593,6 +582,7 @@ func GetTargetModel(t target.Target) (string, error) { return model, nil } +// GetTargetStepping returns the CPU stepping of the target. 
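// Editor's note: illustrative sketch, not part of this patch. The GetTarget* helpers in this
// file share a lazy, memoized shape: return the value already cached on the target, otherwise
// run the corresponding script once, parse its output, and remember the result. Roughly, with
// getCached, runAndParse, and setCached standing in for the target methods and script plumbing:

func lazyLookup(getCached func() string, runAndParse func() (string, error), setCached func(string)) (string, error) {
	if v := getCached(); v != "" {
		return v, nil // already known for this target
	}
	v, err := runAndParse()
	if err != nil {
		return "", err
	}
	setCached(v) // remember it so later callers skip the script
	return v, nil
}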
func GetTargetStepping(t target.Target) (string, error) { stepping := t.GetStepping() if stepping == "" { @@ -608,11 +598,12 @@ func GetTargetStepping(t target.Target) (string, error) { return stepping, nil } +// GetTargetCapid4 returns the CAPID4 value of the target. func GetTargetCapid4(t target.Target, localTempDir string, noRoot bool) (string, error) { capid4 := t.GetCapid4() if capid4 == "" { getScript := script.GetScriptByName(script.LspciBitsScriptName) - scriptOutput, err := script.RunScript(t, getScript, localTempDir) // don't call common.RunScript, otherwise infinite loop + scriptOutput, err := script.RunScript(t, getScript, localTempDir) // don't call workflow.RunScript, otherwise infinite loop if err != nil { return "", fmt.Errorf("failed to run lspci bits script: %v", err) } @@ -622,11 +613,12 @@ func GetTargetCapid4(t target.Target, localTempDir string, noRoot bool) (string, return capid4, nil } +// GetTargetDevices returns the PCI devices of the target. func GetTargetDevices(t target.Target, localTempDir string, noRoot bool) (string, error) { devices := t.GetDevices() if devices == "" { getScript := script.GetScriptByName(script.LspciDevicesScriptName) - scriptOutput, err := script.RunScript(t, getScript, localTempDir) // don't call common.RunScript, otherwise infinite loop + scriptOutput, err := script.RunScript(t, getScript, localTempDir) // don't call workflow.RunScript, otherwise infinite loop if err != nil { return "", fmt.Errorf("failed to run lspci devices script: %v", err) } @@ -636,6 +628,7 @@ func GetTargetDevices(t target.Target, localTempDir string, noRoot bool) (string return devices, nil } +// GetTargetImplementer returns the ARM implementer of the target. func GetTargetImplementer(t target.Target, localTempDir string, noRoot bool) (string, error) { implementer := t.GetImplementer() if implementer == "" { @@ -650,6 +643,7 @@ func GetTargetImplementer(t target.Target, localTempDir string, noRoot bool) (st return implementer, nil } +// GetTargetPart returns the ARM part number of the target. func GetTargetPart(t target.Target, localTempDir string, noRoot bool) (string, error) { part := t.GetPart() if part == "" { @@ -664,11 +658,12 @@ func GetTargetPart(t target.Target, localTempDir string, noRoot bool) (string, e return part, nil } +// GetTargetDmidecodePart returns the DMI decode part number of the target. func GetTargetDmidecodePart(t target.Target, localTempDir string, noRoot bool) (string, error) { dmidecodePart := t.GetDmidecodePart() if dmidecodePart == "" { getScript := script.GetScriptByName(script.ArmDmidecodePartScriptName) - scriptOutput, err := script.RunScript(t, getScript, localTempDir) // don't call common.RunScript, otherwise infinite loop + scriptOutput, err := script.RunScript(t, getScript, localTempDir) // don't call workflow.RunScript, otherwise infinite loop if err != nil { return "", fmt.Errorf("failed to run dmidecode part number script: %v", err) } @@ -678,6 +673,7 @@ func GetTargetDmidecodePart(t target.Target, localTempDir string, noRoot bool) ( return dmidecodePart, nil } +// GetTargetMicroArchitecture returns the microarchitecture of the target CPU. func GetTargetMicroArchitecture(t target.Target, localTempDir string, noRoot bool) (string, error) { uarch := t.GetMicroarchitecture() if uarch == "" { @@ -701,6 +697,7 @@ func GetTargetMicroArchitecture(t target.Target, localTempDir string, noRoot boo return uarch, nil } +// GetX86TargetMicroarchitecture returns the microarchitecture of an x86 target. 
func GetX86TargetMicroarchitecture(t target.Target, localTempDir string, noRoot bool) (string, error) { family, err := GetTargetFamily(t) if err != nil { @@ -731,6 +728,7 @@ func GetX86TargetMicroarchitecture(t target.Target, localTempDir string, noRoot return cpu.MicroArchitecture, nil } +// GetARMTargetMicroarchitecture returns the microarchitecture of an ARM target. func GetARMTargetMicroarchitecture(t target.Target, localTempDir string, noRoot bool) (string, error) { implementer, err := GetTargetImplementer(t, localTempDir, noRoot) if err != nil { @@ -751,6 +749,7 @@ func GetARMTargetMicroarchitecture(t target.Target, localTempDir string, noRoot return cpu.MicroArchitecture, nil } +// ScriptSupportedOnTarget checks if a script is supported on the target. func ScriptSupportedOnTarget(t target.Target, scriptDef script.ScriptDefinition, localTempDir string, noRoot bool) (bool, error) { if len(scriptDef.Architectures) > 0 { arch, err := GetTargetArchitecture(t) @@ -788,6 +787,7 @@ func ScriptSupportedOnTarget(t target.Target, scriptDef script.ScriptDefinition, return true, nil } +// FilterScriptsForTarget filters scripts to only those supported on the target. func FilterScriptsForTarget(t target.Target, scriptDefs []script.ScriptDefinition, localTempDir string, noRoot bool) (supportedScripts []script.ScriptDefinition, err error) { for _, scriptDef := range scriptDefs { supported, err := ScriptSupportedOnTarget(t, scriptDef, localTempDir, noRoot) @@ -802,8 +802,7 @@ func FilterScriptsForTarget(t target.Target, scriptDefs []script.ScriptDefinitio return } -// Create wrappers around script.RunScript* that first check if the scripts are compatible with the target - +// RunScript runs a script on the target after checking compatibility. func RunScript(t target.Target, s script.ScriptDefinition, localTempDir string, noRoot bool) (script.ScriptOutput, error) { supported, err := ScriptSupportedOnTarget(t, s, localTempDir, noRoot) if err != nil { @@ -815,6 +814,7 @@ func RunScript(t target.Target, s script.ScriptDefinition, localTempDir string, return script.RunScript(t, s, localTempDir) } +// RunScripts runs multiple scripts on the target after checking compatibility. func RunScripts(t target.Target, s []script.ScriptDefinition, continueOnScriptError bool, localTempDir string, statusUpdate progress.MultiSpinnerUpdateFunc, collectingStatusMsg string, noRoot bool) (map[string]script.ScriptOutput, error) { supportedScripts, err := FilterScriptsForTarget(t, s, localTempDir, noRoot) if err != nil { @@ -826,6 +826,7 @@ func RunScripts(t target.Target, s []script.ScriptDefinition, continueOnScriptEr return script.RunScripts(t, supportedScripts, continueOnScriptError, localTempDir, statusUpdate, collectingStatusMsg) } +// RunScriptStream runs a script on the target with streaming output. 
func RunScriptStream(t target.Target, s script.ScriptDefinition, localTempDir string, stdoutChannel chan []byte, stderrChannel chan []byte, exitcodeChannel chan int, errorChannel chan error, cmdChannel chan *exec.Cmd, noRoot bool) { supported, err := ScriptSupportedOnTarget(t, s, localTempDir, noRoot) if err != nil { diff --git a/internal/common/targets_test.go b/internal/workflow/targets_test.go similarity index 99% rename from internal/common/targets_test.go rename to internal/workflow/targets_test.go index 01cc43af..9e951d01 100644 --- a/internal/common/targets_test.go +++ b/internal/workflow/targets_test.go @@ -1,4 +1,4 @@ -package common +package workflow // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause diff --git a/internal/workflow/workflow.go b/internal/workflow/workflow.go new file mode 100644 index 00000000..330a64ee --- /dev/null +++ b/internal/workflow/workflow.go @@ -0,0 +1,263 @@ +// Package workflow implements the common flow/logic for reporting commands +// (report, telemetry, flamegraph, lock). It handles target management, +// script execution, and report generation. +package workflow + +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +import ( + "fmt" + "log/slog" + "os" + "path/filepath" + + "slices" + + "perfspect/internal/app" + "perfspect/internal/progress" + "perfspect/internal/report" + "perfspect/internal/script" + "perfspect/internal/table" + "perfspect/internal/target" + "perfspect/internal/util" + + "github.com/spf13/cobra" +) + +// TargetScriptOutputs holds the script outputs and tables for a target. +type TargetScriptOutputs struct { + TargetName string + ScriptOutputs map[string]script.ScriptOutput + Tables []table.TableDefinition +} + +// GetScriptOutputs returns the script outputs for the target. +func (tso *TargetScriptOutputs) GetScriptOutputs() map[string]script.ScriptOutput { + return tso.ScriptOutputs +} + +// AdhocFunc is a function type for running ad-hoc actions after report generation. +type AdhocFunc func(app.Context, map[string]script.ScriptOutput, target.Target, progress.MultiSpinnerUpdateFunc) error + +// ReportingCommand represents a command that generates reports from collected data. +type ReportingCommand struct { + Cmd *cobra.Command + ReportNamePost string + Tables []table.TableDefinition + ScriptParams map[string]string + SummaryFunc app.SummaryFunc + SummaryTableName string // e.g., the benchmark or telemetry summary table + SummaryBeforeTableName string // the name of the table that the summary table should be placed before in the report + InsightsFunc app.InsightsFunc + AdhocFunc AdhocFunc + SystemSummaryTableName string // Optional: Only affects xlsx format reports. If set, the table with this name will be used as the "Brief" sheet in the xlsx report. If empty or unset, no "Brief" sheet is generated. +} + +// Run is the common flow/logic for all reporting commands, i.e., 'report', 'telemetry', 'flame', 'lock' +// The individual commands populate the ReportingCommand struct with the details specific to the command +// and then call this Run function. +func (rc *ReportingCommand) Run() error { + // appContext is the application context that holds common data and resources. 
+ appContext := rc.Cmd.Parent().Context().Value(app.Context{}).(app.Context) + timestamp := appContext.Timestamp + localTempDir := appContext.LocalTempDir + outputDir := appContext.OutputDir + logFilePath := appContext.LogFilePath + // create output directory + err := util.CreateDirectoryIfNotExists(outputDir, 0755) // #nosec G301 + if err != nil { + err = fmt.Errorf("failed to create output directory: %w", err) + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + slog.Error(err.Error()) + rc.Cmd.SilenceUsage = true + return err + } + + var myTargets []target.Target + var orderedTargetScriptOutputs []TargetScriptOutputs + if app.FlagInput != "" { + var err error + orderedTargetScriptOutputs, err = outputsFromInput(rc.Tables, rc.SummaryTableName) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + slog.Error(err.Error()) + rc.Cmd.SilenceUsage = true + return err + } + } else { + // get the targets + var targetErrs []error + var err error + myTargets, targetErrs, err = GetTargets(rc.Cmd, elevatedPrivilegesRequired(rc.Tables), false, localTempDir) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + slog.Error(err.Error()) + rc.Cmd.SilenceUsage = true + return err + } + // schedule the cleanup of the temporary directory on each target (if not debugging) + if rc.Cmd.Parent().PersistentFlags().Lookup("debug").Value.String() != "true" { + for _, myTarget := range myTargets { + if myTarget.GetTempDirectory() != "" { + deferTarget := myTarget // create a new variable to capture the current value + defer func(deferTarget target.Target) { + err := deferTarget.RemoveTempDirectory() + if err != nil { + slog.Error("error removing target temporary directory", slog.String("error", err.Error())) + } + }(deferTarget) + } + } + } + // setup and start the progress indicator + multiSpinner := progress.NewMultiSpinner() + for _, target := range myTargets { + err := multiSpinner.AddSpinner(target.GetName()) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + slog.Error(err.Error()) + rc.Cmd.SilenceUsage = true + return err + } + } + multiSpinner.Start() + // remove targets that had errors + var indicesToRemove []int + for i := range targetErrs { + if targetErrs[i] != nil { + _ = multiSpinner.Status(myTargets[i].GetName(), fmt.Sprintf("Error: %v", targetErrs[i])) + indicesToRemove = append(indicesToRemove, i) + } + } + for i := len(indicesToRemove) - 1; i >= 0; i-- { + myTargets = slices.Delete(myTargets, indicesToRemove[i], indicesToRemove[i]+1) + } + // set up signal handler to help with cleaning up child processes on ctrl-c/SIGINT or SIGTERM + configureSignalHandler(myTargets, multiSpinner.Status) + // collect data from targets + orderedTargetScriptOutputs, err = outputsFromTargets(rc.Cmd, myTargets, rc.Tables, rc.ScriptParams, multiSpinner.Status, localTempDir) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + slog.Error(err.Error()) + rc.Cmd.SilenceUsage = true + return err + } + // stop the progress indicator + multiSpinner.Finish() + fmt.Println() + // exit with error if no targets remain + if len(myTargets) == 0 { + err := fmt.Errorf("no successful targets found") + slog.Error(err.Error()) + rc.Cmd.SilenceUsage = true + return err + } + } + // create the raw report before processing the data, so that we can save the raw data even if there is an error while processing + var rawReports []string + rawReports, err = rc.createRawReports(appContext, orderedTargetScriptOutputs) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + slog.Error(err.Error()) 
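// Editor's note: illustrative aside, not part of this patch. A reporting subcommand is expected
// to populate ReportingCommand and delegate to this Run method; the exact wiring differs per
// cmd/ package, but the shape is roughly as follows (the table definitions and script
// parameters shown are hypothetical):

func runExampleReportingCommand(cmd *cobra.Command, tables []table.TableDefinition) error {
	rc := ReportingCommand{
		Cmd:          cmd,
		Tables:       tables,
		ScriptParams: map[string]string{"Duration": "0"},
		InsightsFunc: DefaultInsightsFunc, // optional; adds the Insights table
	}
	return rc.Run()
}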
+ rc.Cmd.SilenceUsage = true + return err + } + // check report formats + formats := app.FlagFormat + if slices.Contains(formats, report.FormatAll) { + formats = report.FormatOptions + } + // process the collected data and create the requested report(s) + reportFilePaths, err := rc.createReports(appContext, orderedTargetScriptOutputs, formats) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + slog.Error(err.Error()) + rc.Cmd.SilenceUsage = true + return err + } + // if we are debugging, create a tgz archive with the raw reports, formatted reports, and log file + if appContext.Debug { + archiveFiles := append(reportFilePaths, rawReports...) + if len(archiveFiles) > 0 { + if logFilePath != "" { + archiveFiles = append(archiveFiles, logFilePath) + } + err := util.CreateFlatTGZ(archiveFiles, filepath.Join(outputDir, app.Name+"_"+timestamp+".tgz")) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + slog.Error(err.Error()) + rc.Cmd.SilenceUsage = true + return err + } + } + } + if len(reportFilePaths) > 0 { + fmt.Println("Report files:") + } + for _, reportFilePath := range reportFilePaths { + fmt.Printf(" %s\n", reportFilePath) + } + // lastly, run any adhoc actions + if rc.AdhocFunc != nil { + fmt.Println() + // setup and start the progress indicator + multiSpinner := progress.NewMultiSpinner() + for _, target := range myTargets { + err := multiSpinner.AddSpinner(target.GetName()) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + slog.Error(err.Error()) + rc.Cmd.SilenceUsage = true + return err + } + } + multiSpinner.Start() + adhocErrorChannel := make(chan error) + for i, t := range myTargets { + go func(target target.Target, i int) { + err := rc.AdhocFunc(appContext, orderedTargetScriptOutputs[i].ScriptOutputs, target, multiSpinner.Status) + adhocErrorChannel <- err + }(t, i) + } + // wait for all adhoc actions to complete, errors were reported by the AdhocFunc + for range myTargets { + <-adhocErrorChannel + } + // stop the progress indicator + multiSpinner.Finish() + fmt.Println() + } + return nil +} + +// DefaultInsightsFunc returns the insights table values from the table values +func DefaultInsightsFunc(allTableValues []table.TableValues, scriptOutputs map[string]script.ScriptOutput) table.TableValues { + insightsTableValues := table.TableValues{ + TableDefinition: table.TableDefinition{ + Name: app.TableNameInsights, + HasRows: true, + MenuLabel: app.TableNameInsights, + }, + Fields: []table.Field{ + {Name: "Recommendation", Values: []string{}}, + {Name: "Justification", Values: []string{}}, + }, + } + for _, tableValues := range allTableValues { + for _, insight := range tableValues.Insights { + insightsTableValues.Fields[0].Values = append(insightsTableValues.Fields[0].Values, insight.Recommendation) + insightsTableValues.Fields[1].Values = append(insightsTableValues.Fields[1].Values, insight.Justification) + } + } + return insightsTableValues +} + +// FlagValidationError is used to report an error with a flag +func FlagValidationError(cmd *cobra.Command, msg string) error { + err := fmt.Errorf("%s", msg) + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + fmt.Fprintf(os.Stderr, "See '%s --help' for usage details.\n", cmd.CommandPath()) + cmd.SilenceUsage = true + return err +} From 020c688c728de8b56bb89b4161c55c5b53599374 Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Sun, 28 Dec 2025 16:08:28 -0800 Subject: [PATCH 2/8] copyright at top of package files Signed-off-by: Harper, Jason M --- cmd/benchmark/benchmark.go | 6 +++--- 
cmd/config/config.go | 6 +++--- cmd/config/restore.go | 6 +++--- cmd/flamegraph/flamegraph.go | 6 +++--- cmd/lock/lock.go | 6 +++--- cmd/metrics/metrics.go | 6 +++--- cmd/report/report.go | 6 +++--- cmd/root.go | 6 +++--- cmd/telemetry/telemetry.go | 6 +++--- internal/app/app.go | 6 +++--- internal/cpus/cpus.go | 6 +++--- internal/extract/extract.go | 6 +++--- internal/report/report.go | 6 +++--- internal/script/script.go | 6 +++--- internal/util/util.go | 6 +++--- internal/workflow/workflow.go | 6 +++--- 16 files changed, 48 insertions(+), 48 deletions(-) diff --git a/cmd/benchmark/benchmark.go b/cmd/benchmark/benchmark.go index 99c02937..d7202dad 100644 --- a/cmd/benchmark/benchmark.go +++ b/cmd/benchmark/benchmark.go @@ -1,9 +1,9 @@ -// Package benchmark is a subcommand of the root command. It runs performance benchmarks on target(s). -package benchmark - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +// Package benchmark is a subcommand of the root command. It runs performance benchmarks on target(s). +package benchmark + import ( "fmt" "log/slog" diff --git a/cmd/config/config.go b/cmd/config/config.go index c23c100d..6e93aab7 100644 --- a/cmd/config/config.go +++ b/cmd/config/config.go @@ -1,9 +1,9 @@ -// Package config is a subcommand of the root command. It sets system configuration items on target platform(s). -package config - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +// Package config is a subcommand of the root command. It sets system configuration items on target platform(s). +package config + import ( "fmt" "log/slog" diff --git a/cmd/config/restore.go b/cmd/config/restore.go index 71f17217..c6e22b63 100644 --- a/cmd/config/restore.go +++ b/cmd/config/restore.go @@ -1,9 +1,9 @@ -// Package config is a subcommand of the root command. It sets system configuration items on target platform(s). -package config - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +// Package config is a subcommand of the root command. It sets system configuration items on target platform(s). +package config + import ( "bufio" "bytes" diff --git a/cmd/flamegraph/flamegraph.go b/cmd/flamegraph/flamegraph.go index 8547c16d..802b5b3b 100644 --- a/cmd/flamegraph/flamegraph.go +++ b/cmd/flamegraph/flamegraph.go @@ -1,9 +1,9 @@ -// Package flamegraph is a subcommand of the root command. It is used to generate flamegraphs from target(s). -package flamegraph - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +// Package flamegraph is a subcommand of the root command. It is used to generate flamegraphs from target(s). +package flamegraph + import ( "fmt" "os" diff --git a/cmd/lock/lock.go b/cmd/lock/lock.go index 11914467..7c3662aa 100755 --- a/cmd/lock/lock.go +++ b/cmd/lock/lock.go @@ -1,9 +1,9 @@ -// Package lock is a subcommand of the root command. It is used to collect kernel lock related perf information from target(s). -package lock - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +// Package lock is a subcommand of the root command. It is used to collect kernel lock related perf information from target(s). +package lock + import ( "fmt" "path/filepath" diff --git a/cmd/metrics/metrics.go b/cmd/metrics/metrics.go index 4bc0a710..da32ee8b 100644 --- a/cmd/metrics/metrics.go +++ b/cmd/metrics/metrics.go @@ -1,9 +1,9 @@ -// Package metrics is a subcommand of the root command. 
It provides functionality to collect performance metrics from target(s). -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +// Package metrics is a subcommand of the root command. It provides functionality to collect performance metrics from target(s). +package metrics + import ( "context" "embed" diff --git a/cmd/report/report.go b/cmd/report/report.go index 690b619e..78d978ed 100644 --- a/cmd/report/report.go +++ b/cmd/report/report.go @@ -1,9 +1,9 @@ -// Package report is a subcommand of the root command. It generates a configuration report for target(s). -package report - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +// Package report is a subcommand of the root command. It generates a configuration report for target(s). +package report + import ( "fmt" "slices" diff --git a/cmd/root.go b/cmd/root.go index d947a72a..91c2c0e2 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -1,9 +1,9 @@ -// Package cmd provides the command line interface for the application. -package cmd - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +// Package cmd provides the command line interface for the application. +package cmd + import ( "bytes" "context" diff --git a/cmd/telemetry/telemetry.go b/cmd/telemetry/telemetry.go index 1833f3eb..5c8864b8 100644 --- a/cmd/telemetry/telemetry.go +++ b/cmd/telemetry/telemetry.go @@ -1,9 +1,9 @@ -// Package telemetry is a subcommand of the root command. It collects system telemetry from target(s). -package telemetry - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +// Package telemetry is a subcommand of the root command. It collects system telemetry from target(s). +package telemetry + import ( "fmt" "log/slog" diff --git a/internal/app/app.go b/internal/app/app.go index d1902713..8a3b4eed 100644 --- a/internal/app/app.go +++ b/internal/app/app.go @@ -1,10 +1,10 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + // Package app defines application-wide types, constants, and context // that are shared across multiple commands. package app -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - import ( "os" "path/filepath" diff --git a/internal/cpus/cpus.go b/internal/cpus/cpus.go index 7c83452a..0f1171a5 100644 --- a/internal/cpus/cpus.go +++ b/internal/cpus/cpus.go @@ -1,10 +1,10 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + // Package cpus provides CPU definitions and lookup utilities for microarchitecture, // family, model, and stepping, supporting both x86 and ARM architectures. package cpus -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - import ( "fmt" "regexp" diff --git a/internal/extract/extract.go b/internal/extract/extract.go index c828bd56..fc58d58d 100644 --- a/internal/extract/extract.go +++ b/internal/extract/extract.go @@ -1,10 +1,10 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + // Package extract provides helper functions for extracting values from script outputs // to populate table fields for reports. 
package extract -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - import ( "log/slog" "regexp" diff --git a/internal/report/report.go b/internal/report/report.go index 2fdfc03c..be34e9ec 100644 --- a/internal/report/report.go +++ b/internal/report/report.go @@ -1,9 +1,9 @@ -// Package report provides functions to generate reports in various formats such as txt, json, html, xlsx. -package report - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +// Package report provides functions to generate reports in various formats such as txt, json, html, xlsx. +package report + import ( "fmt" "perfspect/internal/table" diff --git a/internal/script/script.go b/internal/script/script.go index 7d06d044..1fe759ad 100644 --- a/internal/script/script.go +++ b/internal/script/script.go @@ -1,9 +1,9 @@ -// Package script provides functions to run scripts on a target and get the output. -package script - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +// Package script provides functions to run scripts on a target and get the output. +package script + import ( "embed" "fmt" diff --git a/internal/util/util.go b/internal/util/util.go index fca89027..a2d3d099 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -1,11 +1,11 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + /* Package util includes utility/helper functions that may be useful to other modules. */ package util -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - import ( "archive/tar" "compress/gzip" diff --git a/internal/workflow/workflow.go b/internal/workflow/workflow.go index 330a64ee..eef4f5ed 100644 --- a/internal/workflow/workflow.go +++ b/internal/workflow/workflow.go @@ -1,11 +1,11 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + // Package workflow implements the common flow/logic for reporting commands // (report, telemetry, flamegraph, lock). It handles target management, // script execution, and report generation. package workflow -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - import ( "fmt" "log/slog" From 56402de17013a89c92e7da707ba0c0ed97a74afa Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Sun, 28 Dec 2025 16:08:42 -0800 Subject: [PATCH 3/8] missed one Signed-off-by: Harper, Jason M --- internal/util/util.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/internal/util/util.go b/internal/util/util.go index a2d3d099..3c21ab51 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -1,9 +1,7 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -/* -Package util includes utility/helper functions that may be useful to other modules. -*/ +// Package util includes utility/helper functions that may be useful to other modules. 
package util import ( From 42295c56e622c71faf7d1b8f6242f830e8ce9f49 Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Sun, 28 Dec 2025 16:18:20 -0800 Subject: [PATCH 4/8] copyright first Signed-off-by: Harper, Jason M --- cmd/benchmark/benchmark_renderers.go | 4 ++-- cmd/benchmark/benchmark_tables.go | 4 ++-- cmd/benchmark/benchmarking.go | 4 ++-- cmd/config/config_tables.go | 4 ++-- cmd/config/flag.go | 4 ++-- cmd/config/flag_groups.go | 4 ++-- cmd/config/flag_groups_test.go | 4 ++-- cmd/config/flag_test.go | 4 ++-- cmd/config/restore_test.go | 4 ++-- cmd/config/set.go | 6 +++--- cmd/config/set_test.go | 4 ++-- cmd/flamegraph/flamegraph_renderers.go | 4 ++-- cmd/flamegraph/flamegraph_tables.go | 4 ++-- cmd/lock/lock_renderers.go | 4 ++-- cmd/lock/lock_tables.go | 4 ++-- cmd/metrics/event_frame.go | 4 ++-- cmd/metrics/event_frame_test.go | 4 ++-- cmd/metrics/loader.go | 4 ++-- cmd/metrics/loader_component.go | 4 ++-- cmd/metrics/loader_component_test.go | 4 ++-- cmd/metrics/loader_legacy.go | 4 ++-- cmd/metrics/loader_perfmon.go | 4 ++-- cmd/metrics/loader_perfmon_event_core.go | 4 ++-- cmd/metrics/loader_perfmon_event_other.go | 6 +++--- cmd/metrics/loader_perfmon_event_uncore.go | 4 ++-- cmd/metrics/loader_perfmon_group_core.go | 4 ++-- cmd/metrics/loader_perfmon_group_other.go | 4 ++-- cmd/metrics/loader_perfmon_group_uncore.go | 4 ++-- cmd/metrics/loader_perfmon_group_uncore_test.go | 4 ++-- cmd/metrics/loader_util.go | 4 ++-- cmd/metrics/loader_util_test.go | 4 ++-- cmd/metrics/metadata.go | 4 ++-- cmd/metrics/metadata_aarch.go | 4 ++-- cmd/metrics/metadata_x86.go | 4 ++-- cmd/metrics/metric.go | 4 ++-- cmd/metrics/metrics_server.go | 4 ++-- cmd/metrics/nmi_watchdog.go | 4 ++-- cmd/metrics/perf.go | 4 ++-- cmd/metrics/perf_mux.go | 4 ++-- cmd/metrics/print.go | 4 ++-- cmd/metrics/process.go | 4 ++-- cmd/metrics/summary.go | 4 ++-- cmd/metrics/summary_test.go | 4 ++-- cmd/metrics/trim.go | 4 ++-- cmd/report/report_tables.go | 4 ++-- cmd/telemetry/telemetry_renderers.go | 4 ++-- cmd/telemetry/telemetry_tables.go | 4 ++-- internal/extract/cache_test.go | 4 ++-- internal/extract/cpu_test.go | 6 +++--- internal/extract/extract_test.go | 4 ++-- internal/extract/frequency_test.go | 4 ++-- internal/extract/nic_test.go | 4 ++-- internal/extract/os.go | 6 +++--- internal/extract/turbostat_test.go | 6 +++--- internal/progress/multispinner.go | 6 +++--- internal/progress/multispinner_test.go | 4 ++-- internal/report/render_excel.go | 4 ++-- internal/report/render_html.go | 4 ++-- internal/report/render_json.go | 4 ++-- internal/report/render_raw.go | 4 ++-- internal/report/render_text.go | 4 ++-- internal/script/script_defs.go | 6 +++--- internal/script/script_test.go | 4 ++-- internal/target/helpers.go | 4 ++-- internal/target/local_target.go | 4 ++-- internal/target/local_target_test.go | 4 ++-- internal/target/remote_target.go | 4 ++-- internal/target/target.go | 6 +++--- internal/target/target_test.go | 4 ++-- internal/util/util_test.go | 4 ++-- internal/workflow/targets_test.go | 4 ++-- main.go | 4 ++-- tools/stackcollapse-perf/stackcollapse-perf.go | 4 ++-- tools/stackcollapse-perf/stackcollapse-perf_test.go | 4 ++-- tools/tsc/tsc.go | 4 ++-- tools/tsc/tsc_amd64.go | 6 +++--- 76 files changed, 161 insertions(+), 161 deletions(-) diff --git a/cmd/benchmark/benchmark_renderers.go b/cmd/benchmark/benchmark_renderers.go index f0c4bb82..73b5d9a2 100644 --- a/cmd/benchmark/benchmark_renderers.go +++ b/cmd/benchmark/benchmark_renderers.go @@ -1,8 +1,8 @@ -package benchmark - // Copyright (C) 
2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package benchmark + import ( "fmt" "log/slog" diff --git a/cmd/benchmark/benchmark_tables.go b/cmd/benchmark/benchmark_tables.go index 3d673f89..b447668b 100644 --- a/cmd/benchmark/benchmark_tables.go +++ b/cmd/benchmark/benchmark_tables.go @@ -1,8 +1,8 @@ -package benchmark - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package benchmark + import ( "fmt" "log/slog" diff --git a/cmd/benchmark/benchmarking.go b/cmd/benchmark/benchmarking.go index 483d840c..2564fb51 100644 --- a/cmd/benchmark/benchmarking.go +++ b/cmd/benchmark/benchmarking.go @@ -1,8 +1,8 @@ -package benchmark - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package benchmark + import ( "encoding/json" "fmt" diff --git a/cmd/config/config_tables.go b/cmd/config/config_tables.go index f25bf7f9..58f06bfe 100644 --- a/cmd/config/config_tables.go +++ b/cmd/config/config_tables.go @@ -1,8 +1,8 @@ -package config - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package config + import ( "fmt" "log/slog" diff --git a/cmd/config/flag.go b/cmd/config/flag.go index 4a8e8ccb..d7127ad4 100644 --- a/cmd/config/flag.go +++ b/cmd/config/flag.go @@ -1,8 +1,8 @@ -package config - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package config + import ( "perfspect/internal/target" diff --git a/cmd/config/flag_groups.go b/cmd/config/flag_groups.go index ac24682a..114286ae 100644 --- a/cmd/config/flag_groups.go +++ b/cmd/config/flag_groups.go @@ -1,8 +1,8 @@ -package config - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package config + import ( "fmt" "regexp" diff --git a/cmd/config/flag_groups_test.go b/cmd/config/flag_groups_test.go index e8bfb77f..4efea6d2 100644 --- a/cmd/config/flag_groups_test.go +++ b/cmd/config/flag_groups_test.go @@ -1,8 +1,8 @@ -package config - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package config + import ( "bytes" "testing" diff --git a/cmd/config/flag_test.go b/cmd/config/flag_test.go index 8e28b847..b39b73e5 100644 --- a/cmd/config/flag_test.go +++ b/cmd/config/flag_test.go @@ -1,8 +1,8 @@ -package config - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package config + import ( "testing" diff --git a/cmd/config/restore_test.go b/cmd/config/restore_test.go index 10bbbb4b..056536fb 100644 --- a/cmd/config/restore_test.go +++ b/cmd/config/restore_test.go @@ -1,8 +1,8 @@ -package config - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package config + import ( "bytes" "io" diff --git a/cmd/config/set.go b/cmd/config/set.go index ca91a0a8..344af629 100644 --- a/cmd/config/set.go +++ b/cmd/config/set.go @@ -1,3 +1,6 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + package config import ( @@ -18,9 +21,6 @@ import ( "sync" ) -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - var uncoreDieFrequencyMutex sync.Mutex var uncoreFrequencyMutex sync.Mutex diff --git a/cmd/config/set_test.go b/cmd/config/set_test.go index 218f1642..471f5790 100644 --- a/cmd/config/set_test.go +++ b/cmd/config/set_test.go @@ -1,8 +1,8 @@ -package config - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package config + import ( 
"testing" diff --git a/cmd/flamegraph/flamegraph_renderers.go b/cmd/flamegraph/flamegraph_renderers.go index 8838dd88..024d77e3 100644 --- a/cmd/flamegraph/flamegraph_renderers.go +++ b/cmd/flamegraph/flamegraph_renderers.go @@ -1,8 +1,8 @@ -package flamegraph - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package flamegraph + import ( "bufio" "bytes" diff --git a/cmd/flamegraph/flamegraph_tables.go b/cmd/flamegraph/flamegraph_tables.go index 54e59e30..054e6874 100644 --- a/cmd/flamegraph/flamegraph_tables.go +++ b/cmd/flamegraph/flamegraph_tables.go @@ -1,8 +1,8 @@ -package flamegraph - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package flamegraph + import ( "fmt" "log/slog" diff --git a/cmd/lock/lock_renderers.go b/cmd/lock/lock_renderers.go index 4df29ca2..2ee68b52 100644 --- a/cmd/lock/lock_renderers.go +++ b/cmd/lock/lock_renderers.go @@ -1,8 +1,8 @@ -package lock - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package lock + import ( htmltemplate "html/template" "perfspect/internal/report" diff --git a/cmd/lock/lock_tables.go b/cmd/lock/lock_tables.go index 8fad25e4..8e62349d 100644 --- a/cmd/lock/lock_tables.go +++ b/cmd/lock/lock_tables.go @@ -1,8 +1,8 @@ -package lock - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package lock + import ( "perfspect/internal/extract" diff --git a/cmd/metrics/event_frame.go b/cmd/metrics/event_frame.go index d756afe5..031ac0bf 100644 --- a/cmd/metrics/event_frame.go +++ b/cmd/metrics/event_frame.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + // Linux perf event output, i.e., from 'perf stat' parsing and processing helper functions import ( diff --git a/cmd/metrics/event_frame_test.go b/cmd/metrics/event_frame_test.go index a641748e..56c4c08c 100644 --- a/cmd/metrics/event_frame_test.go +++ b/cmd/metrics/event_frame_test.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import "testing" func TestExtractInterval(t *testing.T) { diff --git a/cmd/metrics/loader.go b/cmd/metrics/loader.go index 17184a94..34874779 100644 --- a/cmd/metrics/loader.go +++ b/cmd/metrics/loader.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "fmt" "log/slog" diff --git a/cmd/metrics/loader_component.go b/cmd/metrics/loader_component.go index f6166921..7e191dd6 100644 --- a/cmd/metrics/loader_component.go +++ b/cmd/metrics/loader_component.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "encoding/json" "fmt" diff --git a/cmd/metrics/loader_component_test.go b/cmd/metrics/loader_component_test.go index 5366df51..3b6ec4eb 100644 --- a/cmd/metrics/loader_component_test.go +++ b/cmd/metrics/loader_component_test.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "reflect" "testing" diff --git a/cmd/metrics/loader_legacy.go b/cmd/metrics/loader_legacy.go index 1009b081..8fdcef1c 100644 --- a/cmd/metrics/loader_legacy.go +++ b/cmd/metrics/loader_legacy.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel 
Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "bufio" "encoding/json" diff --git a/cmd/metrics/loader_perfmon.go b/cmd/metrics/loader_perfmon.go index 0c73f043..7f135c72 100644 --- a/cmd/metrics/loader_perfmon.go +++ b/cmd/metrics/loader_perfmon.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "encoding/json" "fmt" diff --git a/cmd/metrics/loader_perfmon_event_core.go b/cmd/metrics/loader_perfmon_event_core.go index d8cfe82b..f87131dc 100644 --- a/cmd/metrics/loader_perfmon_event_core.go +++ b/cmd/metrics/loader_perfmon_event_core.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "encoding/json" "fmt" diff --git a/cmd/metrics/loader_perfmon_event_other.go b/cmd/metrics/loader_perfmon_event_other.go index 1978941a..b112491a 100644 --- a/cmd/metrics/loader_perfmon_event_other.go +++ b/cmd/metrics/loader_perfmon_event_other.go @@ -1,3 +1,6 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + package metrics import ( @@ -6,9 +9,6 @@ import ( "strings" ) -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - type OtherEvent struct { EventName string } diff --git a/cmd/metrics/loader_perfmon_event_uncore.go b/cmd/metrics/loader_perfmon_event_uncore.go index c79dbf3c..647e01f6 100644 --- a/cmd/metrics/loader_perfmon_event_uncore.go +++ b/cmd/metrics/loader_perfmon_event_uncore.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "encoding/json" "fmt" diff --git a/cmd/metrics/loader_perfmon_group_core.go b/cmd/metrics/loader_perfmon_group_core.go index 33aafa27..038b4bd6 100644 --- a/cmd/metrics/loader_perfmon_group_core.go +++ b/cmd/metrics/loader_perfmon_group_core.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "fmt" "io" diff --git a/cmd/metrics/loader_perfmon_group_other.go b/cmd/metrics/loader_perfmon_group_other.go index dedcb94d..c49e8bb7 100644 --- a/cmd/metrics/loader_perfmon_group_other.go +++ b/cmd/metrics/loader_perfmon_group_other.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "fmt" "io" diff --git a/cmd/metrics/loader_perfmon_group_uncore.go b/cmd/metrics/loader_perfmon_group_uncore.go index 1996befe..721eb970 100644 --- a/cmd/metrics/loader_perfmon_group_uncore.go +++ b/cmd/metrics/loader_perfmon_group_uncore.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "fmt" "io" diff --git a/cmd/metrics/loader_perfmon_group_uncore_test.go b/cmd/metrics/loader_perfmon_group_uncore_test.go index bef41b29..ec1edfb6 100644 --- a/cmd/metrics/loader_perfmon_group_uncore_test.go +++ b/cmd/metrics/loader_perfmon_group_uncore_test.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "testing" diff --git a/cmd/metrics/loader_util.go b/cmd/metrics/loader_util.go index cfa77914..c83197a9 100644 --- a/cmd/metrics/loader_util.go +++ b/cmd/metrics/loader_util.go @@ -1,8 +1,8 @@ -package metrics - 
// Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "fmt" "log/slog" diff --git a/cmd/metrics/loader_util_test.go b/cmd/metrics/loader_util_test.go index ba64b592..8c1c591b 100644 --- a/cmd/metrics/loader_util_test.go +++ b/cmd/metrics/loader_util_test.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import "testing" func TestTransformConditional(t *testing.T) { diff --git a/cmd/metrics/metadata.go b/cmd/metrics/metadata.go index 825c7db9..db65de7e 100644 --- a/cmd/metrics/metadata.go +++ b/cmd/metrics/metadata.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + // metadata.go defines structures and functions to hold information about the platform // to be used during data collection and metric production. // diff --git a/cmd/metrics/metadata_aarch.go b/cmd/metrics/metadata_aarch.go index edbcf921..b8f4b56d 100644 --- a/cmd/metrics/metadata_aarch.go +++ b/cmd/metrics/metadata_aarch.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + // metadata_aarch.go contains ARM/aarch64 metadata collection logic. import ( diff --git a/cmd/metrics/metadata_x86.go b/cmd/metrics/metadata_x86.go index 47078175..6f6a108a 100644 --- a/cmd/metrics/metadata_x86.go +++ b/cmd/metrics/metadata_x86.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + // metadata_x86.go contains x86_64 (Intel/AMD) metadata collection logic. import ( diff --git a/cmd/metrics/metric.go b/cmd/metrics/metric.go index 8619cae6..019d3e9d 100644 --- a/cmd/metrics/metric.go +++ b/cmd/metrics/metric.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + // metric generation type defintions and helper functions import ( diff --git a/cmd/metrics/metrics_server.go b/cmd/metrics/metrics_server.go index 446133de..6538efb2 100644 --- a/cmd/metrics/metrics_server.go +++ b/cmd/metrics/metrics_server.go @@ -1,8 +1,8 @@ -package metrics - // Copyright 2025 Google LLC. 
// SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "fmt" "log/slog" diff --git a/cmd/metrics/nmi_watchdog.go b/cmd/metrics/nmi_watchdog.go index a08af437..4ba0d501 100644 --- a/cmd/metrics/nmi_watchdog.go +++ b/cmd/metrics/nmi_watchdog.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + // nmi_watchdog provides helper functions for enabling and disabling the NMI (non-maskable interrupt) watchdog import ( diff --git a/cmd/metrics/perf.go b/cmd/metrics/perf.go index 295689f0..392c9a84 100644 --- a/cmd/metrics/perf.go +++ b/cmd/metrics/perf.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "fmt" "strings" diff --git a/cmd/metrics/perf_mux.go b/cmd/metrics/perf_mux.go index ecce80db..2445fde3 100644 --- a/cmd/metrics/perf_mux.go +++ b/cmd/metrics/perf_mux.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + // Linux perf event/group multiplexing interval helper functions import ( diff --git a/cmd/metrics/print.go b/cmd/metrics/print.go index 6299b9d7..148b5796 100644 --- a/cmd/metrics/print.go +++ b/cmd/metrics/print.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "encoding/json" "fmt" diff --git a/cmd/metrics/process.go b/cmd/metrics/process.go index b8b31017..2ee19b24 100644 --- a/cmd/metrics/process.go +++ b/cmd/metrics/process.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + // Linux process information helper functions import ( diff --git a/cmd/metrics/summary.go b/cmd/metrics/summary.go index e61348cc..fafbd686 100644 --- a/cmd/metrics/summary.go +++ b/cmd/metrics/summary.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + // functions to create summary (mean,min,max,stddev) metrics from metrics CSV import ( diff --git a/cmd/metrics/summary_test.go b/cmd/metrics/summary_test.go index c4aaa218..8bd0d294 100644 --- a/cmd/metrics/summary_test.go +++ b/cmd/metrics/summary_test.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "testing" diff --git a/cmd/metrics/trim.go b/cmd/metrics/trim.go index 1292fe89..6dff795e 100644 --- a/cmd/metrics/trim.go +++ b/cmd/metrics/trim.go @@ -1,8 +1,8 @@ -package metrics - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package metrics + import ( "encoding/json" "fmt" diff --git a/cmd/report/report_tables.go b/cmd/report/report_tables.go index 1558614c..8c1f8584 100644 --- a/cmd/report/report_tables.go +++ b/cmd/report/report_tables.go @@ -1,8 +1,8 @@ -package report - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package report + // table_defs.go defines the tables used for generating reports import ( diff --git a/cmd/telemetry/telemetry_renderers.go b/cmd/telemetry/telemetry_renderers.go index e1e87e72..f88bd392 100644 --- a/cmd/telemetry/telemetry_renderers.go +++ b/cmd/telemetry/telemetry_renderers.go @@ -1,8 +1,8 @@ -package telemetry - // Copyright (C) 2021-2025 Intel Corporation // 
SPDX-License-Identifier: BSD-3-Clause +package telemetry + import ( "fmt" "log/slog" diff --git a/cmd/telemetry/telemetry_tables.go b/cmd/telemetry/telemetry_tables.go index 1cce8228..ec9209b7 100644 --- a/cmd/telemetry/telemetry_tables.go +++ b/cmd/telemetry/telemetry_tables.go @@ -1,8 +1,8 @@ -package telemetry - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package telemetry + import ( "encoding/csv" "fmt" diff --git a/internal/extract/cache_test.go b/internal/extract/cache_test.go index 16b6642b..78ff0074 100644 --- a/internal/extract/cache_test.go +++ b/internal/extract/cache_test.go @@ -1,8 +1,8 @@ -package extract - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package extract + import ( "testing" diff --git a/internal/extract/cpu_test.go b/internal/extract/cpu_test.go index 3a2bcb9e..5fcc257d 100644 --- a/internal/extract/cpu_test.go +++ b/internal/extract/cpu_test.go @@ -1,3 +1,6 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + package extract import ( @@ -5,9 +8,6 @@ import ( "testing" ) -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - func TestHyperthreadingFromOutput(t *testing.T) { tests := []struct { name string diff --git a/internal/extract/extract_test.go b/internal/extract/extract_test.go index 4afbacd5..8c9a5220 100644 --- a/internal/extract/extract_test.go +++ b/internal/extract/extract_test.go @@ -1,8 +1,8 @@ -package extract - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package extract + import ( "reflect" "testing" diff --git a/internal/extract/frequency_test.go b/internal/extract/frequency_test.go index 8250225f..02720555 100644 --- a/internal/extract/frequency_test.go +++ b/internal/extract/frequency_test.go @@ -1,8 +1,8 @@ -package extract - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package extract + import ( "reflect" "testing" diff --git a/internal/extract/nic_test.go b/internal/extract/nic_test.go index 130a4a23..cd539b20 100644 --- a/internal/extract/nic_test.go +++ b/internal/extract/nic_test.go @@ -1,8 +1,8 @@ -package extract - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package extract + import ( "testing" ) diff --git a/internal/extract/os.go b/internal/extract/os.go index cddabebc..1c495211 100644 --- a/internal/extract/os.go +++ b/internal/extract/os.go @@ -1,10 +1,10 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + package extract import "perfspect/internal/script" -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - // OperatingSystemFromOutput returns the operating system from script outputs. 
func OperatingSystemFromOutput(outputs map[string]script.ScriptOutput) string { os := ValFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^PRETTY_NAME=\"(.+?)\"`) diff --git a/internal/extract/turbostat_test.go b/internal/extract/turbostat_test.go index b0f85c55..83f479ba 100644 --- a/internal/extract/turbostat_test.go +++ b/internal/extract/turbostat_test.go @@ -1,3 +1,6 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + package extract import ( @@ -6,9 +9,6 @@ import ( "testing" ) -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - const turbostatOutput = `TIME: 15:04:05 INTERVAL: 2 Package Core CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IPC IRQ SMI POLL C1 C1E C6 POLL% C1% C1E% C6% CPU%c1 CPU%c6 CoreTmp CoreThr PkgTmp Pkg%pc2 Pkg%pc6 PkgWatt RAMWatt PKG_% RAM_% UncMHz diff --git a/internal/progress/multispinner.go b/internal/progress/multispinner.go index 1368aada..19ca0872 100644 --- a/internal/progress/multispinner.go +++ b/internal/progress/multispinner.go @@ -1,11 +1,11 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + /* Package progress provides CLI progress bar options. */ package progress -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - import ( "fmt" "os" diff --git a/internal/progress/multispinner_test.go b/internal/progress/multispinner_test.go index 188776cd..6e0e4633 100644 --- a/internal/progress/multispinner_test.go +++ b/internal/progress/multispinner_test.go @@ -1,8 +1,8 @@ -package progress - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package progress + import ( "testing" ) diff --git a/internal/report/render_excel.go b/internal/report/render_excel.go index 98bac2a0..9e89111a 100644 --- a/internal/report/render_excel.go +++ b/internal/report/render_excel.go @@ -1,8 +1,8 @@ -package report - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package report + import ( "bufio" "bytes" diff --git a/internal/report/render_html.go b/internal/report/render_html.go index 31407c23..5cc948a3 100644 --- a/internal/report/render_html.go +++ b/internal/report/render_html.go @@ -1,8 +1,8 @@ -package report - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package report + import ( "bytes" "fmt" diff --git a/internal/report/render_json.go b/internal/report/render_json.go index 42d7311f..13ab69f6 100644 --- a/internal/report/render_json.go +++ b/internal/report/render_json.go @@ -1,8 +1,8 @@ -package report - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package report + import ( "encoding/json" "perfspect/internal/table" diff --git a/internal/report/render_raw.go b/internal/report/render_raw.go index 6451429a..ab461a29 100644 --- a/internal/report/render_raw.go +++ b/internal/report/render_raw.go @@ -1,8 +1,8 @@ -package report - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package report + import ( "encoding/json" "fmt" diff --git a/internal/report/render_text.go b/internal/report/render_text.go index fa2814f5..67129a2a 100644 --- a/internal/report/render_text.go +++ b/internal/report/render_text.go @@ -1,8 +1,8 @@ -package report - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package report + import ( "fmt" "perfspect/internal/table" diff --git 
a/internal/script/script_defs.go b/internal/script/script_defs.go index aa59a005..6d22bdc4 100644 --- a/internal/script/script_defs.go +++ b/internal/script/script_defs.go @@ -1,3 +1,6 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + package script import ( @@ -6,9 +9,6 @@ import ( texttemplate "text/template" // nosemgrep ) -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - // script_defs.go defines the bash scripts that are used to collect information from target systems type ScriptDefinition struct { diff --git a/internal/script/script_test.go b/internal/script/script_test.go index 9c90bc2d..3a271c03 100644 --- a/internal/script/script_test.go +++ b/internal/script/script_test.go @@ -1,8 +1,8 @@ -package script - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package script + import ( "os" "os/exec" diff --git a/internal/target/helpers.go b/internal/target/helpers.go index 9deebe42..6b7b854b 100644 --- a/internal/target/helpers.go +++ b/internal/target/helpers.go @@ -1,8 +1,8 @@ -package target - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package target + import ( "bufio" "context" diff --git a/internal/target/local_target.go b/internal/target/local_target.go index 36822006..74db072c 100644 --- a/internal/target/local_target.go +++ b/internal/target/local_target.go @@ -1,8 +1,8 @@ -package target - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package target + import ( "io" "log/slog" diff --git a/internal/target/local_target_test.go b/internal/target/local_target_test.go index 630b0bea..d2fcb33d 100644 --- a/internal/target/local_target_test.go +++ b/internal/target/local_target_test.go @@ -1,8 +1,8 @@ -package target - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package target + import ( "os" "slices" diff --git a/internal/target/remote_target.go b/internal/target/remote_target.go index ffff3388..f75313db 100644 --- a/internal/target/remote_target.go +++ b/internal/target/remote_target.go @@ -1,8 +1,8 @@ -package target - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package target + import ( "fmt" "log/slog" diff --git a/internal/target/target.go b/internal/target/target.go index 6533436a..19ced087 100644 --- a/internal/target/target.go +++ b/internal/target/target.go @@ -1,11 +1,11 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + /* Package target provides a way to interact with local and remote systems. 
*/ package target -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - import ( "os" "os/exec" diff --git a/internal/target/target_test.go b/internal/target/target_test.go index 1eff9418..55298ef3 100644 --- a/internal/target/target_test.go +++ b/internal/target/target_test.go @@ -1,8 +1,8 @@ -package target - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package target + import ( "testing" ) diff --git a/internal/util/util_test.go b/internal/util/util_test.go index 7b061254..4eac761b 100644 --- a/internal/util/util_test.go +++ b/internal/util/util_test.go @@ -1,8 +1,8 @@ -package util - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package util + import ( "archive/tar" "compress/gzip" diff --git a/internal/workflow/targets_test.go b/internal/workflow/targets_test.go index 9e951d01..4dd53651 100644 --- a/internal/workflow/targets_test.go +++ b/internal/workflow/targets_test.go @@ -1,8 +1,8 @@ -package workflow - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package workflow + import ( "testing" diff --git a/main.go b/main.go index cc1c5f41..5792f977 100644 --- a/main.go +++ b/main.go @@ -1,8 +1,8 @@ -package main - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package main + import ( "fmt" "os" diff --git a/tools/stackcollapse-perf/stackcollapse-perf.go b/tools/stackcollapse-perf/stackcollapse-perf.go index e6bf29b8..74b1094e 100644 --- a/tools/stackcollapse-perf/stackcollapse-perf.go +++ b/tools/stackcollapse-perf/stackcollapse-perf.go @@ -1,8 +1,8 @@ -package main - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package main + // This code is a port of the Perl script stackcollapse-perf.pl from Brendan // Gregg's Flamegraph project -- github.com/brendangregg/FlameGraph. // All credit to Brendan Gregg for the original implementation and the flamegraph concept. diff --git a/tools/stackcollapse-perf/stackcollapse-perf_test.go b/tools/stackcollapse-perf/stackcollapse-perf_test.go index acc38ec5..053d029b 100644 --- a/tools/stackcollapse-perf/stackcollapse-perf_test.go +++ b/tools/stackcollapse-perf/stackcollapse-perf_test.go @@ -1,8 +1,8 @@ -package main - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package main + import ( "bytes" "strings" diff --git a/tools/tsc/tsc.go b/tools/tsc/tsc.go index 057d7b30..95fe5ba3 100644 --- a/tools/tsc/tsc.go +++ b/tools/tsc/tsc.go @@ -1,8 +1,8 @@ -package main - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +package main + import "fmt" func main() { diff --git a/tools/tsc/tsc_amd64.go b/tools/tsc/tsc_amd64.go index 6c964016..042a73bc 100644 --- a/tools/tsc/tsc_amd64.go +++ b/tools/tsc/tsc_amd64.go @@ -1,9 +1,9 @@ -// Time Stamp Counter helper functions. -package main - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause +// Time Stamp Counter helper functions. 
+package main + import ( "time" ) From 593fe271b21c169b800f6074dd501a3fd5c733d8 Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Sun, 28 Dec 2025 16:26:46 -0800 Subject: [PATCH 5/8] rename script_defs to scripts Signed-off-by: Harper, Jason M --- internal/script/{script_defs.go => scripts.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename internal/script/{script_defs.go => scripts.go} (100%) diff --git a/internal/script/script_defs.go b/internal/script/scripts.go similarity index 100% rename from internal/script/script_defs.go rename to internal/script/scripts.go From a20caab148f99a67ad3850d9f0db1853664493a7 Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Sun, 28 Dec 2025 16:27:30 -0800 Subject: [PATCH 6/8] update comment Signed-off-by: Harper, Jason M --- internal/script/scripts.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/script/scripts.go b/internal/script/scripts.go index 6d22bdc4..2848c53d 100644 --- a/internal/script/scripts.go +++ b/internal/script/scripts.go @@ -9,7 +9,7 @@ import ( texttemplate "text/template" // nosemgrep ) -// script_defs.go defines the bash scripts that are used to collect information from target systems +// scripts.go defines the bash scripts that are used to collect information from target systems type ScriptDefinition struct { Name string // just a name From 7fbe8f61fa8eeda7eeb4b3a5c59957f5cb18d611 Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Mon, 29 Dec 2025 09:02:59 -0800 Subject: [PATCH 7/8] update deps --- go.mod | 4 ++-- go.sum | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index ba7a51e2..1fe93651 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,7 @@ require ( github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.67.4 // indirect github.com/prometheus/procfs v0.19.2 // indirect - github.com/richardlehane/mscfb v1.0.4 // indirect + github.com/richardlehane/mscfb v1.0.5 // indirect github.com/richardlehane/msoleps v1.0.4 // indirect github.com/tiendc/go-deepcopy v1.7.2 // indirect github.com/xuri/efp v0.0.1 // indirect @@ -49,6 +49,6 @@ require ( golang.org/x/crypto v0.46.0 // indirect golang.org/x/net v0.48.0 // indirect golang.org/x/sys v0.39.0 // indirect - google.golang.org/protobuf v1.36.10 // indirect + google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 9ee62fdf..bd3afc99 100644 --- a/go.sum +++ b/go.sum @@ -36,9 +36,8 @@ github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+L github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= -github.com/richardlehane/mscfb v1.0.4 h1:WULscsljNPConisD5hR0+OyZjwK46Pfyr6mPu5ZawpM= -github.com/richardlehane/mscfb v1.0.4/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk= -github.com/richardlehane/msoleps v1.0.1/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg= +github.com/richardlehane/mscfb v1.0.5 h1:OoQkDV2Bf2bIoSacCfJhSwm7BJN05fYFkwFUpxExtdY= +github.com/richardlehane/mscfb v1.0.5/go.mod h1:pe0+IUIc0AHh0+teNzBlJCtSyZdFOGgV4ZK9bsoV+Jo= github.com/richardlehane/msoleps v1.0.4 h1:WuESlvhX3gH2IHcd8UqyCuFY5yiq/GR/yqaSM/9/g00= github.com/richardlehane/msoleps v1.0.4/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg= github.com/rogpeppe/go-internal v1.10.0 
h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= @@ -76,8 +75,8 @@ golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= From 5a17054a76e3abbe7af4145283cd71aff6631357 Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Mon, 29 Dec 2025 09:37:12 -0800 Subject: [PATCH 8/8] doc update Signed-off-by: Harper, Jason M --- ARCHITECTURE.md | 4 ++-- CONTRIBUTING.md | 38 ++++++++++++++++---------------------- 2 files changed, 18 insertions(+), 24 deletions(-) diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index ecb3fb9d..e46c05f4 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -68,7 +68,7 @@ perfspect/ │ ├── cpus/ # CPU architecture detection │ ├── progress/ # Progress indicator (multi-spinner) │ └── util/ # General utilities -└── tools/ # External binaries for target systems +└── tools/ # Binaries used by scripts (embedded at build time) ``` ## Key Abstractions @@ -120,7 +120,7 @@ type ReportingCommand struct { ### 3. Script Engine (`internal/script/`) -Scripts are embedded in the binary using `//go:embed` and executed on targets via a controller script that manages concurrent/sequential execution and signal handling. +Collection scripts are defined in `internal/script/scripts.go`. Script dependencies, i.e., tools used by the scripts to collect data, are in `internal/script/resources/` and embedded in the binary using `//go:embed`. The scripts are executed on targets via a controller script that manages concurrent/sequential execution and signal handling. **Key concepts:** - `ScriptDefinition`: Defines a script (template, dependencies, required privileges) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ecdc1fc9..eddc70d2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,9 +13,9 @@ Thank you for your interest in contributing to PerfSpect! This document provides ### Building ```bash -make # Build the binary -make test # Run unit tests -make check # Run all code quality checks +builder/build.sh # Complete build, requires Docker +make # Build the x86 binary +make check # Run all code quality checks, including unit tests ``` ### Project Structure @@ -116,20 +116,15 @@ Tables define what data to collect. Add to the relevant command's table definiti ```go { - Name: "Your Table Name", - Category: "Category", - ScriptNames: []string{"script_that_provides_data"}, - Fields: []table.FieldDefinition{ - { - Name: "Field Name", - ValuesFunc: func(outputs map[string]script.ScriptOutput) []string { - // Parse script output and return field values - output := outputs["script_that_provides_data"] - // ... extraction logic ... 
- return []string{value}
- },
- },
- },
+ YourTableName: {
+ Name: YourTableName,
+ ScriptNames: []string{script.YourScriptName},
+ FieldsFunc: YourTableValues},
+}
+func YourTableValues() []table.FieldDefinition {
+ return []table.FieldDefinition{
+ // Define fields here
+ }
 }
 ```
@@ -139,9 +134,8 @@ Tables define what data to collect. Add to the relevant command's table definiti

 ```go
 var YourScript = ScriptDefinition{
- Name: "your_script",
- ScriptTemplate: `#!/bin/bash
-# Your script content
+ Name: YourScriptName,
+ ScriptTemplate: `# Your script content
 echo "output"
 `,
 Superuser: false, // true if requires root
@@ -152,7 +146,7 @@ echo "output"

 2. Reference in your table's `ScriptNames`

-3. If the script needs external binaries, add them to `tools/` or embed in `internal/script/resources/`
+3. If the script needs external binaries, add them to `tools/`. Post build they will be embedded in `internal/script/resources/`

 ### Adding Metrics for a New CPU

@@ -166,7 +160,7 @@ const UarchYourCPU = "YourCPU"

 3. Choose appropriate loader or implement new one in `cmd/metrics/loader.go`

-4. Add metric/event definitions to `cmd/metrics/resources/events/` and `cmd/metrics/resources/metrics/`
+4. Add metric/event definitions to the associated loader directory in `cmd/metrics/resources`

 5. Update `NewLoader()` switch statement
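
To make the table example above concrete, here is a minimal sketch of a `YourTableValues()` body that delegates parsing to the `internal/extract` helpers. The `ValuesFunc` field and the exact shape of `table.FieldDefinition` are carried over from the pre-refactor example and are assumptions, not necessarily the current API; `extract.OperatingSystemFromOutput` is used only as an illustrative helper.

```go
package report // assumed: the command package that owns the table (e.g., cmd/report)

import (
	"perfspect/internal/extract"
	"perfspect/internal/script"
	"perfspect/internal/table"
)

// YourTableValues is a hypothetical sketch; the real table.FieldDefinition may
// expose a different field set after the refactor.
func YourTableValues() []table.FieldDefinition {
	return []table.FieldDefinition{
		{
			Name: "Operating System", // label shown in reports
			ValuesFunc: func(outputs map[string]script.ScriptOutput) []string {
				// Parsing lives in internal/extract; the table definition only wires it up.
				return []string{extract.OperatingSystemFromOutput(outputs)}
			},
		},
	}
}
```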
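
The embedding referred to in step 3 of "Adding a Script" typically uses Go's `embed` package. A minimal sketch, assuming a `resources/` directory beside the source file; the variable and helper names are hypothetical, not PerfSpect's actual identifiers.

```go
package script

import "embed"

// scriptResources holds the tool binaries placed under internal/script/resources/
// by the build; the variable name is an assumption.
//
//go:embed resources
var scriptResources embed.FS

// resourceBytes returns the contents of one embedded tool, e.g. "resources/perf".
func resourceBytes(name string) ([]byte, error) {
	return scriptResources.ReadFile(name)
}
```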
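
For step 5 of "Adding Metrics for a New CPU", the dispatch in `cmd/metrics/loader.go` is roughly a switch over the microarchitecture constant. The sketch below uses a simplified signature and a hypothetical constructor; only `NewLoader()` and `UarchYourCPU` come from the steps above.

```go
package metrics

import "fmt"

// Loader is a stand-in for the project's loader interface; see cmd/metrics/loader.go.
type Loader interface{}

const UarchYourCPU = "YourCPU" // the constant added in step 1

// NewLoader sketches the dispatch described in step 5; the constructor name and
// the exact signature are assumptions.
func NewLoader(uarch string) (Loader, error) {
	switch uarch {
	case UarchYourCPU:
		return newYourCPULoader() // hypothetical constructor for the new CPU's event/metric loader
	default:
		return nil, fmt.Errorf("no metrics loader defined for microarchitecture %q", uarch)
	}
}

func newYourCPULoader() (Loader, error) { return struct{}{}, nil } // placeholder
```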