diff --git a/internal/cmdopts/cmdmetric.go b/internal/cmdopts/cmdmetric.go index 140e0765a2..bb9ed0d0a7 100644 --- a/internal/cmdopts/cmdmetric.go +++ b/internal/cmdopts/cmdmetric.go @@ -4,13 +4,15 @@ import ( "context" "fmt" "math" - "slices" + + "gopkg.in/yaml.v3" ) type MetricCommand struct { owner *Options PrintInit MetricPrintInitCommand `command:"print-init" description:"Get and print init SQL for a given metric or preset"` PrintSQL MetricPrintSQLCommand `command:"print-sql" description:"Get and print SQL for a given metric"` + List MetricListCommand `command:"list" description:"List available metrics and presets"` } func NewMetricCommand(owner *Options) *MetricCommand { @@ -18,6 +20,7 @@ func NewMetricCommand(owner *Options) *MetricCommand { owner: owner, PrintInit: MetricPrintInitCommand{owner: owner}, PrintSQL: MetricPrintSQLCommand{owner: owner}, + List: MetricListCommand{owner: owner}, } } @@ -34,21 +37,15 @@ func (cmd *MetricPrintInitCommand) Execute(args []string) error { if err != nil { return err } - w := cmd.owner.OutputWriter - for _, name := range args { - if preset, ok := metrics.PresetDefs[name]; ok { - for k := range preset.Metrics { - args = append(args, k) - } - } + metrics, err = metrics.FilterByNames(args) + if err != nil { + return err } - slices.Sort(args) - args = slices.Compact(args) - for _, mname := range args { - if m, ok := metrics.MetricDefs[mname]; ok && m.InitSQL != "" { - + w := cmd.owner.OutputWriter + for mname, mdef := range metrics.MetricDefs { + if mdef.InitSQL != "" { fmt.Fprintln(w, "--", mname) - fmt.Fprintln(w, m.InitSQL) + fmt.Fprintln(w, mdef.InitSQL) } } cmd.owner.CompleteCommand(ExitCodeOK) @@ -69,6 +66,10 @@ func (cmd *MetricPrintSQLCommand) Execute(args []string) error { if err != nil { return err } + metrics, err = metrics.FilterByNames(args) + if err != nil { + return err + } w := cmd.owner.OutputWriter if cmd.Version == 0 { cmd.Version = math.MaxInt32 } @@ -84,3 +85,28 @@ func (cmd *MetricPrintSQLCommand)
Execute(args []string) error { cmd.owner.CompleteCommand(ExitCodeOK) return nil } + +type MetricListCommand struct { + owner *Options +} + +func (cmd *MetricListCommand) Execute(args []string) error { + err := cmd.owner.InitMetricReader(context.Background()) + if err != nil { + return err + } + allMetrics, err := cmd.owner.MetricsReaderWriter.GetMetrics() + if err != nil { + return err + } + result, err := allMetrics.FilterByNames(args) + if err != nil { + return err + } + w := cmd.owner.OutputWriter + yamlData, err := yaml.Marshal(result) + if err != nil { return err } + fmt.Fprint(w, string(yamlData)) + cmd.owner.CompleteCommand(ExitCodeOK) + return nil +} diff --git a/internal/cmdopts/cmdmetric_test.go b/internal/cmdopts/cmdmetric_test.go index c4ea2451c5..63a9890508 100644 --- a/internal/cmdopts/cmdmetric_test.go +++ b/internal/cmdopts/cmdmetric_test.go @@ -15,8 +15,8 @@ func TestMetricPrintInit_Execute(t *testing.T) { w := &strings.Builder{} os.Args = []string{0: "config_test", "metric", "print-init", "test1"} _, err = New(w) - assert.Empty(t, w.String()) - assert.NoError(t, err, "should not error when no metrics found") + assert.Error(t, err, "should error when metric not found") + assert.Contains(t, err.Error(), "not found") w.Reset() os.Args = []string{0: "config_test", "metric", "print-init", "cpu_load"} @@ -45,8 +45,8 @@ func TestMetricPrintSQL_Execute(t *testing.T) { w := &strings.Builder{} os.Args = []string{0: "config_test", "metric", "print-sql", "test1"} _, err = New(w) - assert.Empty(t, w.String()) - assert.NoError(t, err, "should not error when no metrics found") + assert.Error(t, err, "should error when metric not found") + assert.Contains(t, err.Error(), "not found") w.Reset() os.Args = []string{0: "config_test", "metric", "print-sql", "cpu_load"} @@ -68,3 +68,54 @@ func TestMetricPrintSQL_Execute(t *testing.T) { _, err = New(w) assert.Error(t, err, "should error when no config database found") } + +func TestMetricList_Execute(t *testing.T) { + var err error + + // Test: List
all metrics and presets (no argument) + w := &strings.Builder{} + os.Args = []string{0: "config_test", "metric", "list"} + _, err = New(w) + assert.NoError(t, err, "should not error when listing all metrics") + assert.Contains(t, w.String(), "metrics:") + assert.Contains(t, w.String(), "presets:") + assert.Contains(t, w.String(), "cpu_load") + assert.Contains(t, w.String(), "standard") + + // Test: List specific metric + w.Reset() + os.Args = []string{0: "config_test", "metric", "list", "cpu_load"} + _, err = New(w) + assert.NoError(t, err, "should not error when listing specific metric") + assert.Contains(t, w.String(), "cpu_load") + assert.Contains(t, w.String(), "metrics:") + // Should not contain other metrics + assert.NotContains(t, w.String(), "presets:") + + // Test: List specific preset + w.Reset() + os.Args = []string{0: "config_test", "metric", "list", "standard"} + _, err = New(w) + assert.NoError(t, err, "should not error when listing preset") + assert.Contains(t, w.String(), "presets:") + assert.Contains(t, w.String(), "standard") + assert.Contains(t, w.String(), "metrics:") + // Should contain metrics from the preset + assert.Contains(t, w.String(), "cpu_load") + + // Test: List non-existent metric/preset + w.Reset() + os.Args = []string{0: "config_test", "metric", "list", "nonexistent"} + _, err = New(w) + assert.Error(t, err, "should error when metric/preset not found") + assert.Contains(t, err.Error(), "not found") + + // Test: Error handling - invalid metrics path + os.Args = []string{0: "config_test", "--metrics=foo", "metric", "list"} + _, err = New(w) + assert.Error(t, err, "should error when no metric definitions found") + + os.Args = []string{0: "config_test", "--metrics=postgresql://foo@bar/fail", "metric", "list"} + _, err = New(w) + assert.Error(t, err, "should error when no config database found") +} diff --git a/internal/metrics/types.go b/internal/metrics/types.go index 4e11d1614f..e44299f128 100644 --- a/internal/metrics/types.go +++ 
b/internal/metrics/types.go @@ -1,6 +1,7 @@ package metrics import ( + "fmt" "maps" "time" @@ -154,9 +155,46 @@ type MeasurementEnvelope struct { } type Metrics struct { - MetricDefs MetricDefs `yaml:"metrics"` - PresetDefs PresetDefs `yaml:"presets"` + MetricDefs MetricDefs `yaml:"metrics,omitempty"` + PresetDefs PresetDefs `yaml:"presets,omitempty"` +} + +// FilterByNames returns a new Metrics struct containing only the specified metrics and/or presets. +// When a preset is requested, it includes both the preset definition and all its metrics. +// If names is empty, returns a full copy of all metrics and presets. +// Returns an error if any name is not found. +func (m *Metrics) FilterByNames(names []string) (*Metrics, error) { + result := &Metrics{ + MetricDefs: make(MetricDefs), + PresetDefs: make(PresetDefs), + } + + // If no names provided, return full copy + if len(names) == 0 { + maps.Copy(result.MetricDefs, m.MetricDefs) + maps.Copy(result.PresetDefs, m.PresetDefs) + return result, nil + } + + for _, name := range names { + if preset, ok := m.PresetDefs[name]; ok { + result.PresetDefs[name] = preset + // Include all metrics from the preset + for metricName := range preset.Metrics { + if metric, exists := m.MetricDefs[metricName]; exists { + result.MetricDefs[metricName] = metric + } + } + } else if metric, ok := m.MetricDefs[name]; ok { + result.MetricDefs[name] = metric + } else { + return nil, fmt.Errorf("metric or preset '%s' not found", name) + } + } + + return result, nil } + type Reader interface { GetMetrics() (*Metrics, error) } diff --git a/internal/metrics/types_test.go b/internal/metrics/types_test.go index f52c2cd0cb..48d2dc8041 100644 --- a/internal/metrics/types_test.go +++ b/internal/metrics/types_test.go @@ -64,3 +64,141 @@ func TestMeasurements(t *testing.T) { assert.NotEqual(t, m, m1, "deep copy should be different") assert.True(t, time.Now().UnixNano()-m1.GetEpoch() < int64(time.Second), "epoch should be close to now") } + +func 
TestFilterByNames(t *testing.T) { + // Setup test data + metrics := &Metrics{ + MetricDefs: MetricDefs{ + "cpu_load": Metric{ + Description: "CPU load metric", + InitSQL: "CREATE FUNCTION cpu_load()", + }, + "db_size": Metric{ + Description: "Database size metric", + }, + "db_stats": Metric{ + Description: "Database stats metric", + }, + "replication": Metric{ + Description: "Replication metric", + }, + }, + PresetDefs: PresetDefs{ + "minimal": Preset{ + Description: "Minimal preset", + Metrics: map[string]float64{ + "cpu_load": 60, + "db_size": 300, + }, + }, + "standard": Preset{ + Description: "Standard preset", + Metrics: map[string]float64{ + "cpu_load": 60, + "db_size": 300, + "db_stats": 60, + "replication": 120, + }, + }, + }, + } + + tests := []struct { + name string + names []string + wantMetrics []string + wantPresets []string + wantErr bool + errContains string + }{ + { + name: "empty names returns all", + names: []string{}, + wantMetrics: []string{"cpu_load", "db_size", "db_stats", "replication"}, + wantPresets: []string{"minimal", "standard"}, + wantErr: false, + }, + { + name: "single metric", + names: []string{"cpu_load"}, + wantMetrics: []string{"cpu_load"}, + wantPresets: []string{}, + wantErr: false, + }, + { + name: "multiple metrics", + names: []string{"cpu_load", "db_size"}, + wantMetrics: []string{"cpu_load", "db_size"}, + wantPresets: []string{}, + wantErr: false, + }, + { + name: "single preset includes all its metrics", + names: []string{"minimal"}, + wantMetrics: []string{"cpu_load", "db_size"}, + wantPresets: []string{"minimal"}, + wantErr: false, + }, + { + name: "multiple presets", + names: []string{"minimal", "standard"}, + wantMetrics: []string{"cpu_load", "db_size", "db_stats", "replication"}, + wantPresets: []string{"minimal", "standard"}, + wantErr: false, + }, + { + name: "mix of metrics and presets", + names: []string{"minimal", "replication"}, + wantMetrics: []string{"cpu_load", "db_size", "replication"}, + wantPresets: 
[]string{"minimal"}, + wantErr: false, + }, + { + name: "non-existent metric", + names: []string{"nonexistent"}, + wantErr: true, + errContains: "not found", + }, + { + name: "mix of existing and non-existing", + names: []string{"cpu_load", "nonexistent"}, + wantErr: true, + errContains: "not found", + }, + { + name: "non-existent preset", + names: []string{"nonexistent_preset"}, + wantErr: true, + errContains: "not found", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := metrics.FilterByNames(tt.names) + + if tt.wantErr { + assert.Error(t, err) + if tt.errContains != "" { + assert.Contains(t, err.Error(), tt.errContains) + } + return + } + + assert.NoError(t, err) + assert.NotNil(t, result) + + // Check metrics + assert.Equal(t, len(tt.wantMetrics), len(result.MetricDefs), "metric count mismatch") + for _, metricName := range tt.wantMetrics { + assert.Contains(t, result.MetricDefs, metricName, "expected metric not found: "+metricName) + } + + // Check presets + assert.Equal(t, len(tt.wantPresets), len(result.PresetDefs), "preset count mismatch") + for _, presetName := range tt.wantPresets { + assert.Contains(t, result.PresetDefs, presetName, "expected preset not found: "+presetName) + } + }) + } +}