From 9960d33e11ef8a96295bd7f51f8286faa05ca367 Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Sat, 21 Mar 2026 23:12:48 +0100 Subject: [PATCH 01/14] fixing concurrent read on the map --- internal/pluginmgr/manager.go | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/internal/pluginmgr/manager.go b/internal/pluginmgr/manager.go index 4066538..45c7b30 100644 --- a/internal/pluginmgr/manager.go +++ b/internal/pluginmgr/manager.go @@ -367,7 +367,7 @@ func (m *PluginManager[T]) startWithBackoff(path, hash string) error { err := m.start(path, hash) if err != nil { m.mu.Lock() - f = m.failures[path] + f = m.failures[path] // re-fetch: another goroutine may have updated this key between the two lock acquisitions if f == nil { f = &startFailure{hash: hash} m.failures[path] = f @@ -425,7 +425,11 @@ func (m *PluginManager[T]) update(path string, oldHandles []*PluginHandle, newHa func (m *PluginManager[T]) kill(handle *PluginHandle) { handle.killOnce.Do(func() { close(handle.stopped) - defer func() { recover() }() //nolint:errcheck - best-effort shutdown + defer func() { + if r := recover(); r != nil { + m.log.ErrorF("panic during shutdown of %s [%s]: %v", handle.Name, handle.ID, r) + } + }() ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) _ = handle.Lifecycle.Shutdown(ctx) cancel() @@ -469,11 +473,16 @@ func (m *PluginManager[T]) remove(key string, handles []*PluginHandle) { // stops the subprocesses and restarts them with backoff. // Sets restarting[path] before stop() so reconcile() does not race to fill the // now-empty plugin_handles slot while the new spawn is in progress. +// If restarting[path] is already set, another pingLoop worker beat us to it — bail. 
func (m *PluginManager[T]) restart(key string, handles []*PluginHandle) error { path := handles[0].BinPath hash := handles[0].Hash m.mu.Lock() + if _, already := m.restarting[path]; already { + m.mu.Unlock() + return nil // another pingLoop worker is already handling this restart + } m.restarting[path] = struct{}{} m.mu.Unlock() @@ -523,6 +532,18 @@ func (m *PluginManager[T]) pingLoop(handle *PluginHandle) { m.mu.RLock() group := m.plugin_handles[handle.BinPath] m.mu.RUnlock() + // Guard: if our handle is no longer in the group, update() replaced it + // while Ping() was running. The new workers' pingLoops own any future restarts. + inGroup := false + for _, h := range group { + if h == handle { + inGroup = true + break + } + } + if !inGroup { + return + } if restartErr := m.restart(handle.BinPath, group); restartErr != nil { m.log.Error(errors.NewF("restart failed for %s: %v", handle.BinPath, restartErr)) } From 68f321d456d8600d8a33b63e4ed108d318724676 Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Sun, 22 Mar 2026 18:48:31 +0100 Subject: [PATCH 02/14] adding batching evaluation --- cmd/alert_enricher/enricher/enricher.go | 208 ++++++++++++------ cmd/alert_formatter/formatter/formatter.go | 2 +- cmd/alert_merger/merger/merger.go | 75 ++++--- cmd/rule_tuner/tuner/tuner.go | 93 +++++--- internal/configuration/configuration.go | 9 + internal/pools/metrics.go | 5 - internal/pools/routing.go | 9 +- pkg/formatters/rpc_formatters/formatter.pb.go | 8 + pkg/formatters/rpc_formatters/formatter.proto | 1 + pkg/rules/batch.go | 17 -- pkg/rules/testdata/crashing_rule/main.go | 27 +++ pkg/rules/testdata/simple_rule/main.go | 19 ++ pkg/tuning_rules/rpc_tuning_rule.go | 4 + .../rpc_tuning_rules/tuning_rule.pb.go | 10 +- .../rpc_tuning_rules/tuning_rule.proto | 1 + pkg/tuning_rules/tuning_rule.go | 1 + 16 files changed, 330 insertions(+), 159 deletions(-) delete mode 100644 pkg/rules/batch.go create mode 100644 pkg/rules/testdata/crashing_rule/main.go create mode 100644 
pkg/rules/testdata/simple_rule/main.go diff --git a/cmd/alert_enricher/enricher/enricher.go b/cmd/alert_enricher/enricher/enricher.go index b6761be..32388e7 100644 --- a/cmd/alert_enricher/enricher/enricher.go +++ b/cmd/alert_enricher/enricher/enricher.go @@ -3,7 +3,6 @@ package enricher import ( "context" "sync" - "sync/atomic" "time" "github.com/harishhary/blink/internal/broker" @@ -19,9 +18,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" ) -const ( - defaultEnrichmentTimeout = 5 * time.Second -) +const defaultEnrichmentTimeout = 5 * time.Second var ( alertsIn = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_enricher", Name: "alerts_in_total"}) @@ -34,6 +31,13 @@ var ( writeErrors = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_enricher", Name: "write_errors_total"}) ) +// enrichAlertState holds a decoded alert and its enrichment outcome for a batch entry. +type enrichAlertState struct { + key []byte + alert *alerts.Alert + anyMissing bool +} + // EnricherService reads alerts from Kafka, enriches them, and writes to the formatter topic. type EnricherService struct { svcctx.ServiceContext @@ -71,73 +75,147 @@ func NewEnricherService(pool *enrichcatalog.Pool) (*EnricherService, error) { func (service *EnricherService) Name() string { return "alert-enricher" } -// Reads alerts from Kafka, applies enrichments declared by the alert's rule, and writes to the formatter topic. 
func (service *EnricherService) Run(ctx context.Context) errors.Error { - return services.RunAlertPipeline(ctx, service.Logger, service.reader, service.writer, service.dlq, 50, - services.PipelineCounters{ - In: alertsIn.Inc, Out: alertsOut.Inc, DLQ: alertsDLQ.Inc, - ParseError: parseErrors.Inc, WriteError: writeErrors.Inc, - }, - func(ctx context.Context, _ []byte, alert *alerts.Alert) (skip bool, deadLetter bool) { - service.Info("enriching alert %s", alert.AlertID) - - applied := make(map[string]struct{}, len(alert.EnrichmentsApplied)) - for _, name := range alert.EnrichmentsApplied { - applied[name] = struct{}{} + for { + msgs, err := service.reader.ReadBatch(ctx, 50) + if err != nil { + if ctx.Err() != nil { + return nil } + service.Error(errors.NewE(err)) + continue + } - var ( - anyMissing atomic.Bool - mu sync.Mutex - succeeded []string - wg sync.WaitGroup - ) - for _, name := range alert.Rule.Enrichments() { - if _, done := applied[name]; done { - continue - } - wg.Add(1) - go func(enrName string) { - defer wg.Done() - - cctx, cancel := context.WithTimeout(ctx, defaultEnrichmentTimeout) - defer cancel() - start := time.Now() - absent, removed, err := service.pool.Enrich(cctx, enrName, alert, "") - switch { - case removed: - anyMissing.Store(true) - service.Error(errors.NewF("enrichment %s removed - alert %s missing enrichment", enrName, alert.AlertID)) - case absent: - anyMissing.Store(true) - service.Error(errors.NewF("enrichment %s not found - alert %s missing enrichment", enrName, alert.AlertID)) - case err != nil: - enrichmentErrors.WithLabelValues(enrName).Inc() - service.Error(errors.NewF("enrichment %s failed: %v", enrName, err)) - default: - enrichmentsApplied.WithLabelValues(enrName).Inc() - mu.Lock() - succeeded = append(succeeded, enrName) - mu.Unlock() - } - enrichmentLatency.WithLabelValues(enrName).Observe(time.Since(start).Seconds()) - }(name) + service.processBatch(ctx, msgs) + + if err := service.reader.CommitMessages(ctx, msgs...); err != 
nil { + if ctx.Err() != nil { + return nil } - wg.Wait() + service.Error(errors.NewE(err)) + } + } +} - alert.EnrichmentsApplied = append(alert.EnrichmentsApplied, succeeded...) +func (service *EnricherService) processBatch(ctx context.Context, msgs []broker.Message) { + // Decode all alerts. + states := make([]*enrichAlertState, 0, len(msgs)) + for _, m := range msgs { + alert, err := alerts.Unmarshal(m.Value) + if err != nil { + parseErrors.Inc() + service.Error(errors.NewE(err)) + continue + } + alertsIn.Inc() + states = append(states, &enrichAlertState{key: m.Key, alert: alert}) + } + if len(states) == 0 { + return + } - if anyMissing.Load() { - alert.Attempts++ - if alert.Attempts >= services.MaxPluginAttempts || service.dlq == nil { - service.Info("alert %s passed through after %d attempts (enrichment unavailable)", alert.AlertID, alert.Attempts) - alert.EnrichmentsApplied = nil - return false, false + // Group by enrichment name: name → indices into states. + // Respect already-applied enrichments from prior DLQ retries. + byEnrichment := make(map[string][]int) + for i, s := range states { + applied := make(map[string]struct{}, len(s.alert.EnrichmentsApplied)) + for _, name := range s.alert.EnrichmentsApplied { + applied[name] = struct{}{} + } + for _, name := range s.alert.Rule.Enrichments() { + if _, done := applied[name]; done { + continue + } + byEnrichment[name] = append(byEnrichment[name], i) + } + } + + // Fan out: one goroutine per enrichment with all its alerts. 
+ var mu sync.Mutex + var wg sync.WaitGroup + for name, idxs := range byEnrichment { + wg.Add(1) + go func(name string, idxs []int) { + defer wg.Done() + + alrts := make([]*alerts.Alert, len(idxs)) + for j, idx := range idxs { + alrts[j] = states[idx].alert + } + + cctx, cancel := context.WithTimeout(ctx, defaultEnrichmentTimeout) + defer cancel() + start := time.Now() + absent, removed, errs := service.pool.Enrich(cctx, name, alrts, "") + enrichmentLatency.WithLabelValues(name).Observe(time.Since(start).Seconds()) + + mu.Lock() + defer mu.Unlock() + switch { + case removed: + service.Error(errors.NewF("enrichment %s removed", name)) + for _, idx := range idxs { + states[idx].anyMissing = true + } + case absent: + service.Error(errors.NewF("enrichment %s not found", name)) + for _, idx := range idxs { + states[idx].anyMissing = true + } + default: + for j, idx := range idxs { + if errs[j] != nil { + enrichmentErrors.WithLabelValues(name).Inc() + service.Error(errs[j]) + } else { + enrichmentsApplied.WithLabelValues(name).Inc() + states[idx].alert.EnrichmentsApplied = append(states[idx].alert.EnrichmentsApplied, name) + } } - return false, true } - alert.EnrichmentsApplied = nil - return false, false - }, - ) + }(name, idxs) + } + wg.Wait() + + // Write results. 
+ for _, s := range states { + if s.anyMissing { + s.alert.Attempts++ + if s.alert.Attempts >= services.MaxPluginAttempts || service.dlq == nil { + service.Info("alert %s passed through after %d attempts (enrichment unavailable)", s.alert.AlertID, s.alert.Attempts) + s.alert.EnrichmentsApplied = nil + // fall through to write + } else { + payload, err := alerts.Marshal(s.alert) + if err != nil { + writeErrors.Inc() + service.Error(errors.NewE(err)) + continue + } + err = service.dlq.WriteMessages(ctx, broker.Message{Key: s.key, Value: payload}) + if err != nil { + writeErrors.Inc() + service.Error(errors.NewE(err)) + } else { + alertsDLQ.Inc() + } + continue + } + } + + s.alert.EnrichmentsApplied = nil + payload, err := alerts.Marshal(s.alert) + if err != nil { + writeErrors.Inc() + service.Error(errors.NewE(err)) + continue + } + err = service.writer.WriteMessages(ctx, broker.Message{Key: s.key, Value: payload}) + if err != nil { + writeErrors.Inc() + service.Error(errors.NewE(err)) + continue + } + alertsOut.Inc() + } } diff --git a/cmd/alert_formatter/formatter/formatter.go b/cmd/alert_formatter/formatter/formatter.go index 330de8c..c359b25 100644 --- a/cmd/alert_formatter/formatter/formatter.go +++ b/cmd/alert_formatter/formatter/formatter.go @@ -64,7 +64,7 @@ func (service *FormatterService) Name() string { return "alert-formatter" } // Reads alerts from Kafka, applies formatters, and writes to the dispatcher topic. 
func (service *FormatterService) Run(ctx context.Context) errors.Error { - return services.RunAlertPipeline(ctx, service.Logger, service.reader, service.writer, service.dlq, 50, + return services.RunAlertPipeline(ctx, service.Logger, service.reader, service.writer, service.dlq, 50, 4, services.PipelineCounters{ In: alertsIn.Inc, Out: alertsOut.Inc, DLQ: alertsDLQ.Inc, ParseError: parseErrors.Inc, WriteError: writeErrors.Inc, diff --git a/cmd/alert_merger/merger/merger.go b/cmd/alert_merger/merger/merger.go index 000361b..88b6dac 100644 --- a/cmd/alert_merger/merger/merger.go +++ b/cmd/alert_merger/merger/merger.go @@ -2,9 +2,6 @@ package merger import ( "context" - "fmt" - "sort" - "strings" "sync" "time" @@ -20,13 +17,14 @@ import ( ) var ( - alertsIn = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "alerts_in_total"}) - alertsOut = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "alerts_out_total"}) - alertsMerged = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "alerts_merged_total"}) - groupsFlushed = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "groups_flushed_total"}) - parseErrors = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "parse_errors_total"}) - writeErrors = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "write_errors_total"}) - activeGroups = promauto.NewGauge(prometheus.GaugeOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "active_groups"}) + alertsIn = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "alerts_in_total"}) + alertsOut = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "alerts_out_total"}) + alertsMerged = 
promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "alerts_merged_total"}) + groupsFlushed = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "groups_flushed_total"}) + groupsEvicted = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "groups_evicted_total"}) + parseErrors = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "parse_errors_total"}) + writeErrors = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "write_errors_total"}) + activeGroups = promauto.NewGauge(prometheus.GaugeOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "active_groups"}) ) // mergeGroup holds a set of alerts that share the same rule and merge-by key values and are within each other's merge window. @@ -38,10 +36,11 @@ type mergeGroup struct { // MergerService reads alerts from Kafka, merges related alerts within their time window, and writes merged (or pass-through) alerts to the tuner topic. type MergerService struct { svcctx.ServiceContext - reader broker.Reader - writer broker.Writer - mu sync.Mutex - groups map[string]*mergeGroup // key: rule_name|merge_by_values + reader broker.Reader + writer broker.Writer + mu sync.Mutex + groups map[string]*mergeGroup // key: rule_name|merge_by_values + maxGroups int // 0 = unlimited } func NewMergerService() (*MergerService, error) { @@ -61,6 +60,7 @@ func NewMergerService() (*MergerService, error) { reader: reader, writer: writer, groups: make(map[string]*mergeGroup), + maxGroups: cfg.Merger.MaxGroups, }, nil } @@ -109,8 +109,9 @@ func (s *MergerService) Run(ctx context.Context) errors.Error { } // adds alert to its merge group, or flushes the existing group and starts a new one when the incoming alert falls outside the current window. 
+// If maxGroups is set and the cap is exceeded after inserting, the oldest group (earliest expiry) is evicted immediately. func (s *MergerService) accumulate(ctx context.Context, alert *alerts.Alert) { - key := groupKey(alert) + key := alert.MergePartitionKey() s.mu.Lock() g, exists := s.groups[key] @@ -121,24 +122,46 @@ func (s *MergerService) accumulate(ctx context.Context, alert *alerts.Alert) { return } - // Either no existing group or the window has moved on - flush the old group + // Either no existing group or the window has moved on — flush the old group // (if any) and start a new one. - var toFlush *mergeGroup + toFlush := make([]*mergeGroup, 0, 2) if exists { - toFlush = g + toFlush = append(toFlush, g) } s.groups[key] = &mergeGroup{ alerts: []*alerts.Alert{alert}, expires: alert.Created.Add(alert.Rule.MergeWindowMins()), } + + // Cap eviction: if over the limit, find and remove the oldest group so memory + // stays bounded regardless of merge key cardinality. + if s.maxGroups > 0 && len(s.groups) > s.maxGroups { + oldestKey := s.oldestKey() + toFlush = append(toFlush, s.groups[oldestKey]) + delete(s.groups, oldestKey) + groupsEvicted.Inc() + } + activeGroups.Set(float64(len(s.groups))) s.mu.Unlock() - if toFlush != nil { - s.flushGroup(ctx, toFlush) + for _, g := range toFlush { + s.flushGroup(ctx, g) } } +// oldestKey returns the map key of the group with the earliest expiry time. +// Must be called with s.mu held. +func (s *MergerService) oldestKey() string { + var oldest string + for k, g := range s.groups { + if oldest == "" || g.expires.Before(s.groups[oldest].expires) { + oldest = k + } + } + return oldest +} + // ticks every 10 seconds and flushes any group whose window has closed. 
func (s *MergerService) flushLoop(ctx context.Context) { ticker := time.NewTicker(10 * time.Second) @@ -229,15 +252,3 @@ func (s *MergerService) writeAlert(ctx context.Context, alert *alerts.Alert) { alertsOut.Inc() } -// groupKey builds a stable string key from the alert's rule name and merge-by field values. Keys are sorted before joining to ensure map key consistency regardless of iteration order. -func groupKey(alert *alerts.Alert) string { - keys := alert.Rule.MergeByKeys() - sort.Strings(keys) - merged := alert.Event.GetMergedKeys(keys) - parts := make([]string, 0, len(keys)+1) - parts = append(parts, alert.Rule.Name()) - for _, k := range keys { - parts = append(parts, fmt.Sprintf("%v", merged[k])) - } - return strings.Join(parts, "|") -} diff --git a/cmd/rule_tuner/tuner/tuner.go b/cmd/rule_tuner/tuner/tuner.go index b626ee2..f28f658 100644 --- a/cmd/rule_tuner/tuner/tuner.go +++ b/cmd/rule_tuner/tuner/tuner.go @@ -3,6 +3,8 @@ package tuner import ( "context" stderrors "errors" + "sync" + "sync/atomic" "github.com/harishhary/blink/internal/broker" "github.com/harishhary/blink/internal/broker/kafka" @@ -76,7 +78,7 @@ func NewTunerService(pool *tuningcatalog.Pool) (*TunerService, error) { func (service *TunerService) Name() string { return "rule-tuner" } func (service *TunerService) Run(ctx context.Context) errors.Error { - return services.RunAlertPipeline(ctx, service.Logger, service.reader, service.writer, service.dlq, 50, + return services.RunAlertPipeline(ctx, service.Logger, service.reader, service.writer, service.dlq, 50, 4, services.PipelineCounters{ In: alertsIn.Inc, Out: alertsOut.Inc, DLQ: alertsDLQ.Inc, ParseError: parseErrors.Inc, WriteError: writeErrors.Inc, @@ -84,41 +86,66 @@ func (service *TunerService) Run(ctx context.Context) errors.Error { func(ctx context.Context, _ []byte, alert *alerts.Alert) (skip bool, deadLetter bool) { service.Info("applying tuning rules for alert %s", alert.AlertID) - var results []tuneResult - for _, name := range 
alert.Rule.TuningRules() { - var res tuneResult - if err := service.pool.Call(ctx, name, "", func(callCtx context.Context, r tuning_rules.TuningRule) error { - if !r.Enabled() { - return nil - } - res.ruleType = r.RuleType() - res.confidence = r.Confidence() - applies, e := r.Tune(callCtx, *alert) - if e != nil { - return e - } - res.applies = applies - return nil - }); err != nil { - if stderrors.Is(err, pools.ErrPluginRemoved) || stderrors.Is(err, pools.ErrPluginNotFound) { - label := "not found" - if stderrors.Is(err, pools.ErrPluginRemoved) { - label = "removed" + names := alert.Rule.TuningRules() + if len(names) == 0 { + return false, false + } + + // Fan out tuning rule evaluations. Each r.Tune receives a copy of the + // alert (*alert dereference), so goroutines read independent state. + var ( + mu sync.Mutex + anyMissing atomic.Bool + results []tuneResult + wg sync.WaitGroup + ) + for _, name := range names { + wg.Add(1) + go func(name string) { + defer wg.Done() + var res tuneResult + err := service.pool.Call(ctx, name, "", func(callCtx context.Context, r tuning_rules.TuningRule) error { + if !r.Enabled() { + return nil } - service.Error(errors.NewF("tuning rule %s %s - alert %s missing tuning", name, label, alert.AlertID)) - alert.Attempts++ - if alert.Attempts >= services.MaxPluginAttempts { - service.Info("alert %s passed through after %d attempts (tuning rule unavailable)", alert.AlertID, alert.Attempts) - continue + res.ruleType = r.RuleType() + res.confidence = r.Confidence() + applies, e := r.Tune(callCtx, *alert) + if e != nil { + return e } - return false, true + res.applies = applies + return nil + }) + if err != nil { + if stderrors.Is(err, pools.ErrPluginRemoved) || stderrors.Is(err, pools.ErrPluginNotFound) { + label := "not found" + if stderrors.Is(err, pools.ErrPluginRemoved) { + label = "removed" + } + service.Error(errors.NewF("tuning rule %s %s - alert %s missing tuning", name, label, alert.AlertID)) + anyMissing.Store(true) + return + } 
+ service.Error(errors.NewE(err)) + tuningErrors.Inc() + return } - service.Error(errors.NewE(err)) - tuningErrors.Inc() - return false, false - } - if res.applies { - results = append(results, res) + if res.applies { + mu.Lock() + results = append(results, res) + mu.Unlock() + } + }(name) + } + wg.Wait() + + if anyMissing.Load() { + alert.Attempts++ + if alert.Attempts >= services.MaxPluginAttempts || service.dlq == nil { + service.Info("alert %s passed through after %d attempts (tuning rule unavailable)", alert.AlertID, alert.Attempts) + } else { + return false, true } } diff --git a/internal/configuration/configuration.go b/internal/configuration/configuration.go index ce84bf9..dce4794 100644 --- a/internal/configuration/configuration.go +++ b/internal/configuration/configuration.go @@ -17,6 +17,7 @@ type ServiceConfiguration struct { Kafka KafkaConfig Topics KafkaTopicsGroups Executor ExecutorConfig + Merger MergerConfig } // ServiceRole returns the role used by the service to perform operations @@ -77,3 +78,11 @@ type ExecutorConfig struct { // TimeoutSec is the per-event evaluation timeout in seconds. TimeoutSec int `env:"EXECUTOR_TIMEOUT_SEC,optional"` } + +type MergerConfig struct { + // MaxGroups caps the number of live merge groups held in memory per replica. + // When the cap is exceeded the oldest group (earliest expiry) is flushed + // immediately rather than waiting for its window to close. + // 0 means unlimited — only safe when merge_by_keys have low cardinality. 
+ MaxGroups int `env:"MERGER_MAX_GROUPS,optional"` +} diff --git a/internal/pools/metrics.go b/internal/pools/metrics.go index 8945381..61a9dba 100644 --- a/internal/pools/metrics.go +++ b/internal/pools/metrics.go @@ -10,7 +10,6 @@ type PoolMetrics struct { poolSize *prometheus.GaugeVec poolInflight *prometheus.GaugeVec drainDuration *prometheus.HistogramVec - killSwitches *prometheus.CounterVec shadowDiffs *prometheus.CounterVec } @@ -30,10 +29,6 @@ func NewPoolMetrics(subsystem string) *PoolMetrics { Name: "drain_duration_seconds", Help: "Time to drain an old pool.", Buckets: prometheus.DefBuckets, }, []string{"plugin_id", "version"}), - killSwitches: promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "blink", Subsystem: "pool_" + subsystem, - Name: "kill_switch_total", Help: "Kill switch activations.", - }, []string{"plugin_id"}), shadowDiffs: promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: "blink", Subsystem: "pool_" + subsystem, Name: "shadow_diff_total", Help: "Shadow evaluation errors or divergences.", diff --git a/internal/pools/routing.go b/internal/pools/routing.go index da22e84..352741b 100644 --- a/internal/pools/routing.go +++ b/internal/pools/routing.go @@ -4,14 +4,13 @@ import "sync" // PluginRouting holds the routing configuration for a single plugin. type PluginRouting struct { - KillSwitch bool Mode RolloutMode RolloutPct float64 } // RoutingTable is a thread-safe map from pluginID to PluginRouting. // Pass RoutingTable.Config() to NewProcessPool to enable live routing control. -// An empty table is valid - missing entries default to blue-green with no kill switch. +// An empty table is valid - missing entries default to blue-green. type RoutingTable struct { mu sync.RWMutex entries map[string]PluginRouting @@ -39,13 +38,13 @@ func (t *RoutingTable) Delete(pluginID string) { // Returns a RoutingConfig closure that reads live from the table. // Pass this to NewProcessPool. 
func (t *RoutingTable) Config() RoutingConfig { - return func(pluginID string) (bool, RolloutMode, float64) { + return func(pluginID string) (RolloutMode, float64) { t.mu.RLock() r, ok := t.entries[pluginID] t.mu.RUnlock() if !ok { - return false, RolloutModeBlueGreen, 0 + return RolloutModeBlueGreen, 0 } - return r.KillSwitch, r.Mode, r.RolloutPct + return r.Mode, r.RolloutPct } } diff --git a/pkg/formatters/rpc_formatters/formatter.pb.go b/pkg/formatters/rpc_formatters/formatter.pb.go index d968319..03314c7 100644 --- a/pkg/formatters/rpc_formatters/formatter.pb.go +++ b/pkg/formatters/rpc_formatters/formatter.pb.go @@ -63,6 +63,7 @@ type FormatterMetadata struct { Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` + Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -125,6 +126,13 @@ func (x *FormatterMetadata) GetEnabled() bool { return false } +func (x *FormatterMetadata) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + type FormatRequest struct { state protoimpl.MessageState `protogen:"open.v1"` AlertJson []byte `protobuf:"bytes,1,opt,name=alert_json,json=alertJson,proto3" json:"alert_json,omitempty"` // JSON-encoded alerts.Alert diff --git a/pkg/formatters/rpc_formatters/formatter.proto b/pkg/formatters/rpc_formatters/formatter.proto index c2e7f65..aa3e1d7 100644 --- a/pkg/formatters/rpc_formatters/formatter.proto +++ b/pkg/formatters/rpc_formatters/formatter.proto @@ -11,6 +11,7 @@ message FormatterMetadata { string name = 2; string description = 3; bool enabled = 4; + string version = 5; } message FormatRequest { diff --git a/pkg/rules/batch.go b/pkg/rules/batch.go deleted file mode 100644 index 9d7dbf4..0000000 --- 
a/pkg/rules/batch.go +++ /dev/null @@ -1,17 +0,0 @@ -package rules - -import ( - "context" - - "github.com/harishhary/blink/internal/errors" - "github.com/harishhary/blink/pkg/events" -) - -// BatchEvaluator is an optional capability that rules may implement to evaluate -// multiple events in a single call. The rule executor checks for this interface -// via type assertion and prefers it over N individual Evaluate() calls when -// processing a batch of events for the same log type, reducing gRPC round-trips -// for go-plugin rules. -type BatchEvaluator interface { - EvaluateBatch(ctx context.Context, events []events.Event) ([]bool, errors.Error) -} diff --git a/pkg/rules/testdata/crashing_rule/main.go b/pkg/rules/testdata/crashing_rule/main.go new file mode 100644 index 0000000..2a85587 --- /dev/null +++ b/pkg/rules/testdata/crashing_rule/main.go @@ -0,0 +1,27 @@ +package main + +import ( + "context" + "os" + "time" + + "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/pkg/events" + "github.com/harishhary/blink/pkg/rules/sdk" +) + +type crashingRule struct{ sdk.BaseRule } + +func (crashingRule) Evaluate(_ context.Context, _ events.Event) (bool, errors.Error) { + return false, nil +} + +func main() { + // Exit 300ms after startup — long enough for the manager to complete the + // Init handshake (~50ms), short enough for crash tests to run quickly. 
+ go func() { + time.Sleep(300 * time.Millisecond) + os.Exit(1) + }() + sdk.Serve(crashingRule{}) +} diff --git a/pkg/rules/testdata/simple_rule/main.go b/pkg/rules/testdata/simple_rule/main.go new file mode 100644 index 0000000..38f6667 --- /dev/null +++ b/pkg/rules/testdata/simple_rule/main.go @@ -0,0 +1,19 @@ +package main + +import ( + "context" + + "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/pkg/events" + "github.com/harishhary/blink/pkg/rules/sdk" +) + +type simpleRule struct{ sdk.BaseRule } + +func (simpleRule) Evaluate(_ context.Context, _ events.Event) (bool, errors.Error) { + return true, nil +} + +func main() { + sdk.Serve(simpleRule{}) +} diff --git a/pkg/tuning_rules/rpc_tuning_rule.go b/pkg/tuning_rules/rpc_tuning_rule.go index a90da5a..c65f90f 100644 --- a/pkg/tuning_rules/rpc_tuning_rule.go +++ b/pkg/tuning_rules/rpc_tuning_rule.go @@ -39,6 +39,10 @@ func (r *rpcTuningRule) Enabled() bool { return r.meta.GetEnabled() } +func (r *rpcTuningRule) Version() string { + return r.meta.GetVersion() +} + func (r *rpcTuningRule) Checksum() string { return r.checksum } diff --git a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go index 325fee6..28bbe44 100644 --- a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go +++ b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go @@ -65,7 +65,8 @@ type TuningMetadata struct { Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` Global bool `protobuf:"varint,5,opt,name=global,proto3" json:"global,omitempty"` RuleType int32 `protobuf:"varint,6,opt,name=rule_type,json=ruleType,proto3" json:"rule_type,omitempty"` // 0=Ignore, 1=SetConfidence, 2=IncreaseConfidence, 3=DecreaseConfidence - Confidence string `protobuf:"bytes,7,opt,name=confidence,proto3" json:"confidence,omitempty"` // "verylow|low|medium|high|veryhigh" + Confidence string `protobuf:"bytes,7,opt,name=confidence,proto3" 
json:"confidence,omitempty"` // "verylow|low|medium|high|veryhigh" + Version string `protobuf:"bytes,8,opt,name=version,proto3" json:"version,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -149,6 +150,13 @@ func (x *TuningMetadata) GetConfidence() string { return "" } +func (x *TuningMetadata) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + type TuneRequest struct { state protoimpl.MessageState `protogen:"open.v1"` AlertJson []byte `protobuf:"bytes,1,opt,name=alert_json,json=alertJson,proto3" json:"alert_json,omitempty"` // JSON-encoded alerts.Alert diff --git a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto index b806182..904bc89 100644 --- a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto +++ b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto @@ -14,6 +14,7 @@ message TuningMetadata { bool global = 5; int32 rule_type = 6; // 0=Ignore, 1=SetConfidence, 2=IncreaseConfidence, 3=DecreaseConfidence string confidence = 7; // "verylow|low|medium|high|veryhigh" + string version = 8; } message TuneRequest { diff --git a/pkg/tuning_rules/tuning_rule.go b/pkg/tuning_rules/tuning_rule.go index bc2a1bf..ee264ea 100644 --- a/pkg/tuning_rules/tuning_rule.go +++ b/pkg/tuning_rules/tuning_rule.go @@ -33,6 +33,7 @@ type TuningRule interface { Name() string Description() string Enabled() bool + Version() string Global() bool RuleType() RuleType Confidence() scoring.Confidence From 1dabd90b1482a975d15875307971a685edb56fe1 Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Sun, 22 Mar 2026 20:29:01 +0100 Subject: [PATCH 03/14] batching evaluation again --- cmd/rule_tuner/tuner/tuner.go | 218 ++++++++++------ pkg/alerts/alert.go | 33 ++- pkg/enrichments/enrichment.go | 3 +- pkg/enrichments/rpc_enrichment.go | 31 ++- .../rpc_enrichments/enrichment.pb.go | 235 +++++++++++++----- .../rpc_enrichments/enrichment.proto | 15 +- 
.../rpc_enrichments/enrichment_grpc.pb.go | 42 +++- pkg/enrichments/sdk/serve.go | 20 ++ pkg/formatters/formatter.go | 3 +- pkg/formatters/rpc_formatter.go | 27 +- pkg/formatters/rpc_formatters/formatter.pb.go | 206 +++++++++++---- pkg/formatters/rpc_formatters/formatter.proto | 19 +- .../rpc_formatters/formatter_grpc.pb.go | 42 +++- pkg/formatters/sdk/serve.go | 20 ++ pkg/matchers/matcher.go | 3 +- pkg/matchers/rpc_matcher.go | 17 +- pkg/matchers/rpc_matchers/matcher.pb.go | 225 ++++++++++++----- pkg/matchers/rpc_matchers/matcher.proto | 15 +- pkg/matchers/rpc_matchers/matcher_grpc.pb.go | 42 +++- pkg/matchers/sdk/serve.go | 16 ++ pkg/rules/rpc_rules.go | 14 +- pkg/rules/rule.go | 6 +- pkg/tuning_rules/rpc_tuning_rule.go | 16 +- .../rpc_tuning_rules/tuning_rule.pb.go | 207 +++++++++++---- .../rpc_tuning_rules/tuning_rule.proto | 19 +- .../rpc_tuning_rules/tuning_rule_grpc.pb.go | 42 +++- pkg/tuning_rules/sdk/serve.go | 16 ++ pkg/tuning_rules/tuning_rule.go | 15 +- 28 files changed, 1154 insertions(+), 413 deletions(-) diff --git a/cmd/rule_tuner/tuner/tuner.go b/cmd/rule_tuner/tuner/tuner.go index f28f658..2a45379 100644 --- a/cmd/rule_tuner/tuner/tuner.go +++ b/cmd/rule_tuner/tuner/tuner.go @@ -4,7 +4,6 @@ import ( "context" stderrors "errors" "sync" - "sync/atomic" "github.com/harishhary/blink/internal/broker" "github.com/harishhary/blink/internal/broker/kafka" @@ -33,13 +32,21 @@ var ( writeErrors = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "rule_tuner", Name: "write_errors_total"}) ) -// tuneResult holds the outcome of a single tuning rule evaluation. +// tuneResult holds the outcome of a single tuning rule evaluation for one alert. type tuneResult struct { ruleType tuning_rules.RuleType confidence scoring.Confidence applies bool } +// alertState groups a decoded alert with its accumulated tuning results. 
+type alertState struct { + key []byte + alert *alerts.Alert + results []tuneResult + anyMissing bool +} + // TunerService reads alerts from Kafka, applies tuning rules, and writes to the enricher topic. type TunerService struct { svcctx.ServiceContext @@ -78,91 +85,148 @@ func NewTunerService(pool *tuningcatalog.Pool) (*TunerService, error) { func (service *TunerService) Name() string { return "rule-tuner" } func (service *TunerService) Run(ctx context.Context) errors.Error { - return services.RunAlertPipeline(ctx, service.Logger, service.reader, service.writer, service.dlq, 50, 4, - services.PipelineCounters{ - In: alertsIn.Inc, Out: alertsOut.Inc, DLQ: alertsDLQ.Inc, - ParseError: parseErrors.Inc, WriteError: writeErrors.Inc, - }, - func(ctx context.Context, _ []byte, alert *alerts.Alert) (skip bool, deadLetter bool) { - service.Info("applying tuning rules for alert %s", alert.AlertID) - - names := alert.Rule.TuningRules() - if len(names) == 0 { - return false, false + for { + msgs, err := service.reader.ReadBatch(ctx, 50) + if err != nil { + if ctx.Err() != nil { + return nil } + service.Error(errors.NewE(err)) + continue + } - // Fan out tuning rule evaluations. Each r.Tune receives a copy of the - // alert (*alert dereference), so goroutines read independent state. 
- var ( - mu sync.Mutex - anyMissing atomic.Bool - results []tuneResult - wg sync.WaitGroup - ) - for _, name := range names { - wg.Add(1) - go func(name string) { - defer wg.Done() - var res tuneResult - err := service.pool.Call(ctx, name, "", func(callCtx context.Context, r tuning_rules.TuningRule) error { - if !r.Enabled() { - return nil - } - res.ruleType = r.RuleType() - res.confidence = r.Confidence() - applies, e := r.Tune(callCtx, *alert) - if e != nil { - return e - } - res.applies = applies - return nil - }) - if err != nil { - if stderrors.Is(err, pools.ErrPluginRemoved) || stderrors.Is(err, pools.ErrPluginNotFound) { - label := "not found" - if stderrors.Is(err, pools.ErrPluginRemoved) { - label = "removed" - } - service.Error(errors.NewF("tuning rule %s %s - alert %s missing tuning", name, label, alert.AlertID)) - anyMissing.Store(true) - return - } - service.Error(errors.NewE(err)) - tuningErrors.Inc() - return + service.processBatch(ctx, msgs) + + if err := service.reader.CommitMessages(ctx, msgs...); err != nil { + if ctx.Err() != nil { + return nil + } + service.Error(errors.NewE(err)) + } + } +} + +func (service *TunerService) processBatch(ctx context.Context, msgs []broker.Message) { + // Decode all alerts. + states := make([]*alertState, 0, len(msgs)) + for _, m := range msgs { + alert, err := alerts.Unmarshal(m.Value) + if err != nil { + parseErrors.Inc() + service.Error(errors.NewE(err)) + continue + } + alertsIn.Inc() + states = append(states, &alertState{key: m.Key, alert: alert}) + } + if len(states) == 0 { + return + } + + // Group by tuning rule name: name => indices into states. + byRule := make(map[string][]int) + for i, s := range states { + for _, name := range s.alert.Rule.TuningRules() { + byRule[name] = append(byRule[name], i) + } + } + + // Fan out: one goroutine per tuning rule with all its alerts. 
+ var mu sync.Mutex + var wg sync.WaitGroup + for name, idxs := range byRule { + wg.Add(1) + go func(name string, idxs []int) { + defer wg.Done() + + copies := make([]alerts.Alert, len(idxs)) + for j, idx := range idxs { + copies[j] = *states[idx].alert + } + + ruleType, confidence, applies, err := service.pool.Tune(ctx, name, copies, "") + if err != nil { + if stderrors.Is(err, pools.ErrPluginRemoved) || stderrors.Is(err, pools.ErrPluginNotFound) { + label := "not found" + if stderrors.Is(err, pools.ErrPluginRemoved) { + label = "removed" } - if res.applies { - mu.Lock() - results = append(results, res) - mu.Unlock() + service.Error(errors.NewF("tuning rule %s %s", name, label)) + mu.Lock() + for _, idx := range idxs { + states[idx].anyMissing = true } - }(name) + mu.Unlock() + return + } + service.Error(errors.NewE(err)) + tuningErrors.Inc() + return } - wg.Wait() - if anyMissing.Load() { - alert.Attempts++ - if alert.Attempts >= services.MaxPluginAttempts || service.dlq == nil { - service.Info("alert %s passed through after %d attempts (tuning rule unavailable)", alert.AlertID, alert.Attempts) - } else { - return false, true + mu.Lock() + for j, idx := range idxs { + if applies[j] { + states[idx].results = append(states[idx].results, tuneResult{ + ruleType: ruleType, confidence: confidence, applies: true, + }) } } + mu.Unlock() + }(name, idxs) + } + wg.Wait() - before := alert.Confidence - confidence, ignored := applyTuningResults(alert.Confidence, results) - if ignored { - service.Info("alert %s ignored by tuning rule", alert.AlertID) - alertsIgnored.Inc() - return true, false - } - if confidence != before { - confidenceChanged.Inc() + // Apply results and write. 
+ for _, s := range states { + if s.anyMissing { + s.alert.Attempts++ + if s.alert.Attempts >= services.MaxPluginAttempts || service.dlq == nil { + service.Info("alert %s passed through after %d attempts (tuning rule unavailable)", s.alert.AlertID, s.alert.Attempts) + // fall through to write + } else { + payload, err := alerts.Marshal(s.alert) + if err != nil { + writeErrors.Inc() + service.Error(errors.NewE(err)) + continue + } + err = service.dlq.WriteMessages(ctx, broker.Message{Key: s.key, Value: payload}) + if err != nil { + service.Error(errors.NewE(err)) + } else { + alertsDLQ.Inc() + } + continue } - alert.Confidence = confidence - return false, false - }, - ) + } + + before := s.alert.Confidence + confidence, ignored := applyTuningResults(s.alert.Confidence, s.results) + if ignored { + service.Info("alert %s ignored by tuning rule", s.alert.AlertID) + alertsIgnored.Inc() + continue + } + if confidence != before { + confidenceChanged.Inc() + } + s.alert.Confidence = confidence + + payload, err := alerts.Marshal(s.alert) + if err != nil { + writeErrors.Inc() + service.Error(errors.NewE(err)) + continue + } + err = service.writer.WriteMessages(ctx, broker.Message{Key: s.key, Value: payload}) + if err != nil { + writeErrors.Inc() + service.Error(errors.NewE(err)) + continue + } + alertsOut.Inc() + } } // applyTuningResults applies tuning results in priority order: Ignore > SetConfidence > Increase/Decrease. 
diff --git a/pkg/alerts/alert.go b/pkg/alerts/alert.go index 8d5b68c..9f6f930 100644 --- a/pkg/alerts/alert.go +++ b/pkg/alerts/alert.go @@ -5,6 +5,7 @@ import ( "fmt" "reflect" "sort" + "strings" "time" "github.com/google/uuid" @@ -17,15 +18,15 @@ import ( // Alert struct encapsulates a single alert and handles serialization type Alert struct { - AlertID string - Attempts int - Cluster string - Created time.Time - Dispatched time.Time - Event events.Event - Staged bool - OutputsSent []string - EnrichmentsApplied []string + AlertID string + Attempts int + Cluster string + Created time.Time + Dispatched time.Time + Event events.Event + Staged bool + OutputsSent []string + EnrichmentsApplied []string LogSource string LogType string @@ -210,6 +211,20 @@ func (a *Alert) MergeEnabled() bool { return len(a.Rule.MergeByKeys()) > 0 && a.Rule.MergeWindowMins() > 0 } +// MergePartitionKey returns a stable Kafka partition key for this alert so that alerts belonging to the same merge group always land on the same partition and therefore the same alert-merger replica. +// The key is "rule_name|val1|val2" — the merge-by field values joined in the order of their alphabetically sorted key names (no key= prefixes). When merge is not enabled the rule name alone is returned, which is still a stable key - the merger will pass those alerts straight through on whichever partition they arrive on.
+func (a *Alert) MergePartitionKey() string { + keys := a.Rule.MergeByKeys() + sort.Strings(keys) + merged := a.Event.GetMergedKeys(keys) + parts := make([]string, 0, len(keys)+1) + parts = append(parts, a.Rule.Name()) + for _, k := range keys { + parts = append(parts, fmt.Sprintf("%v", merged[k])) + } + return strings.Join(parts, "|") +} + func (a *Alert) RemainingOutputs(requiredOutputs []string) []string { var outputsToSendNow []string if a.MergeEnabled() { diff --git a/pkg/enrichments/enrichment.go b/pkg/enrichments/enrichment.go index 41a8e87..6b326d0 100644 --- a/pkg/enrichments/enrichment.go +++ b/pkg/enrichments/enrichment.go @@ -52,7 +52,7 @@ func ValidateDependencyGraph(enrichments []IEnrichment) error { } type IEnrichment interface { - Enrich(ctx context.Context, alert *alerts.Alert) errors.Error + Enrich(ctx context.Context, alrts []*alerts.Alert) errors.Error // DependsOn returns plugin names that must run before this enrichment. DependsOn() []string @@ -60,6 +60,7 @@ type IEnrichment interface { Name() string Description() string Enabled() bool + Version() string Checksum() string String() string } diff --git a/pkg/enrichments/rpc_enrichment.go b/pkg/enrichments/rpc_enrichment.go index 2f5930c..3373ef9 100644 --- a/pkg/enrichments/rpc_enrichment.go +++ b/pkg/enrichments/rpc_enrichment.go @@ -28,29 +28,34 @@ func (r *rpcEnrichment) Id() string { func (r *rpcEnrichment) Name() string { return r.meta.GetName() } func (r *rpcEnrichment) Description() string { return r.meta.GetDescription() } func (r *rpcEnrichment) Enabled() bool { return r.meta.GetEnabled() } +func (r *rpcEnrichment) Version() string { return r.meta.GetVersion() } func (r *rpcEnrichment) Checksum() string { return r.checksum } func (r *rpcEnrichment) DependsOn() []string { return r.meta.GetDependsOn() } func (r *rpcEnrichment) String() string { return "RpcEnrichment '" + r.meta.GetName() + "' id:'" + r.meta.GetId() + "'" } -func (r *rpcEnrichment) Enrich(ctx context.Context, alert 
*alerts.Alert) errors.Error { - b, err := json.Marshal(alert.Event) - if err != nil { - return errors.New(err) +func (r *rpcEnrichment) Enrich(ctx context.Context, alrts []*alerts.Alert) errors.Error { + protoAlerts := make([]*rpc_enrichments.Alert, 0, len(alrts)) + for _, alrt := range alrts { + b, err := json.Marshal(alrt.Event) + if err != nil { + return errors.New(err) + } + protoAlerts = append(protoAlerts, &rpc_enrichments.Alert{Json: b}) } - resp, err := r.client.Enrich(ctx, &rpc_enrichments.EnrichRequest{ - Alert: &rpc_enrichments.Alert{Json: b}, - }) + resp, err := r.client.EnrichBatch(ctx, &rpc_enrichments.EnrichBatchRequest{Alerts: protoAlerts}) if err != nil { return errors.New(err) } - var enriched map[string]any - if err := json.Unmarshal(resp.GetAlert().GetJson(), &enriched); err != nil { - return errors.New(err) - } - for k, v := range enriched { - alert.Event[k] = v + for i, a := range resp.GetAlerts() { + var enriched map[string]any + if err := json.Unmarshal(a.GetJson(), &enriched); err != nil { + return errors.New(err) + } + for k, v := range enriched { + alrts[i].Event[k] = v + } } return nil } diff --git a/pkg/enrichments/rpc_enrichments/enrichment.pb.go b/pkg/enrichments/rpc_enrichments/enrichment.pb.go index 51285a6..5ec1745 100644 --- a/pkg/enrichments/rpc_enrichments/enrichment.pb.go +++ b/pkg/enrichments/rpc_enrichments/enrichment.pb.go @@ -2,7 +2,7 @@ // versions: // protoc-gen-go v1.36.9 // protoc v7.34.0 -// source: enrichment.proto +// source: pkg/enrichments/rpc_enrichments/enrichment.proto package rpc_enrichments @@ -29,7 +29,7 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - mi := &file_enrichment_proto_msgTypes[0] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -41,7 +41,7 @@ func (x *Empty) String() string { func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { - mi := 
&file_enrichment_proto_msgTypes[0] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -54,7 +54,7 @@ func (x *Empty) ProtoReflect() protoreflect.Message { // Deprecated: Use Empty.ProtoReflect.Descriptor instead. func (*Empty) Descriptor() ([]byte, []int) { - return file_enrichment_proto_rawDescGZIP(), []int{0} + return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{0} } type EnrichmentMetadata struct { @@ -71,7 +71,7 @@ type EnrichmentMetadata struct { func (x *EnrichmentMetadata) Reset() { *x = EnrichmentMetadata{} - mi := &file_enrichment_proto_msgTypes[1] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -83,7 +83,7 @@ func (x *EnrichmentMetadata) String() string { func (*EnrichmentMetadata) ProtoMessage() {} func (x *EnrichmentMetadata) ProtoReflect() protoreflect.Message { - mi := &file_enrichment_proto_msgTypes[1] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -96,7 +96,7 @@ func (x *EnrichmentMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use EnrichmentMetadata.ProtoReflect.Descriptor instead. 
func (*EnrichmentMetadata) Descriptor() ([]byte, []int) { - return file_enrichment_proto_rawDescGZIP(), []int{1} + return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{1} } func (x *EnrichmentMetadata) GetId() string { @@ -150,7 +150,7 @@ type Alert struct { func (x *Alert) Reset() { *x = Alert{} - mi := &file_enrichment_proto_msgTypes[2] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -162,7 +162,7 @@ func (x *Alert) String() string { func (*Alert) ProtoMessage() {} func (x *Alert) ProtoReflect() protoreflect.Message { - mi := &file_enrichment_proto_msgTypes[2] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -175,7 +175,7 @@ func (x *Alert) ProtoReflect() protoreflect.Message { // Deprecated: Use Alert.ProtoReflect.Descriptor instead. 
func (*Alert) Descriptor() ([]byte, []int) { - return file_enrichment_proto_rawDescGZIP(), []int{2} + return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{2} } func (x *Alert) GetJson() []byte { @@ -194,7 +194,7 @@ type EnrichRequest struct { func (x *EnrichRequest) Reset() { *x = EnrichRequest{} - mi := &file_enrichment_proto_msgTypes[3] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -206,7 +206,7 @@ func (x *EnrichRequest) String() string { func (*EnrichRequest) ProtoMessage() {} func (x *EnrichRequest) ProtoReflect() protoreflect.Message { - mi := &file_enrichment_proto_msgTypes[3] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -219,7 +219,7 @@ func (x *EnrichRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use EnrichRequest.ProtoReflect.Descriptor instead. 
func (*EnrichRequest) Descriptor() ([]byte, []int) { - return file_enrichment_proto_rawDescGZIP(), []int{3} + return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{3} } func (x *EnrichRequest) GetAlert() *Alert { @@ -238,7 +238,7 @@ type EnrichResponse struct { func (x *EnrichResponse) Reset() { *x = EnrichResponse{} - mi := &file_enrichment_proto_msgTypes[4] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -250,7 +250,7 @@ func (x *EnrichResponse) String() string { func (*EnrichResponse) ProtoMessage() {} func (x *EnrichResponse) ProtoReflect() protoreflect.Message { - mi := &file_enrichment_proto_msgTypes[4] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -263,7 +263,7 @@ func (x *EnrichResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use EnrichResponse.ProtoReflect.Descriptor instead. 
func (*EnrichResponse) Descriptor() ([]byte, []int) { - return file_enrichment_proto_rawDescGZIP(), []int{4} + return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{4} } func (x *EnrichResponse) GetAlert() *Alert { @@ -273,11 +273,99 @@ func (x *EnrichResponse) GetAlert() *Alert { return nil } -var File_enrichment_proto protoreflect.FileDescriptor +type EnrichBatchRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Alerts []*Alert `protobuf:"bytes,1,rep,name=alerts,proto3" json:"alerts,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EnrichBatchRequest) Reset() { + *x = EnrichBatchRequest{} + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EnrichBatchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnrichBatchRequest) ProtoMessage() {} + +func (x *EnrichBatchRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnrichBatchRequest.ProtoReflect.Descriptor instead. 
+func (*EnrichBatchRequest) Descriptor() ([]byte, []int) { + return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{5} +} + +func (x *EnrichBatchRequest) GetAlerts() []*Alert { + if x != nil { + return x.Alerts + } + return nil +} + +type EnrichBatchResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Alerts []*Alert `protobuf:"bytes,1,rep,name=alerts,proto3" json:"alerts,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EnrichBatchResponse) Reset() { + *x = EnrichBatchResponse{} + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EnrichBatchResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnrichBatchResponse) ProtoMessage() {} + +func (x *EnrichBatchResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnrichBatchResponse.ProtoReflect.Descriptor instead. 
+func (*EnrichBatchResponse) Descriptor() ([]byte, []int) { + return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{6} +} + +func (x *EnrichBatchResponse) GetAlerts() []*Alert { + if x != nil { + return x.Alerts + } + return nil +} + +var File_pkg_enrichments_rpc_enrichments_enrichment_proto protoreflect.FileDescriptor -const file_enrichment_proto_rawDesc = "" + +const file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDesc = "" + "\n" + - "\x10enrichment.proto\x12\venrichments\"\a\n" + + "0pkg/enrichments/rpc_enrichments/enrichment.proto\x12\venrichments\"\a\n" + "\x05Empty\"\xad\x01\n" + "\x12EnrichmentMetadata\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + @@ -292,75 +380,86 @@ const file_enrichment_proto_rawDesc = "" + "\rEnrichRequest\x12(\n" + "\x05alert\x18\x01 \x01(\v2\x12.enrichments.AlertR\x05alert\":\n" + "\x0eEnrichResponse\x12(\n" + - "\x05alert\x18\x01 \x01(\v2\x12.enrichments.AlertR\x05alert2\xa7\x02\n" + + "\x05alert\x18\x01 \x01(\v2\x12.enrichments.AlertR\x05alert\"@\n" + + "\x12EnrichBatchRequest\x12*\n" + + "\x06alerts\x18\x01 \x03(\v2\x12.enrichments.AlertR\x06alerts\"A\n" + + "\x13EnrichBatchResponse\x12*\n" + + "\x06alerts\x18\x01 \x03(\v2\x12.enrichments.AlertR\x06alerts2\xf9\x02\n" + "\n" + "Enrichment\x12B\n" + "\vGetMetadata\x12\x12.enrichments.Empty\x1a\x1f.enrichments.EnrichmentMetadata\x12.\n" + "\x04Init\x12\x12.enrichments.Empty\x1a\x12.enrichments.Empty\x12A\n" + - "\x06Enrich\x12\x1a.enrichments.EnrichRequest\x1a\x1b.enrichments.EnrichResponse\x122\n" + + "\x06Enrich\x12\x1a.enrichments.EnrichRequest\x1a\x1b.enrichments.EnrichResponse\x12P\n" + + "\vEnrichBatch\x12\x1f.enrichments.EnrichBatchRequest\x1a .enrichments.EnrichBatchResponse\x122\n" + "\bShutdown\x12\x12.enrichments.Empty\x1a\x12.enrichments.Empty\x12.\n" + "\x04Ping\x12\x12.enrichments.Empty\x1a\x12.enrichments.EmptyB\"Z rpc_enrichments/;rpc_enrichmentsb\x06proto3" var ( - file_enrichment_proto_rawDescOnce sync.Once - 
file_enrichment_proto_rawDescData []byte + file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescOnce sync.Once + file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescData []byte ) -func file_enrichment_proto_rawDescGZIP() []byte { - file_enrichment_proto_rawDescOnce.Do(func() { - file_enrichment_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_enrichment_proto_rawDesc), len(file_enrichment_proto_rawDesc))) +func file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP() []byte { + file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescOnce.Do(func() { + file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDesc), len(file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDesc))) }) - return file_enrichment_proto_rawDescData -} - -var file_enrichment_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_enrichment_proto_goTypes = []any{ - (*Empty)(nil), // 0: enrichments.Empty - (*EnrichmentMetadata)(nil), // 1: enrichments.EnrichmentMetadata - (*Alert)(nil), // 2: enrichments.Alert - (*EnrichRequest)(nil), // 3: enrichments.EnrichRequest - (*EnrichResponse)(nil), // 4: enrichments.EnrichResponse -} -var file_enrichment_proto_depIdxs = []int32{ - 2, // 0: enrichments.EnrichRequest.alert:type_name -> enrichments.Alert - 2, // 1: enrichments.EnrichResponse.alert:type_name -> enrichments.Alert - 0, // 2: enrichments.Enrichment.GetMetadata:input_type -> enrichments.Empty - 0, // 3: enrichments.Enrichment.Init:input_type -> enrichments.Empty - 3, // 4: enrichments.Enrichment.Enrich:input_type -> enrichments.EnrichRequest - 0, // 5: enrichments.Enrichment.Shutdown:input_type -> enrichments.Empty - 0, // 6: enrichments.Enrichment.Ping:input_type -> enrichments.Empty - 1, // 7: enrichments.Enrichment.GetMetadata:output_type -> enrichments.EnrichmentMetadata - 0, // 8: 
enrichments.Enrichment.Init:output_type -> enrichments.Empty - 4, // 9: enrichments.Enrichment.Enrich:output_type -> enrichments.EnrichResponse - 0, // 10: enrichments.Enrichment.Shutdown:output_type -> enrichments.Empty - 0, // 11: enrichments.Enrichment.Ping:output_type -> enrichments.Empty - 7, // [7:12] is the sub-list for method output_type - 2, // [2:7] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_enrichment_proto_init() } -func file_enrichment_proto_init() { - if File_enrichment_proto != nil { + return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescData +} + +var file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_pkg_enrichments_rpc_enrichments_enrichment_proto_goTypes = []any{ + (*Empty)(nil), // 0: enrichments.Empty + (*EnrichmentMetadata)(nil), // 1: enrichments.EnrichmentMetadata + (*Alert)(nil), // 2: enrichments.Alert + (*EnrichRequest)(nil), // 3: enrichments.EnrichRequest + (*EnrichResponse)(nil), // 4: enrichments.EnrichResponse + (*EnrichBatchRequest)(nil), // 5: enrichments.EnrichBatchRequest + (*EnrichBatchResponse)(nil), // 6: enrichments.EnrichBatchResponse +} +var file_pkg_enrichments_rpc_enrichments_enrichment_proto_depIdxs = []int32{ + 2, // 0: enrichments.EnrichRequest.alert:type_name -> enrichments.Alert + 2, // 1: enrichments.EnrichResponse.alert:type_name -> enrichments.Alert + 2, // 2: enrichments.EnrichBatchRequest.alerts:type_name -> enrichments.Alert + 2, // 3: enrichments.EnrichBatchResponse.alerts:type_name -> enrichments.Alert + 0, // 4: enrichments.Enrichment.GetMetadata:input_type -> enrichments.Empty + 0, // 5: enrichments.Enrichment.Init:input_type -> enrichments.Empty + 3, // 6: enrichments.Enrichment.Enrich:input_type -> enrichments.EnrichRequest + 5, // 7: 
enrichments.Enrichment.EnrichBatch:input_type -> enrichments.EnrichBatchRequest + 0, // 8: enrichments.Enrichment.Shutdown:input_type -> enrichments.Empty + 0, // 9: enrichments.Enrichment.Ping:input_type -> enrichments.Empty + 1, // 10: enrichments.Enrichment.GetMetadata:output_type -> enrichments.EnrichmentMetadata + 0, // 11: enrichments.Enrichment.Init:output_type -> enrichments.Empty + 4, // 12: enrichments.Enrichment.Enrich:output_type -> enrichments.EnrichResponse + 6, // 13: enrichments.Enrichment.EnrichBatch:output_type -> enrichments.EnrichBatchResponse + 0, // 14: enrichments.Enrichment.Shutdown:output_type -> enrichments.Empty + 0, // 15: enrichments.Enrichment.Ping:output_type -> enrichments.Empty + 10, // [10:16] is the sub-list for method output_type + 4, // [4:10] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_pkg_enrichments_rpc_enrichments_enrichment_proto_init() } +func file_pkg_enrichments_rpc_enrichments_enrichment_proto_init() { + if File_pkg_enrichments_rpc_enrichments_enrichment_proto != nil { return } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_enrichment_proto_rawDesc), len(file_enrichment_proto_rawDesc)), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDesc), len(file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDesc)), NumEnums: 0, - NumMessages: 5, + NumMessages: 7, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_enrichment_proto_goTypes, - DependencyIndexes: file_enrichment_proto_depIdxs, - MessageInfos: file_enrichment_proto_msgTypes, + GoTypes: file_pkg_enrichments_rpc_enrichments_enrichment_proto_goTypes, + DependencyIndexes: 
file_pkg_enrichments_rpc_enrichments_enrichment_proto_depIdxs, + MessageInfos: file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes, }.Build() - File_enrichment_proto = out.File - file_enrichment_proto_goTypes = nil - file_enrichment_proto_depIdxs = nil + File_pkg_enrichments_rpc_enrichments_enrichment_proto = out.File + file_pkg_enrichments_rpc_enrichments_enrichment_proto_goTypes = nil + file_pkg_enrichments_rpc_enrichments_enrichment_proto_depIdxs = nil } diff --git a/pkg/enrichments/rpc_enrichments/enrichment.proto b/pkg/enrichments/rpc_enrichments/enrichment.proto index 108afd3..96e6f2f 100644 --- a/pkg/enrichments/rpc_enrichments/enrichment.proto +++ b/pkg/enrichments/rpc_enrichments/enrichment.proto @@ -12,13 +12,16 @@ message EnrichmentMetadata { string version = 6; } message Alert { bytes json = 1; } -message EnrichRequest { Alert alert = 1; } +message EnrichRequest { Alert alert = 1; } message EnrichResponse { Alert alert = 1; } +message EnrichBatchRequest { repeated Alert alerts = 1; } +message EnrichBatchResponse { repeated Alert alerts = 1; } service Enrichment { - rpc GetMetadata(Empty) returns (EnrichmentMetadata); - rpc Init(Empty) returns (Empty); - rpc Enrich(EnrichRequest) returns (EnrichResponse); - rpc Shutdown(Empty) returns (Empty); - rpc Ping(Empty) returns (Empty); + rpc GetMetadata(Empty) returns (EnrichmentMetadata); + rpc Init(Empty) returns (Empty); + rpc Enrich(EnrichRequest) returns (EnrichResponse); + rpc EnrichBatch(EnrichBatchRequest) returns (EnrichBatchResponse); + rpc Shutdown(Empty) returns (Empty); + rpc Ping(Empty) returns (Empty); } diff --git a/pkg/enrichments/rpc_enrichments/enrichment_grpc.pb.go b/pkg/enrichments/rpc_enrichments/enrichment_grpc.pb.go index 204c57a..63c5821 100644 --- a/pkg/enrichments/rpc_enrichments/enrichment_grpc.pb.go +++ b/pkg/enrichments/rpc_enrichments/enrichment_grpc.pb.go @@ -2,7 +2,7 @@ // versions: // - protoc-gen-go-grpc v1.5.1 // - protoc v7.34.0 -// source: enrichment.proto +// 
source: pkg/enrichments/rpc_enrichments/enrichment.proto package rpc_enrichments @@ -22,6 +22,7 @@ const ( Enrichment_GetMetadata_FullMethodName = "/enrichments.Enrichment/GetMetadata" Enrichment_Init_FullMethodName = "/enrichments.Enrichment/Init" Enrichment_Enrich_FullMethodName = "/enrichments.Enrichment/Enrich" + Enrichment_EnrichBatch_FullMethodName = "/enrichments.Enrichment/EnrichBatch" Enrichment_Shutdown_FullMethodName = "/enrichments.Enrichment/Shutdown" Enrichment_Ping_FullMethodName = "/enrichments.Enrichment/Ping" ) @@ -33,6 +34,7 @@ type EnrichmentClient interface { GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*EnrichmentMetadata, error) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Enrich(ctx context.Context, in *EnrichRequest, opts ...grpc.CallOption) (*EnrichResponse, error) + EnrichBatch(ctx context.Context, in *EnrichBatchRequest, opts ...grpc.CallOption) (*EnrichBatchResponse, error) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Ping(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) } @@ -75,6 +77,16 @@ func (c *enrichmentClient) Enrich(ctx context.Context, in *EnrichRequest, opts . return out, nil } +func (c *enrichmentClient) EnrichBatch(ctx context.Context, in *EnrichBatchRequest, opts ...grpc.CallOption) (*EnrichBatchResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(EnrichBatchResponse) + err := c.cc.Invoke(ctx, Enrichment_EnrichBatch_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *enrichmentClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(Empty) @@ -102,6 +114,7 @@ type EnrichmentServer interface { GetMetadata(context.Context, *Empty) (*EnrichmentMetadata, error) Init(context.Context, *Empty) (*Empty, error) Enrich(context.Context, *EnrichRequest) (*EnrichResponse, error) + EnrichBatch(context.Context, *EnrichBatchRequest) (*EnrichBatchResponse, error) Shutdown(context.Context, *Empty) (*Empty, error) Ping(context.Context, *Empty) (*Empty, error) mustEmbedUnimplementedEnrichmentServer() @@ -123,6 +136,9 @@ func (UnimplementedEnrichmentServer) Init(context.Context, *Empty) (*Empty, erro func (UnimplementedEnrichmentServer) Enrich(context.Context, *EnrichRequest) (*EnrichResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Enrich not implemented") } +func (UnimplementedEnrichmentServer) EnrichBatch(context.Context, *EnrichBatchRequest) (*EnrichBatchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EnrichBatch not implemented") +} func (UnimplementedEnrichmentServer) Shutdown(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") } @@ -204,6 +220,24 @@ func _Enrichment_Enrich_Handler(srv interface{}, ctx context.Context, dec func(i return interceptor(ctx, in, info, handler) } +func _Enrichment_EnrichBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EnrichBatchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EnrichmentServer).EnrichBatch(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Enrichment_EnrichBatch_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EnrichmentServer).EnrichBatch(ctx, req.(*EnrichBatchRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Enrichment_Shutdown_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { @@ -259,6 +293,10 @@ var Enrichment_ServiceDesc = grpc.ServiceDesc{ MethodName: "Enrich", Handler: _Enrichment_Enrich_Handler, }, + { + MethodName: "EnrichBatch", + Handler: _Enrichment_EnrichBatch_Handler, + }, { MethodName: "Shutdown", Handler: _Enrichment_Shutdown_Handler, @@ -269,5 +307,5 @@ var Enrichment_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "enrichment.proto", + Metadata: "pkg/enrichments/rpc_enrichments/enrichment.proto", } diff --git a/pkg/enrichments/sdk/serve.go b/pkg/enrichments/sdk/serve.go index df08ca6..2b54fab 100644 --- a/pkg/enrichments/sdk/serve.go +++ b/pkg/enrichments/sdk/serve.go @@ -82,6 +82,26 @@ func (s *server) Enrich(ctx context.Context, req *rpc_enrichments.EnrichRequest) return &rpc_enrichments.EnrichResponse{Alert: &rpc_enrichments.Alert{Json: b}}, nil } +func (s *server) EnrichBatch(ctx context.Context, req *rpc_enrichments.EnrichBatchRequest) (*rpc_enrichments.EnrichBatchResponse, error) { + results := make([]*rpc_enrichments.Alert, 0, len(req.GetAlerts())) + for _, a := range req.GetAlerts() { + var alert map[string]any + if err := json.Unmarshal(a.GetJson(), &alert); err != nil { + return nil, err + } + enriched, err := s.enrichment.Enrich(ctx, alert) + if err != nil { + return nil, err + } + b, err2 := json.Marshal(enriched) + if err2 != nil { + return nil, err2 + } + results = append(results, &rpc_enrichments.Alert{Json: b}) + } + return &rpc_enrichments.EnrichBatchResponse{Alerts: results}, nil +} + func (s *server) Ping(_ context.Context, _ *rpc_enrichments.Empty) (*rpc_enrichments.Empty, error) { return &rpc_enrichments.Empty{}, nil } diff --git a/pkg/formatters/formatter.go b/pkg/formatters/formatter.go index 9168ff7..2fe78e9 100644 --- a/pkg/formatters/formatter.go +++ b/pkg/formatters/formatter.go @@ -8,12 +8,13 @@ import ( ) 
type IFormatter interface { - Format(ctx context.Context, alert *alerts.Alert) (map[string]any, errors.Error) + Format(ctx context.Context, alrts []*alerts.Alert) ([]map[string]any, errors.Error) Id() string Name() string Description() string Enabled() bool + Version() string Checksum() string String() string } diff --git a/pkg/formatters/rpc_formatter.go b/pkg/formatters/rpc_formatter.go index 261f3fb..90b0d91 100644 --- a/pkg/formatters/rpc_formatter.go +++ b/pkg/formatters/rpc_formatter.go @@ -29,23 +29,32 @@ func (f *rpcFormatter) Id() string { func (f *rpcFormatter) Name() string { return f.meta.GetName() } func (f *rpcFormatter) Description() string { return f.meta.GetDescription() } func (f *rpcFormatter) Enabled() bool { return f.meta.GetEnabled() } +func (f *rpcFormatter) Version() string { return f.meta.GetVersion() } func (f *rpcFormatter) Checksum() string { return f.checksum } func (f *rpcFormatter) String() string { return fmt.Sprintf("Formatter '%s' (id:%s, enabled:%t)", f.meta.GetName(), f.meta.GetId(), f.meta.GetEnabled()) } -func (f *rpcFormatter) Format(ctx context.Context, alert *alerts.Alert) (map[string]any, errors.Error) { - b, err := json.Marshal(alert) - if err != nil { - return nil, errors.NewE(err) +func (f *rpcFormatter) Format(ctx context.Context, alrts []*alerts.Alert) ([]map[string]any, errors.Error) { + alertJSONs := make([][]byte, 0, len(alrts)) + for _, alrt := range alrts { + b, err := json.Marshal(alrt) + if err != nil { + return nil, errors.NewE(err) + } + alertJSONs = append(alertJSONs, b) } - resp, err := f.client.Format(ctx, &rpc_formatters.FormatRequest{AlertJson: b}) + resp, err := f.client.FormatBatch(ctx, &rpc_formatters.FormatBatchRequest{AlertJson: alertJSONs}) if err != nil { return nil, errors.NewE(err) } - var result map[string]any - if err := json.Unmarshal(resp.GetResultJson(), &result); err != nil { - return nil, errors.NewE(err) + results := make([]map[string]any, len(resp.GetResultJson())) + for i, raw := range 
resp.GetResultJson() { + var result map[string]any + if err := json.Unmarshal(raw, &result); err != nil { + return nil, errors.NewE(err) + } + results[i] = result } - return result, nil + return results, nil } diff --git a/pkg/formatters/rpc_formatters/formatter.pb.go b/pkg/formatters/rpc_formatters/formatter.pb.go index 03314c7..209cd55 100644 --- a/pkg/formatters/rpc_formatters/formatter.pb.go +++ b/pkg/formatters/rpc_formatters/formatter.pb.go @@ -2,7 +2,7 @@ // versions: // protoc-gen-go v1.36.9 // protoc v7.34.0 -// source: formatter.proto +// source: pkg/formatters/rpc_formatters/formatter.proto package rpc_formatters @@ -29,7 +29,7 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - mi := &file_formatter_proto_msgTypes[0] + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -41,7 +41,7 @@ func (x *Empty) String() string { func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_formatter_proto_msgTypes[0] + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -54,7 +54,7 @@ func (x *Empty) ProtoReflect() protoreflect.Message { // Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
func (*Empty) Descriptor() ([]byte, []int) { - return file_formatter_proto_rawDescGZIP(), []int{0} + return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{0} } type FormatterMetadata struct { @@ -70,7 +70,7 @@ type FormatterMetadata struct { func (x *FormatterMetadata) Reset() { *x = FormatterMetadata{} - mi := &file_formatter_proto_msgTypes[1] + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -82,7 +82,7 @@ func (x *FormatterMetadata) String() string { func (*FormatterMetadata) ProtoMessage() {} func (x *FormatterMetadata) ProtoReflect() protoreflect.Message { - mi := &file_formatter_proto_msgTypes[1] + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -95,7 +95,7 @@ func (x *FormatterMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use FormatterMetadata.ProtoReflect.Descriptor instead. 
func (*FormatterMetadata) Descriptor() ([]byte, []int) { - return file_formatter_proto_rawDescGZIP(), []int{1} + return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{1} } func (x *FormatterMetadata) GetId() string { @@ -142,7 +142,7 @@ type FormatRequest struct { func (x *FormatRequest) Reset() { *x = FormatRequest{} - mi := &file_formatter_proto_msgTypes[2] + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -154,7 +154,7 @@ func (x *FormatRequest) String() string { func (*FormatRequest) ProtoMessage() {} func (x *FormatRequest) ProtoReflect() protoreflect.Message { - mi := &file_formatter_proto_msgTypes[2] + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -167,7 +167,7 @@ func (x *FormatRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FormatRequest.ProtoReflect.Descriptor instead. 
func (*FormatRequest) Descriptor() ([]byte, []int) { - return file_formatter_proto_rawDescGZIP(), []int{2} + return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{2} } func (x *FormatRequest) GetAlertJson() []byte { @@ -186,7 +186,7 @@ type FormatResponse struct { func (x *FormatResponse) Reset() { *x = FormatResponse{} - mi := &file_formatter_proto_msgTypes[3] + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -198,7 +198,7 @@ func (x *FormatResponse) String() string { func (*FormatResponse) ProtoMessage() {} func (x *FormatResponse) ProtoReflect() protoreflect.Message { - mi := &file_formatter_proto_msgTypes[3] + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -211,7 +211,7 @@ func (x *FormatResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FormatResponse.ProtoReflect.Descriptor instead. 
func (*FormatResponse) Descriptor() ([]byte, []int) { - return file_formatter_proto_rawDescGZIP(), []int{3} + return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{3} } func (x *FormatResponse) GetResultJson() []byte { @@ -221,88 +221,188 @@ func (x *FormatResponse) GetResultJson() []byte { return nil } -var File_formatter_proto protoreflect.FileDescriptor +type FormatBatchRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + AlertJson [][]byte `protobuf:"bytes,1,rep,name=alert_json,json=alertJson,proto3" json:"alert_json,omitempty"` // one JSON-encoded alerts.Alert per alert + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FormatBatchRequest) Reset() { + *x = FormatBatchRequest{} + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FormatBatchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FormatBatchRequest) ProtoMessage() {} + +func (x *FormatBatchRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FormatBatchRequest.ProtoReflect.Descriptor instead. 
+func (*FormatBatchRequest) Descriptor() ([]byte, []int) { + return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{4} +} -const file_formatter_proto_rawDesc = "" + +func (x *FormatBatchRequest) GetAlertJson() [][]byte { + if x != nil { + return x.AlertJson + } + return nil +} + +type FormatBatchResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + ResultJson [][]byte `protobuf:"bytes,1,rep,name=result_json,json=resultJson,proto3" json:"result_json,omitempty"` // one JSON-encoded map[string]any per alert + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FormatBatchResponse) Reset() { + *x = FormatBatchResponse{} + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FormatBatchResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FormatBatchResponse) ProtoMessage() {} + +func (x *FormatBatchResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FormatBatchResponse.ProtoReflect.Descriptor instead. 
+func (*FormatBatchResponse) Descriptor() ([]byte, []int) { + return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{5} +} + +func (x *FormatBatchResponse) GetResultJson() [][]byte { + if x != nil { + return x.ResultJson + } + return nil +} + +var File_pkg_formatters_rpc_formatters_formatter_proto protoreflect.FileDescriptor + +const file_pkg_formatters_rpc_formatters_formatter_proto_rawDesc = "" + "\n" + - "\x0fformatter.proto\x12\n" + + "-pkg/formatters/rpc_formatters/formatter.proto\x12\n" + "formatters\"\a\n" + - "\x05Empty\"s\n" + + "\x05Empty\"\x8d\x01\n" + "\x11FormatterMetadata\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + "\x04name\x18\x02 \x01(\tR\x04name\x12 \n" + "\vdescription\x18\x03 \x01(\tR\vdescription\x12\x18\n" + - "\aenabled\x18\x04 \x01(\bR\aenabled\".\n" + + "\aenabled\x18\x04 \x01(\bR\aenabled\x12\x18\n" + + "\aversion\x18\x05 \x01(\tR\aversion\".\n" + "\rFormatRequest\x12\x1d\n" + "\n" + "alert_json\x18\x01 \x01(\fR\talertJson\"1\n" + "\x0eFormatResponse\x12\x1f\n" + "\vresult_json\x18\x01 \x01(\fR\n" + - "resultJson2\x9b\x02\n" + + "resultJson\"3\n" + + "\x12FormatBatchRequest\x12\x1d\n" + + "\n" + + "alert_json\x18\x01 \x03(\fR\talertJson\"6\n" + + "\x13FormatBatchResponse\x12\x1f\n" + + "\vresult_json\x18\x01 \x03(\fR\n" + + "resultJson2\xeb\x02\n" + "\tFormatter\x12?\n" + "\vGetMetadata\x12\x11.formatters.Empty\x1a\x1d.formatters.FormatterMetadata\x12,\n" + "\x04Init\x12\x11.formatters.Empty\x1a\x11.formatters.Empty\x12?\n" + - "\x06Format\x12\x19.formatters.FormatRequest\x1a\x1a.formatters.FormatResponse\x120\n" + + "\x06Format\x12\x19.formatters.FormatRequest\x1a\x1a.formatters.FormatResponse\x12N\n" + + "\vFormatBatch\x12\x1e.formatters.FormatBatchRequest\x1a\x1f.formatters.FormatBatchResponse\x120\n" + "\bShutdown\x12\x11.formatters.Empty\x1a\x11.formatters.Empty\x12,\n" + 
"\x04Ping\x12\x11.formatters.Empty\x1a\x11.formatters.EmptyB;Z9github.com/harishhary/blink/pkg/formatters/rpc_formattersb\x06proto3" var ( - file_formatter_proto_rawDescOnce sync.Once - file_formatter_proto_rawDescData []byte + file_pkg_formatters_rpc_formatters_formatter_proto_rawDescOnce sync.Once + file_pkg_formatters_rpc_formatters_formatter_proto_rawDescData []byte ) -func file_formatter_proto_rawDescGZIP() []byte { - file_formatter_proto_rawDescOnce.Do(func() { - file_formatter_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_formatter_proto_rawDesc), len(file_formatter_proto_rawDesc))) +func file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP() []byte { + file_pkg_formatters_rpc_formatters_formatter_proto_rawDescOnce.Do(func() { + file_pkg_formatters_rpc_formatters_formatter_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_pkg_formatters_rpc_formatters_formatter_proto_rawDesc), len(file_pkg_formatters_rpc_formatters_formatter_proto_rawDesc))) }) - return file_formatter_proto_rawDescData + return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescData } -var file_formatter_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_formatter_proto_goTypes = []any{ - (*Empty)(nil), // 0: formatters.Empty - (*FormatterMetadata)(nil), // 1: formatters.FormatterMetadata - (*FormatRequest)(nil), // 2: formatters.FormatRequest - (*FormatResponse)(nil), // 3: formatters.FormatResponse +var file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_pkg_formatters_rpc_formatters_formatter_proto_goTypes = []any{ + (*Empty)(nil), // 0: formatters.Empty + (*FormatterMetadata)(nil), // 1: formatters.FormatterMetadata + (*FormatRequest)(nil), // 2: formatters.FormatRequest + (*FormatResponse)(nil), // 3: formatters.FormatResponse + (*FormatBatchRequest)(nil), // 4: formatters.FormatBatchRequest + (*FormatBatchResponse)(nil), // 5: 
formatters.FormatBatchResponse } -var file_formatter_proto_depIdxs = []int32{ +var file_pkg_formatters_rpc_formatters_formatter_proto_depIdxs = []int32{ 0, // 0: formatters.Formatter.GetMetadata:input_type -> formatters.Empty 0, // 1: formatters.Formatter.Init:input_type -> formatters.Empty 2, // 2: formatters.Formatter.Format:input_type -> formatters.FormatRequest - 0, // 3: formatters.Formatter.Shutdown:input_type -> formatters.Empty - 0, // 4: formatters.Formatter.Ping:input_type -> formatters.Empty - 1, // 5: formatters.Formatter.GetMetadata:output_type -> formatters.FormatterMetadata - 0, // 6: formatters.Formatter.Init:output_type -> formatters.Empty - 3, // 7: formatters.Formatter.Format:output_type -> formatters.FormatResponse - 0, // 8: formatters.Formatter.Shutdown:output_type -> formatters.Empty - 0, // 9: formatters.Formatter.Ping:output_type -> formatters.Empty - 5, // [5:10] is the sub-list for method output_type - 0, // [0:5] is the sub-list for method input_type + 4, // 3: formatters.Formatter.FormatBatch:input_type -> formatters.FormatBatchRequest + 0, // 4: formatters.Formatter.Shutdown:input_type -> formatters.Empty + 0, // 5: formatters.Formatter.Ping:input_type -> formatters.Empty + 1, // 6: formatters.Formatter.GetMetadata:output_type -> formatters.FormatterMetadata + 0, // 7: formatters.Formatter.Init:output_type -> formatters.Empty + 3, // 8: formatters.Formatter.Format:output_type -> formatters.FormatResponse + 5, // 9: formatters.Formatter.FormatBatch:output_type -> formatters.FormatBatchResponse + 0, // 10: formatters.Formatter.Shutdown:output_type -> formatters.Empty + 0, // 11: formatters.Formatter.Ping:output_type -> formatters.Empty + 6, // [6:12] is the sub-list for method output_type + 0, // [0:6] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name } -func init() { file_formatter_proto_init() } 
-func file_formatter_proto_init() { - if File_formatter_proto != nil { +func init() { file_pkg_formatters_rpc_formatters_formatter_proto_init() } +func file_pkg_formatters_rpc_formatters_formatter_proto_init() { + if File_pkg_formatters_rpc_formatters_formatter_proto != nil { return } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_formatter_proto_rawDesc), len(file_formatter_proto_rawDesc)), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_formatters_rpc_formatters_formatter_proto_rawDesc), len(file_pkg_formatters_rpc_formatters_formatter_proto_rawDesc)), NumEnums: 0, - NumMessages: 4, + NumMessages: 6, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_formatter_proto_goTypes, - DependencyIndexes: file_formatter_proto_depIdxs, - MessageInfos: file_formatter_proto_msgTypes, + GoTypes: file_pkg_formatters_rpc_formatters_formatter_proto_goTypes, + DependencyIndexes: file_pkg_formatters_rpc_formatters_formatter_proto_depIdxs, + MessageInfos: file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes, }.Build() - File_formatter_proto = out.File - file_formatter_proto_goTypes = nil - file_formatter_proto_depIdxs = nil + File_pkg_formatters_rpc_formatters_formatter_proto = out.File + file_pkg_formatters_rpc_formatters_formatter_proto_goTypes = nil + file_pkg_formatters_rpc_formatters_formatter_proto_depIdxs = nil } diff --git a/pkg/formatters/rpc_formatters/formatter.proto b/pkg/formatters/rpc_formatters/formatter.proto index aa3e1d7..ce27f8f 100644 --- a/pkg/formatters/rpc_formatters/formatter.proto +++ b/pkg/formatters/rpc_formatters/formatter.proto @@ -22,10 +22,19 @@ message FormatResponse { bytes result_json = 1; // JSON-encoded map[string]any } +message FormatBatchRequest { + repeated bytes alert_json = 1; // one JSON-encoded alerts.Alert per alert +} + +message FormatBatchResponse { + repeated bytes result_json = 1; // one 
JSON-encoded map[string]any per alert +} + service Formatter { - rpc GetMetadata(Empty) returns (FormatterMetadata); - rpc Init(Empty) returns (Empty); - rpc Format(FormatRequest) returns (FormatResponse); - rpc Shutdown(Empty) returns (Empty); - rpc Ping(Empty) returns (Empty); + rpc GetMetadata(Empty) returns (FormatterMetadata); + rpc Init(Empty) returns (Empty); + rpc Format(FormatRequest) returns (FormatResponse); + rpc FormatBatch(FormatBatchRequest) returns (FormatBatchResponse); + rpc Shutdown(Empty) returns (Empty); + rpc Ping(Empty) returns (Empty); } diff --git a/pkg/formatters/rpc_formatters/formatter_grpc.pb.go b/pkg/formatters/rpc_formatters/formatter_grpc.pb.go index 68abcc0..0a47e16 100644 --- a/pkg/formatters/rpc_formatters/formatter_grpc.pb.go +++ b/pkg/formatters/rpc_formatters/formatter_grpc.pb.go @@ -2,7 +2,7 @@ // versions: // - protoc-gen-go-grpc v1.5.1 // - protoc v7.34.0 -// source: formatter.proto +// source: pkg/formatters/rpc_formatters/formatter.proto package rpc_formatters @@ -22,6 +22,7 @@ const ( Formatter_GetMetadata_FullMethodName = "/formatters.Formatter/GetMetadata" Formatter_Init_FullMethodName = "/formatters.Formatter/Init" Formatter_Format_FullMethodName = "/formatters.Formatter/Format" + Formatter_FormatBatch_FullMethodName = "/formatters.Formatter/FormatBatch" Formatter_Shutdown_FullMethodName = "/formatters.Formatter/Shutdown" Formatter_Ping_FullMethodName = "/formatters.Formatter/Ping" ) @@ -33,6 +34,7 @@ type FormatterClient interface { GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*FormatterMetadata, error) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Format(ctx context.Context, in *FormatRequest, opts ...grpc.CallOption) (*FormatResponse, error) + FormatBatch(ctx context.Context, in *FormatBatchRequest, opts ...grpc.CallOption) (*FormatBatchResponse, error) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Ping(ctx context.Context, 
in *Empty, opts ...grpc.CallOption) (*Empty, error) } @@ -75,6 +77,16 @@ func (c *formatterClient) Format(ctx context.Context, in *FormatRequest, opts .. return out, nil } +func (c *formatterClient) FormatBatch(ctx context.Context, in *FormatBatchRequest, opts ...grpc.CallOption) (*FormatBatchResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(FormatBatchResponse) + err := c.cc.Invoke(ctx, Formatter_FormatBatch_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *formatterClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) @@ -102,6 +114,7 @@ type FormatterServer interface { GetMetadata(context.Context, *Empty) (*FormatterMetadata, error) Init(context.Context, *Empty) (*Empty, error) Format(context.Context, *FormatRequest) (*FormatResponse, error) + FormatBatch(context.Context, *FormatBatchRequest) (*FormatBatchResponse, error) Shutdown(context.Context, *Empty) (*Empty, error) Ping(context.Context, *Empty) (*Empty, error) mustEmbedUnimplementedFormatterServer() @@ -123,6 +136,9 @@ func (UnimplementedFormatterServer) Init(context.Context, *Empty) (*Empty, error func (UnimplementedFormatterServer) Format(context.Context, *FormatRequest) (*FormatResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Format not implemented") } +func (UnimplementedFormatterServer) FormatBatch(context.Context, *FormatBatchRequest) (*FormatBatchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FormatBatch not implemented") +} func (UnimplementedFormatterServer) Shutdown(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") } @@ -204,6 +220,24 @@ func _Formatter_Format_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, 
info, handler) } +func _Formatter_FormatBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FormatBatchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FormatterServer).FormatBatch(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Formatter_FormatBatch_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FormatterServer).FormatBatch(ctx, req.(*FormatBatchRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Formatter_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { @@ -259,6 +293,10 @@ var Formatter_ServiceDesc = grpc.ServiceDesc{ MethodName: "Format", Handler: _Formatter_Format_Handler, }, + { + MethodName: "FormatBatch", + Handler: _Formatter_FormatBatch_Handler, + }, { MethodName: "Shutdown", Handler: _Formatter_Shutdown_Handler, @@ -269,5 +307,5 @@ var Formatter_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "formatter.proto", + Metadata: "pkg/formatters/rpc_formatters/formatter.proto", } diff --git a/pkg/formatters/sdk/serve.go b/pkg/formatters/sdk/serve.go index 103d7a1..ca59e15 100644 --- a/pkg/formatters/sdk/serve.go +++ b/pkg/formatters/sdk/serve.go @@ -73,6 +73,26 @@ func (s *server) Format(ctx context.Context, req *rpc_formatters.FormatRequest) return &rpc_formatters.FormatResponse{ResultJson: b}, nil } +func (s *server) FormatBatch(ctx context.Context, req *rpc_formatters.FormatBatchRequest) (*rpc_formatters.FormatBatchResponse, error) { + results := make([][]byte, 0, len(req.GetAlertJson())) + for _, raw := range req.GetAlertJson() { + var alert map[string]any + if err := json.Unmarshal(raw, &alert); err != nil { + 
return nil, err + } + result, err := s.formatter.Format(ctx, alert) + if err != nil { + return nil, err + } + b, err2 := json.Marshal(result) + if err2 != nil { + return nil, err2 + } + results = append(results, b) + } + return &rpc_formatters.FormatBatchResponse{ResultJson: results}, nil +} + func (s *server) Ping(_ context.Context, _ *rpc_formatters.Empty) (*rpc_formatters.Empty, error) { return &rpc_formatters.Empty{}, nil } diff --git a/pkg/matchers/matcher.go b/pkg/matchers/matcher.go index 187f6a8..17c74b2 100644 --- a/pkg/matchers/matcher.go +++ b/pkg/matchers/matcher.go @@ -12,7 +12,8 @@ type Matcher interface { Name() string Description() string Enabled() bool + Version() string Checksum() string String() string - Match(ctx context.Context, event events.Event) (bool, errors.Error) + Match(ctx context.Context, evts []events.Event) ([]bool, errors.Error) } diff --git a/pkg/matchers/rpc_matcher.go b/pkg/matchers/rpc_matcher.go index 9e6f83d..05118a0 100644 --- a/pkg/matchers/rpc_matcher.go +++ b/pkg/matchers/rpc_matcher.go @@ -35,19 +35,24 @@ func (r *rpcMatcher) Id() string { func (r *rpcMatcher) Name() string { return r.meta.GetName() } func (r *rpcMatcher) Description() string { return r.meta.GetDescription() } func (r *rpcMatcher) Enabled() bool { return r.meta.GetEnabled() } +func (r *rpcMatcher) Version() string { return r.meta.GetVersion() } func (r *rpcMatcher) Checksum() string { return r.checksum } func (r *rpcMatcher) String() string { return "RpcMatcher '" + r.meta.GetName() + "' id:'" + r.meta.GetId() + "'" } -func (r *rpcMatcher) Match(ctx context.Context, event events.Event) (bool, errors.Error) { - b, err := json.Marshal(event) - if err != nil { - return false, errors.New(err) +func (r *rpcMatcher) Match(ctx context.Context, evts []events.Event) ([]bool, errors.Error) { + protoEvents := make([]*rpc_matchers.Event, 0, len(evts)) + for _, ev := range evts { + b, err := json.Marshal(ev) + if err != nil { + return nil, errors.New(err) + } + 
protoEvents = append(protoEvents, &rpc_matchers.Event{Json: b}) } - resp, err := r.client.Match(ctx, &rpc_matchers.MatchRequest{Event: &rpc_matchers.Event{Json: b}}) + resp, err := r.client.MatchBatch(ctx, &rpc_matchers.MatchBatchRequest{Events: protoEvents}) if err != nil { - return false, errors.New(err) + return nil, errors.New(err) } return resp.GetMatched(), nil } diff --git a/pkg/matchers/rpc_matchers/matcher.pb.go b/pkg/matchers/rpc_matchers/matcher.pb.go index 7754ac2..051d6a4 100644 --- a/pkg/matchers/rpc_matchers/matcher.pb.go +++ b/pkg/matchers/rpc_matchers/matcher.pb.go @@ -2,7 +2,7 @@ // versions: // protoc-gen-go v1.36.9 // protoc v7.34.0 -// source: matcher.proto +// source: pkg/matchers/rpc_matchers/matcher.proto package rpc_matchers @@ -29,7 +29,7 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - mi := &file_matcher_proto_msgTypes[0] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -41,7 +41,7 @@ func (x *Empty) String() string { func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_matcher_proto_msgTypes[0] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -54,7 +54,7 @@ func (x *Empty) ProtoReflect() protoreflect.Message { // Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
func (*Empty) Descriptor() ([]byte, []int) { - return file_matcher_proto_rawDescGZIP(), []int{0} + return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{0} } type MatcherMetadata struct { @@ -71,7 +71,7 @@ type MatcherMetadata struct { func (x *MatcherMetadata) Reset() { *x = MatcherMetadata{} - mi := &file_matcher_proto_msgTypes[1] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -83,7 +83,7 @@ func (x *MatcherMetadata) String() string { func (*MatcherMetadata) ProtoMessage() {} func (x *MatcherMetadata) ProtoReflect() protoreflect.Message { - mi := &file_matcher_proto_msgTypes[1] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -96,7 +96,7 @@ func (x *MatcherMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use MatcherMetadata.ProtoReflect.Descriptor instead. 
func (*MatcherMetadata) Descriptor() ([]byte, []int) { - return file_matcher_proto_rawDescGZIP(), []int{1} + return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{1} } func (x *MatcherMetadata) GetId() string { @@ -150,7 +150,7 @@ type Event struct { func (x *Event) Reset() { *x = Event{} - mi := &file_matcher_proto_msgTypes[2] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -162,7 +162,7 @@ func (x *Event) String() string { func (*Event) ProtoMessage() {} func (x *Event) ProtoReflect() protoreflect.Message { - mi := &file_matcher_proto_msgTypes[2] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -175,7 +175,7 @@ func (x *Event) ProtoReflect() protoreflect.Message { // Deprecated: Use Event.ProtoReflect.Descriptor instead. func (*Event) Descriptor() ([]byte, []int) { - return file_matcher_proto_rawDescGZIP(), []int{2} + return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{2} } func (x *Event) GetJson() []byte { @@ -194,7 +194,7 @@ type MatchRequest struct { func (x *MatchRequest) Reset() { *x = MatchRequest{} - mi := &file_matcher_proto_msgTypes[3] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -206,7 +206,7 @@ func (x *MatchRequest) String() string { func (*MatchRequest) ProtoMessage() {} func (x *MatchRequest) ProtoReflect() protoreflect.Message { - mi := &file_matcher_proto_msgTypes[3] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -219,7 +219,7 @@ func (x *MatchRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MatchRequest.ProtoReflect.Descriptor instead. 
func (*MatchRequest) Descriptor() ([]byte, []int) { - return file_matcher_proto_rawDescGZIP(), []int{3} + return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{3} } func (x *MatchRequest) GetEvent() *Event { @@ -238,7 +238,7 @@ type MatchResponse struct { func (x *MatchResponse) Reset() { *x = MatchResponse{} - mi := &file_matcher_proto_msgTypes[4] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -250,7 +250,7 @@ func (x *MatchResponse) String() string { func (*MatchResponse) ProtoMessage() {} func (x *MatchResponse) ProtoReflect() protoreflect.Message { - mi := &file_matcher_proto_msgTypes[4] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -263,7 +263,7 @@ func (x *MatchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MatchResponse.ProtoReflect.Descriptor instead. 
func (*MatchResponse) Descriptor() ([]byte, []int) { - return file_matcher_proto_rawDescGZIP(), []int{4} + return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{4} } func (x *MatchResponse) GetMatched() bool { @@ -273,11 +273,99 @@ func (x *MatchResponse) GetMatched() bool { return false } -var File_matcher_proto protoreflect.FileDescriptor +type MatchBatchRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Events []*Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MatchBatchRequest) Reset() { + *x = MatchBatchRequest{} + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MatchBatchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MatchBatchRequest) ProtoMessage() {} + +func (x *MatchBatchRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchBatchRequest.ProtoReflect.Descriptor instead. 
+func (*MatchBatchRequest) Descriptor() ([]byte, []int) { + return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{5} +} + +func (x *MatchBatchRequest) GetEvents() []*Event { + if x != nil { + return x.Events + } + return nil +} + +type MatchBatchResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Matched []bool `protobuf:"varint,1,rep,packed,name=matched,proto3" json:"matched,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MatchBatchResponse) Reset() { + *x = MatchBatchResponse{} + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MatchBatchResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} -const file_matcher_proto_rawDesc = "" + +func (*MatchBatchResponse) ProtoMessage() {} + +func (x *MatchBatchResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchBatchResponse.ProtoReflect.Descriptor instead. 
+func (*MatchBatchResponse) Descriptor() ([]byte, []int) { + return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{6} +} + +func (x *MatchBatchResponse) GetMatched() []bool { + if x != nil { + return x.Matched + } + return nil +} + +var File_pkg_matchers_rpc_matchers_matcher_proto protoreflect.FileDescriptor + +const file_pkg_matchers_rpc_matchers_matcher_proto_rawDesc = "" + "\n" + - "\rmatcher.proto\x12\bmatchers\"\a\n" + + "'pkg/matchers/rpc_matchers/matcher.proto\x12\bmatchers\"\a\n" + "\x05Empty\"\xa3\x01\n" + "\x0fMatcherMetadata\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + @@ -291,73 +379,84 @@ const file_matcher_proto_rawDesc = "" + "\fMatchRequest\x12%\n" + "\x05event\x18\x01 \x01(\v2\x0f.matchers.EventR\x05event\")\n" + "\rMatchResponse\x12\x18\n" + - "\amatched\x18\x01 \x01(\bR\amatched2\x80\x02\n" + + "\amatched\x18\x01 \x01(\bR\amatched\"<\n" + + "\x11MatchBatchRequest\x12'\n" + + "\x06events\x18\x01 \x03(\v2\x0f.matchers.EventR\x06events\".\n" + + "\x12MatchBatchResponse\x12\x18\n" + + "\amatched\x18\x01 \x03(\bR\amatched2\xc9\x02\n" + "\aMatcher\x129\n" + "\vGetMetadata\x12\x0f.matchers.Empty\x1a\x19.matchers.MatcherMetadata\x12(\n" + "\x04Init\x12\x0f.matchers.Empty\x1a\x0f.matchers.Empty\x128\n" + - "\x05Match\x12\x16.matchers.MatchRequest\x1a\x17.matchers.MatchResponse\x12,\n" + + "\x05Match\x12\x16.matchers.MatchRequest\x1a\x17.matchers.MatchResponse\x12G\n" + + "\n" + + "MatchBatch\x12\x1b.matchers.MatchBatchRequest\x1a\x1c.matchers.MatchBatchResponse\x12,\n" + "\bShutdown\x12\x0f.matchers.Empty\x1a\x0f.matchers.Empty\x12(\n" + "\x04Ping\x12\x0f.matchers.Empty\x1a\x0f.matchers.EmptyB\x1cZ\x1arpc_matchers/;rpc_matchersb\x06proto3" var ( - file_matcher_proto_rawDescOnce sync.Once - file_matcher_proto_rawDescData []byte + file_pkg_matchers_rpc_matchers_matcher_proto_rawDescOnce sync.Once + file_pkg_matchers_rpc_matchers_matcher_proto_rawDescData []byte ) -func file_matcher_proto_rawDescGZIP() []byte { - 
file_matcher_proto_rawDescOnce.Do(func() { - file_matcher_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_matcher_proto_rawDesc), len(file_matcher_proto_rawDesc))) +func file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP() []byte { + file_pkg_matchers_rpc_matchers_matcher_proto_rawDescOnce.Do(func() { + file_pkg_matchers_rpc_matchers_matcher_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_pkg_matchers_rpc_matchers_matcher_proto_rawDesc), len(file_pkg_matchers_rpc_matchers_matcher_proto_rawDesc))) }) - return file_matcher_proto_rawDescData + return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescData } -var file_matcher_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_matcher_proto_goTypes = []any{ - (*Empty)(nil), // 0: matchers.Empty - (*MatcherMetadata)(nil), // 1: matchers.MatcherMetadata - (*Event)(nil), // 2: matchers.Event - (*MatchRequest)(nil), // 3: matchers.MatchRequest - (*MatchResponse)(nil), // 4: matchers.MatchResponse +var file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_pkg_matchers_rpc_matchers_matcher_proto_goTypes = []any{ + (*Empty)(nil), // 0: matchers.Empty + (*MatcherMetadata)(nil), // 1: matchers.MatcherMetadata + (*Event)(nil), // 2: matchers.Event + (*MatchRequest)(nil), // 3: matchers.MatchRequest + (*MatchResponse)(nil), // 4: matchers.MatchResponse + (*MatchBatchRequest)(nil), // 5: matchers.MatchBatchRequest + (*MatchBatchResponse)(nil), // 6: matchers.MatchBatchResponse } -var file_matcher_proto_depIdxs = []int32{ +var file_pkg_matchers_rpc_matchers_matcher_proto_depIdxs = []int32{ 2, // 0: matchers.MatchRequest.event:type_name -> matchers.Event - 0, // 1: matchers.Matcher.GetMetadata:input_type -> matchers.Empty - 0, // 2: matchers.Matcher.Init:input_type -> matchers.Empty - 3, // 3: matchers.Matcher.Match:input_type -> matchers.MatchRequest - 0, // 4: matchers.Matcher.Shutdown:input_type -> 
matchers.Empty - 0, // 5: matchers.Matcher.Ping:input_type -> matchers.Empty - 1, // 6: matchers.Matcher.GetMetadata:output_type -> matchers.MatcherMetadata - 0, // 7: matchers.Matcher.Init:output_type -> matchers.Empty - 4, // 8: matchers.Matcher.Match:output_type -> matchers.MatchResponse - 0, // 9: matchers.Matcher.Shutdown:output_type -> matchers.Empty - 0, // 10: matchers.Matcher.Ping:output_type -> matchers.Empty - 6, // [6:11] is the sub-list for method output_type - 1, // [1:6] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_matcher_proto_init() } -func file_matcher_proto_init() { - if File_matcher_proto != nil { + 2, // 1: matchers.MatchBatchRequest.events:type_name -> matchers.Event + 0, // 2: matchers.Matcher.GetMetadata:input_type -> matchers.Empty + 0, // 3: matchers.Matcher.Init:input_type -> matchers.Empty + 3, // 4: matchers.Matcher.Match:input_type -> matchers.MatchRequest + 5, // 5: matchers.Matcher.MatchBatch:input_type -> matchers.MatchBatchRequest + 0, // 6: matchers.Matcher.Shutdown:input_type -> matchers.Empty + 0, // 7: matchers.Matcher.Ping:input_type -> matchers.Empty + 1, // 8: matchers.Matcher.GetMetadata:output_type -> matchers.MatcherMetadata + 0, // 9: matchers.Matcher.Init:output_type -> matchers.Empty + 4, // 10: matchers.Matcher.Match:output_type -> matchers.MatchResponse + 6, // 11: matchers.Matcher.MatchBatch:output_type -> matchers.MatchBatchResponse + 0, // 12: matchers.Matcher.Shutdown:output_type -> matchers.Empty + 0, // 13: matchers.Matcher.Ping:output_type -> matchers.Empty + 8, // [8:14] is the sub-list for method output_type + 2, // [2:8] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { 
file_pkg_matchers_rpc_matchers_matcher_proto_init() } +func file_pkg_matchers_rpc_matchers_matcher_proto_init() { + if File_pkg_matchers_rpc_matchers_matcher_proto != nil { return } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_matcher_proto_rawDesc), len(file_matcher_proto_rawDesc)), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_matchers_rpc_matchers_matcher_proto_rawDesc), len(file_pkg_matchers_rpc_matchers_matcher_proto_rawDesc)), NumEnums: 0, - NumMessages: 5, + NumMessages: 7, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_matcher_proto_goTypes, - DependencyIndexes: file_matcher_proto_depIdxs, - MessageInfos: file_matcher_proto_msgTypes, + GoTypes: file_pkg_matchers_rpc_matchers_matcher_proto_goTypes, + DependencyIndexes: file_pkg_matchers_rpc_matchers_matcher_proto_depIdxs, + MessageInfos: file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes, }.Build() - File_matcher_proto = out.File - file_matcher_proto_goTypes = nil - file_matcher_proto_depIdxs = nil + File_pkg_matchers_rpc_matchers_matcher_proto = out.File + file_pkg_matchers_rpc_matchers_matcher_proto_goTypes = nil + file_pkg_matchers_rpc_matchers_matcher_proto_depIdxs = nil } diff --git a/pkg/matchers/rpc_matchers/matcher.proto b/pkg/matchers/rpc_matchers/matcher.proto index 74c1b13..15bcf76 100644 --- a/pkg/matchers/rpc_matchers/matcher.proto +++ b/pkg/matchers/rpc_matchers/matcher.proto @@ -12,13 +12,16 @@ message MatcherMetadata { string version = 6; } message Event { bytes json = 1; } -message MatchRequest { Event event = 1; } +message MatchRequest { Event event = 1; } message MatchResponse { bool matched = 1; } +message MatchBatchRequest { repeated Event events = 1; } +message MatchBatchResponse { repeated bool matched = 1; } service Matcher { - rpc GetMetadata(Empty) returns (MatcherMetadata); - rpc Init(Empty) returns (Empty); - rpc Match(MatchRequest) 
returns (MatchResponse); - rpc Shutdown(Empty) returns (Empty); - rpc Ping(Empty) returns (Empty); + rpc GetMetadata(Empty) returns (MatcherMetadata); + rpc Init(Empty) returns (Empty); + rpc Match(MatchRequest) returns (MatchResponse); + rpc MatchBatch(MatchBatchRequest) returns (MatchBatchResponse); + rpc Shutdown(Empty) returns (Empty); + rpc Ping(Empty) returns (Empty); } diff --git a/pkg/matchers/rpc_matchers/matcher_grpc.pb.go b/pkg/matchers/rpc_matchers/matcher_grpc.pb.go index 8b4ce51..a76a5d6 100644 --- a/pkg/matchers/rpc_matchers/matcher_grpc.pb.go +++ b/pkg/matchers/rpc_matchers/matcher_grpc.pb.go @@ -2,7 +2,7 @@ // versions: // - protoc-gen-go-grpc v1.5.1 // - protoc v7.34.0 -// source: matcher.proto +// source: pkg/matchers/rpc_matchers/matcher.proto package rpc_matchers @@ -22,6 +22,7 @@ const ( Matcher_GetMetadata_FullMethodName = "/matchers.Matcher/GetMetadata" Matcher_Init_FullMethodName = "/matchers.Matcher/Init" Matcher_Match_FullMethodName = "/matchers.Matcher/Match" + Matcher_MatchBatch_FullMethodName = "/matchers.Matcher/MatchBatch" Matcher_Shutdown_FullMethodName = "/matchers.Matcher/Shutdown" Matcher_Ping_FullMethodName = "/matchers.Matcher/Ping" ) @@ -33,6 +34,7 @@ type MatcherClient interface { GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*MatcherMetadata, error) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Match(ctx context.Context, in *MatchRequest, opts ...grpc.CallOption) (*MatchResponse, error) + MatchBatch(ctx context.Context, in *MatchBatchRequest, opts ...grpc.CallOption) (*MatchBatchResponse, error) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Ping(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) } @@ -75,6 +77,16 @@ func (c *matcherClient) Match(ctx context.Context, in *MatchRequest, opts ...grp return out, nil } +func (c *matcherClient) MatchBatch(ctx context.Context, in *MatchBatchRequest, opts 
...grpc.CallOption) (*MatchBatchResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(MatchBatchResponse) + err := c.cc.Invoke(ctx, Matcher_MatchBatch_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *matcherClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) @@ -102,6 +114,7 @@ type MatcherServer interface { GetMetadata(context.Context, *Empty) (*MatcherMetadata, error) Init(context.Context, *Empty) (*Empty, error) Match(context.Context, *MatchRequest) (*MatchResponse, error) + MatchBatch(context.Context, *MatchBatchRequest) (*MatchBatchResponse, error) Shutdown(context.Context, *Empty) (*Empty, error) Ping(context.Context, *Empty) (*Empty, error) mustEmbedUnimplementedMatcherServer() @@ -123,6 +136,9 @@ func (UnimplementedMatcherServer) Init(context.Context, *Empty) (*Empty, error) func (UnimplementedMatcherServer) Match(context.Context, *MatchRequest) (*MatchResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Match not implemented") } +func (UnimplementedMatcherServer) MatchBatch(context.Context, *MatchBatchRequest) (*MatchBatchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MatchBatch not implemented") +} func (UnimplementedMatcherServer) Shutdown(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") } @@ -204,6 +220,24 @@ func _Matcher_Match_Handler(srv interface{}, ctx context.Context, dec func(inter return interceptor(ctx, in, info, handler) } +func _Matcher_MatchBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MatchBatchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + 
return srv.(MatcherServer).MatchBatch(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Matcher_MatchBatch_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatcherServer).MatchBatch(ctx, req.(*MatchBatchRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Matcher_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { @@ -259,6 +293,10 @@ var Matcher_ServiceDesc = grpc.ServiceDesc{ MethodName: "Match", Handler: _Matcher_Match_Handler, }, + { + MethodName: "MatchBatch", + Handler: _Matcher_MatchBatch_Handler, + }, { MethodName: "Shutdown", Handler: _Matcher_Shutdown_Handler, @@ -269,5 +307,5 @@ var Matcher_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "matcher.proto", + Metadata: "pkg/matchers/rpc_matchers/matcher.proto", } diff --git a/pkg/matchers/sdk/serve.go b/pkg/matchers/sdk/serve.go index bf60aad..0c43a54 100644 --- a/pkg/matchers/sdk/serve.go +++ b/pkg/matchers/sdk/serve.go @@ -81,6 +81,22 @@ func (s *server) Match(ctx context.Context, req *rpc_matchers.MatchRequest) (*rp return &rpc_matchers.MatchResponse{Matched: matched}, nil } +func (s *server) MatchBatch(ctx context.Context, req *rpc_matchers.MatchBatchRequest) (*rpc_matchers.MatchBatchResponse, error) { + results := make([]bool, 0, len(req.GetEvents())) + for _, ev := range req.GetEvents() { + var event events.Event + if err := json.Unmarshal(ev.GetJson(), &event); err != nil { + return nil, err + } + matched, err := s.matcher.Match(ctx, event) + if err != nil { + return nil, err + } + results = append(results, matched) + } + return &rpc_matchers.MatchBatchResponse{Matched: results}, nil +} + func (s *server) Ping(_ context.Context, _ *rpc_matchers.Empty) (*rpc_matchers.Empty, error) { return &rpc_matchers.Empty{}, nil } diff 
--git a/pkg/rules/rpc_rules.go b/pkg/rules/rpc_rules.go index 9eeb6fc..c8f9317 100644 --- a/pkg/rules/rpc_rules.go +++ b/pkg/rules/rpc_rules.go @@ -234,19 +234,7 @@ func (r *rpcRule) SubKeysInEvent(event events.Event) bool { } // ctx carries the caller's deadline (e.g. the executor's per-event timeout). -func (r *rpcRule) Evaluate(ctx context.Context, event events.Event) (bool, errors.Error) { - b, err := json.Marshal(event) - if err != nil { - return false, errors.New(err) - } - resp, err := r.client.Evaluate(ctx, &rpc_rules.EvaluateRequest{Event: &rpc_rules.Event{Json: b}}) - if err != nil { - return false, errors.New(err) - } - return resp.GetMatched(), nil -} - -func (r *rpcRule) EvaluateBatch(ctx context.Context, evts []events.Event) ([]bool, errors.Error) { +func (r *rpcRule) Evaluate(ctx context.Context, evts []events.Event) ([]bool, errors.Error) { protoEvents := make([]*rpc_rules.Event, 0, len(evts)) for _, ev := range evts { b, err := json.Marshal(ev) diff --git a/pkg/rules/rule.go b/pkg/rules/rule.go index 642cf44..7693a76 100644 --- a/pkg/rules/rule.go +++ b/pkg/rules/rule.go @@ -41,10 +41,12 @@ type Metadata interface { Version() string } -// Rule is the full interface for live rule plugins: metadata + evaluation. +// Rule is the full interface for live rule plugins: metadata + batch evaluation. +// All rules receive a slice of events and return a matched bool per event. +// The SDK server handles looping over individual events on the subprocess side. 
type Rule interface { Metadata - Evaluate(ctx context.Context, event events.Event) (bool, errors.Error) + Evaluate(ctx context.Context, evts []events.Event) ([]bool, errors.Error) } // --- Optional capability interfaces --- diff --git a/pkg/tuning_rules/rpc_tuning_rule.go b/pkg/tuning_rules/rpc_tuning_rule.go index c65f90f..b7b1c89 100644 --- a/pkg/tuning_rules/rpc_tuning_rule.go +++ b/pkg/tuning_rules/rpc_tuning_rule.go @@ -64,14 +64,18 @@ func (r *rpcTuningRule) Confidence() scoring.Confidence { return conf } -func (r *rpcTuningRule) Tune(ctx context.Context, alert alerts.Alert) (bool, errors.Error) { - b, err := json.Marshal(alert) - if err != nil { - return false, errors.NewE(err) +func (r *rpcTuningRule) Tune(ctx context.Context, alrts []alerts.Alert) ([]bool, errors.Error) { + alertJSONs := make([][]byte, 0, len(alrts)) + for _, alrt := range alrts { + b, err := json.Marshal(alrt) + if err != nil { + return nil, errors.NewE(err) + } + alertJSONs = append(alertJSONs, b) } - resp, err := r.client.Tune(ctx, &rpc_tuning_rules.TuneRequest{AlertJson: b}) + resp, err := r.client.TuneBatch(ctx, &rpc_tuning_rules.TuneBatchRequest{AlertJson: alertJSONs}) if err != nil { - return false, errors.NewE(err) + return nil, errors.NewE(err) } return resp.GetApplies(), nil } diff --git a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go index 28bbe44..bfa408a 100644 --- a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go +++ b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go @@ -2,7 +2,7 @@ // versions: // protoc-gen-go v1.36.9 // protoc v7.34.0 -// source: tuning_rule.proto +// source: pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto package rpc_tuning_rules @@ -29,7 +29,7 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - mi := &file_tuning_rule_proto_msgTypes[0] + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 
ms.StoreMessageInfo(mi) } @@ -41,7 +41,7 @@ func (x *Empty) String() string { func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_tuning_rule_proto_msgTypes[0] + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -54,7 +54,7 @@ func (x *Empty) ProtoReflect() protoreflect.Message { // Deprecated: Use Empty.ProtoReflect.Descriptor instead. func (*Empty) Descriptor() ([]byte, []int) { - return file_tuning_rule_proto_rawDescGZIP(), []int{0} + return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{0} } type TuningMetadata struct { @@ -65,7 +65,7 @@ type TuningMetadata struct { Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` Global bool `protobuf:"varint,5,opt,name=global,proto3" json:"global,omitempty"` RuleType int32 `protobuf:"varint,6,opt,name=rule_type,json=ruleType,proto3" json:"rule_type,omitempty"` // 0=Ignore, 1=SetConfidence, 2=IncreaseConfidence, 3=DecreaseConfidence - Confidence string `protobuf:"bytes,7,opt,name=confidence,proto3" json:"confidence,omitempty"` // "verylow|low|medium|high|veryhigh" + Confidence string `protobuf:"bytes,7,opt,name=confidence,proto3" json:"confidence,omitempty"` // "verylow|low|medium|high|veryhigh" Version string `protobuf:"bytes,8,opt,name=version,proto3" json:"version,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -73,7 +73,7 @@ type TuningMetadata struct { func (x *TuningMetadata) Reset() { *x = TuningMetadata{} - mi := &file_tuning_rule_proto_msgTypes[1] + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -85,7 +85,7 @@ func (x *TuningMetadata) String() string { func (*TuningMetadata) ProtoMessage() {} func (x *TuningMetadata) ProtoReflect() 
protoreflect.Message { - mi := &file_tuning_rule_proto_msgTypes[1] + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -98,7 +98,7 @@ func (x *TuningMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use TuningMetadata.ProtoReflect.Descriptor instead. func (*TuningMetadata) Descriptor() ([]byte, []int) { - return file_tuning_rule_proto_rawDescGZIP(), []int{1} + return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{1} } func (x *TuningMetadata) GetId() string { @@ -166,7 +166,7 @@ type TuneRequest struct { func (x *TuneRequest) Reset() { *x = TuneRequest{} - mi := &file_tuning_rule_proto_msgTypes[2] + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -178,7 +178,7 @@ func (x *TuneRequest) String() string { func (*TuneRequest) ProtoMessage() {} func (x *TuneRequest) ProtoReflect() protoreflect.Message { - mi := &file_tuning_rule_proto_msgTypes[2] + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -191,7 +191,7 @@ func (x *TuneRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use TuneRequest.ProtoReflect.Descriptor instead. 
func (*TuneRequest) Descriptor() ([]byte, []int) { - return file_tuning_rule_proto_rawDescGZIP(), []int{2} + return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{2} } func (x *TuneRequest) GetAlertJson() []byte { @@ -210,7 +210,7 @@ type TuneResponse struct { func (x *TuneResponse) Reset() { *x = TuneResponse{} - mi := &file_tuning_rule_proto_msgTypes[3] + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -222,7 +222,7 @@ func (x *TuneResponse) String() string { func (*TuneResponse) ProtoMessage() {} func (x *TuneResponse) ProtoReflect() protoreflect.Message { - mi := &file_tuning_rule_proto_msgTypes[3] + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -235,7 +235,7 @@ func (x *TuneResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use TuneResponse.ProtoReflect.Descriptor instead. 
func (*TuneResponse) Descriptor() ([]byte, []int) { - return file_tuning_rule_proto_rawDescGZIP(), []int{3} + return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{3} } func (x *TuneResponse) GetApplies() bool { @@ -245,12 +245,100 @@ func (x *TuneResponse) GetApplies() bool { return false } -var File_tuning_rule_proto protoreflect.FileDescriptor +type TuneBatchRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + AlertJson [][]byte `protobuf:"bytes,1,rep,name=alert_json,json=alertJson,proto3" json:"alert_json,omitempty"` // one JSON-encoded alerts.Alert per alert + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TuneBatchRequest) Reset() { + *x = TuneBatchRequest{} + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TuneBatchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TuneBatchRequest) ProtoMessage() {} + +func (x *TuneBatchRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TuneBatchRequest.ProtoReflect.Descriptor instead. 
+func (*TuneBatchRequest) Descriptor() ([]byte, []int) { + return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{4} +} -const file_tuning_rule_proto_rawDesc = "" + +func (x *TuneBatchRequest) GetAlertJson() [][]byte { + if x != nil { + return x.AlertJson + } + return nil +} + +type TuneBatchResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Applies []bool `protobuf:"varint,1,rep,packed,name=applies,proto3" json:"applies,omitempty"` // one result per alert + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TuneBatchResponse) Reset() { + *x = TuneBatchResponse{} + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TuneBatchResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TuneBatchResponse) ProtoMessage() {} + +func (x *TuneBatchResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TuneBatchResponse.ProtoReflect.Descriptor instead. 
+func (*TuneBatchResponse) Descriptor() ([]byte, []int) { + return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{5} +} + +func (x *TuneBatchResponse) GetApplies() []bool { + if x != nil { + return x.Applies + } + return nil +} + +var File_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto protoreflect.FileDescriptor + +const file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDesc = "" + "\n" + - "\x11tuning_rule.proto\x12\ftuning_rules\"\a\n" + - "\x05Empty\"\xc5\x01\n" + + "3pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto\x12\ftuning_rules\"\a\n" + + "\x05Empty\"\xdf\x01\n" + "\x0eTuningMetadata\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + "\x04name\x18\x02 \x01(\tR\x04name\x12 \n" + @@ -260,77 +348,88 @@ const file_tuning_rule_proto_rawDesc = "" + "\trule_type\x18\x06 \x01(\x05R\bruleType\x12\x1e\n" + "\n" + "confidence\x18\a \x01(\tR\n" + - "confidence\",\n" + + "confidence\x12\x18\n" + + "\aversion\x18\b \x01(\tR\aversion\",\n" + "\vTuneRequest\x12\x1d\n" + "\n" + "alert_json\x18\x01 \x01(\fR\talertJson\"(\n" + "\fTuneResponse\x12\x18\n" + - "\aapplies\x18\x01 \x01(\bR\aapplies2\xa7\x02\n" + + "\aapplies\x18\x01 \x01(\bR\aapplies\"1\n" + + "\x10TuneBatchRequest\x12\x1d\n" + + "\n" + + "alert_json\x18\x01 \x03(\fR\talertJson\"-\n" + + "\x11TuneBatchResponse\x12\x18\n" + + "\aapplies\x18\x01 \x03(\bR\aapplies2\xf5\x02\n" + "\n" + "TuningRule\x12@\n" + "\vGetMetadata\x12\x13.tuning_rules.Empty\x1a\x1c.tuning_rules.TuningMetadata\x120\n" + "\x04Init\x12\x13.tuning_rules.Empty\x1a\x13.tuning_rules.Empty\x12=\n" + - "\x04Tune\x12\x19.tuning_rules.TuneRequest\x1a\x1a.tuning_rules.TuneResponse\x124\n" + + "\x04Tune\x12\x19.tuning_rules.TuneRequest\x1a\x1a.tuning_rules.TuneResponse\x12L\n" + + "\tTuneBatch\x12\x1e.tuning_rules.TuneBatchRequest\x1a\x1f.tuning_rules.TuneBatchResponse\x124\n" + "\bShutdown\x12\x13.tuning_rules.Empty\x1a\x13.tuning_rules.Empty\x120\n" + 
"\x04Ping\x12\x13.tuning_rules.Empty\x1a\x13.tuning_rules.EmptyB?Z=github.com/harishhary/blink/pkg/tuning_rules/rpc_tuning_rulesb\x06proto3" var ( - file_tuning_rule_proto_rawDescOnce sync.Once - file_tuning_rule_proto_rawDescData []byte + file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescOnce sync.Once + file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescData []byte ) -func file_tuning_rule_proto_rawDescGZIP() []byte { - file_tuning_rule_proto_rawDescOnce.Do(func() { - file_tuning_rule_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_tuning_rule_proto_rawDesc), len(file_tuning_rule_proto_rawDesc))) +func file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP() []byte { + file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescOnce.Do(func() { + file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDesc), len(file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDesc))) }) - return file_tuning_rule_proto_rawDescData + return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescData } -var file_tuning_rule_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_tuning_rule_proto_goTypes = []any{ - (*Empty)(nil), // 0: tuning_rules.Empty - (*TuningMetadata)(nil), // 1: tuning_rules.TuningMetadata - (*TuneRequest)(nil), // 2: tuning_rules.TuneRequest - (*TuneResponse)(nil), // 3: tuning_rules.TuneResponse +var file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_goTypes = []any{ + (*Empty)(nil), // 0: tuning_rules.Empty + (*TuningMetadata)(nil), // 1: tuning_rules.TuningMetadata + (*TuneRequest)(nil), // 2: tuning_rules.TuneRequest + (*TuneResponse)(nil), // 3: tuning_rules.TuneResponse + (*TuneBatchRequest)(nil), // 4: 
tuning_rules.TuneBatchRequest + (*TuneBatchResponse)(nil), // 5: tuning_rules.TuneBatchResponse } -var file_tuning_rule_proto_depIdxs = []int32{ +var file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_depIdxs = []int32{ 0, // 0: tuning_rules.TuningRule.GetMetadata:input_type -> tuning_rules.Empty 0, // 1: tuning_rules.TuningRule.Init:input_type -> tuning_rules.Empty 2, // 2: tuning_rules.TuningRule.Tune:input_type -> tuning_rules.TuneRequest - 0, // 3: tuning_rules.TuningRule.Shutdown:input_type -> tuning_rules.Empty - 0, // 4: tuning_rules.TuningRule.Ping:input_type -> tuning_rules.Empty - 1, // 5: tuning_rules.TuningRule.GetMetadata:output_type -> tuning_rules.TuningMetadata - 0, // 6: tuning_rules.TuningRule.Init:output_type -> tuning_rules.Empty - 3, // 7: tuning_rules.TuningRule.Tune:output_type -> tuning_rules.TuneResponse - 0, // 8: tuning_rules.TuningRule.Shutdown:output_type -> tuning_rules.Empty - 0, // 9: tuning_rules.TuningRule.Ping:output_type -> tuning_rules.Empty - 5, // [5:10] is the sub-list for method output_type - 0, // [0:5] is the sub-list for method input_type + 4, // 3: tuning_rules.TuningRule.TuneBatch:input_type -> tuning_rules.TuneBatchRequest + 0, // 4: tuning_rules.TuningRule.Shutdown:input_type -> tuning_rules.Empty + 0, // 5: tuning_rules.TuningRule.Ping:input_type -> tuning_rules.Empty + 1, // 6: tuning_rules.TuningRule.GetMetadata:output_type -> tuning_rules.TuningMetadata + 0, // 7: tuning_rules.TuningRule.Init:output_type -> tuning_rules.Empty + 3, // 8: tuning_rules.TuningRule.Tune:output_type -> tuning_rules.TuneResponse + 5, // 9: tuning_rules.TuningRule.TuneBatch:output_type -> tuning_rules.TuneBatchResponse + 0, // 10: tuning_rules.TuningRule.Shutdown:output_type -> tuning_rules.Empty + 0, // 11: tuning_rules.TuningRule.Ping:output_type -> tuning_rules.Empty + 6, // [6:12] is the sub-list for method output_type + 0, // [0:6] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // 
[0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name } -func init() { file_tuning_rule_proto_init() } -func file_tuning_rule_proto_init() { - if File_tuning_rule_proto != nil { +func init() { file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_init() } +func file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_init() { + if File_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto != nil { return } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_tuning_rule_proto_rawDesc), len(file_tuning_rule_proto_rawDesc)), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDesc), len(file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDesc)), NumEnums: 0, - NumMessages: 4, + NumMessages: 6, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_tuning_rule_proto_goTypes, - DependencyIndexes: file_tuning_rule_proto_depIdxs, - MessageInfos: file_tuning_rule_proto_msgTypes, + GoTypes: file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_goTypes, + DependencyIndexes: file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_depIdxs, + MessageInfos: file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes, }.Build() - File_tuning_rule_proto = out.File - file_tuning_rule_proto_goTypes = nil - file_tuning_rule_proto_depIdxs = nil + File_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto = out.File + file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_goTypes = nil + file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_depIdxs = nil } diff --git a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto index 904bc89..1edbafc 100644 --- a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto +++ b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto @@ -25,10 +25,19 @@ message 
TuneResponse { bool applies = 1; } +message TuneBatchRequest { + repeated bytes alert_json = 1; // one JSON-encoded alerts.Alert per alert +} + +message TuneBatchResponse { + repeated bool applies = 1; // one result per alert +} + service TuningRule { - rpc GetMetadata(Empty) returns (TuningMetadata); - rpc Init(Empty) returns (Empty); - rpc Tune(TuneRequest) returns (TuneResponse); - rpc Shutdown(Empty) returns (Empty); - rpc Ping(Empty) returns (Empty); + rpc GetMetadata(Empty) returns (TuningMetadata); + rpc Init(Empty) returns (Empty); + rpc Tune(TuneRequest) returns (TuneResponse); + rpc TuneBatch(TuneBatchRequest) returns (TuneBatchResponse); + rpc Shutdown(Empty) returns (Empty); + rpc Ping(Empty) returns (Empty); } diff --git a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule_grpc.pb.go b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule_grpc.pb.go index c68927a..18328d5 100644 --- a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule_grpc.pb.go +++ b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule_grpc.pb.go @@ -2,7 +2,7 @@ // versions: // - protoc-gen-go-grpc v1.5.1 // - protoc v7.34.0 -// source: tuning_rule.proto +// source: pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto package rpc_tuning_rules @@ -22,6 +22,7 @@ const ( TuningRule_GetMetadata_FullMethodName = "/tuning_rules.TuningRule/GetMetadata" TuningRule_Init_FullMethodName = "/tuning_rules.TuningRule/Init" TuningRule_Tune_FullMethodName = "/tuning_rules.TuningRule/Tune" + TuningRule_TuneBatch_FullMethodName = "/tuning_rules.TuningRule/TuneBatch" TuningRule_Shutdown_FullMethodName = "/tuning_rules.TuningRule/Shutdown" TuningRule_Ping_FullMethodName = "/tuning_rules.TuningRule/Ping" ) @@ -33,6 +34,7 @@ type TuningRuleClient interface { GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TuningMetadata, error) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Tune(ctx context.Context, in *TuneRequest, opts ...grpc.CallOption) (*TuneResponse, error) + TuneBatch(ctx 
context.Context, in *TuneBatchRequest, opts ...grpc.CallOption) (*TuneBatchResponse, error) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Ping(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) } @@ -75,6 +77,16 @@ func (c *tuningRuleClient) Tune(ctx context.Context, in *TuneRequest, opts ...gr return out, nil } +func (c *tuningRuleClient) TuneBatch(ctx context.Context, in *TuneBatchRequest, opts ...grpc.CallOption) (*TuneBatchResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(TuneBatchResponse) + err := c.cc.Invoke(ctx, TuningRule_TuneBatch_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *tuningRuleClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) @@ -102,6 +114,7 @@ type TuningRuleServer interface { GetMetadata(context.Context, *Empty) (*TuningMetadata, error) Init(context.Context, *Empty) (*Empty, error) Tune(context.Context, *TuneRequest) (*TuneResponse, error) + TuneBatch(context.Context, *TuneBatchRequest) (*TuneBatchResponse, error) Shutdown(context.Context, *Empty) (*Empty, error) Ping(context.Context, *Empty) (*Empty, error) mustEmbedUnimplementedTuningRuleServer() @@ -123,6 +136,9 @@ func (UnimplementedTuningRuleServer) Init(context.Context, *Empty) (*Empty, erro func (UnimplementedTuningRuleServer) Tune(context.Context, *TuneRequest) (*TuneResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Tune not implemented") } +func (UnimplementedTuningRuleServer) TuneBatch(context.Context, *TuneBatchRequest) (*TuneBatchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TuneBatch not implemented") +} func (UnimplementedTuningRuleServer) Shutdown(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, 
"method Shutdown not implemented") } @@ -204,6 +220,24 @@ func _TuningRule_Tune_Handler(srv interface{}, ctx context.Context, dec func(int return interceptor(ctx, in, info, handler) } +func _TuningRule_TuneBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TuneBatchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TuningRuleServer).TuneBatch(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TuningRule_TuneBatch_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TuningRuleServer).TuneBatch(ctx, req.(*TuneBatchRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TuningRule_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { @@ -259,6 +293,10 @@ var TuningRule_ServiceDesc = grpc.ServiceDesc{ MethodName: "Tune", Handler: _TuningRule_Tune_Handler, }, + { + MethodName: "TuneBatch", + Handler: _TuningRule_TuneBatch_Handler, + }, { MethodName: "Shutdown", Handler: _TuningRule_Shutdown_Handler, @@ -269,5 +307,5 @@ var TuningRule_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "tuning_rule.proto", + Metadata: "pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto", } diff --git a/pkg/tuning_rules/sdk/serve.go b/pkg/tuning_rules/sdk/serve.go index e0d5fa3..ba3fde0 100644 --- a/pkg/tuning_rules/sdk/serve.go +++ b/pkg/tuning_rules/sdk/serve.go @@ -76,6 +76,22 @@ func (s *server) Tune(ctx context.Context, req *rpc_tuning_rules.TuneRequest) (* return &rpc_tuning_rules.TuneResponse{Applies: applies}, nil } +func (s *server) TuneBatch(ctx context.Context, req *rpc_tuning_rules.TuneBatchRequest) (*rpc_tuning_rules.TuneBatchResponse, error) { + results 
:= make([]bool, 0, len(req.GetAlertJson())) + for _, raw := range req.GetAlertJson() { + var alert map[string]any + if err := json.Unmarshal(raw, &alert); err != nil { + return nil, err + } + applies, err := s.rule.Tune(ctx, alert) + if err != nil { + return nil, err + } + results = append(results, applies) + } + return &rpc_tuning_rules.TuneBatchResponse{Applies: results}, nil +} + func (s *server) Ping(_ context.Context, _ *rpc_tuning_rules.Empty) (*rpc_tuning_rules.Empty, error) { return &rpc_tuning_rules.Empty{}, nil } diff --git a/pkg/tuning_rules/tuning_rule.go b/pkg/tuning_rules/tuning_rule.go index ee264ea..5a122f4 100644 --- a/pkg/tuning_rules/tuning_rule.go +++ b/pkg/tuning_rules/tuning_rule.go @@ -27,7 +27,7 @@ func IsValidRuleType(ruleType RuleType) bool { } type TuningRule interface { - Tune(ctx context.Context, alert alerts.Alert) (bool, errors.Error) + Tune(ctx context.Context, alrts []alerts.Alert) ([]bool, errors.Error) Id() string Name() string @@ -44,14 +44,15 @@ type TuningRule interface { // Returns (confidence, ignored, err). When ignored=true the alert should be discarded. 
func ProcessTuningRules(ctx context.Context, alert alerts.Alert, rules []TuningRule) (scoring.Confidence, bool, errors.Error) { confidence := alert.Confidence + batch := []alerts.Alert{alert} for _, rule := range rules { if rule.RuleType() == Ignore { - applies, err := rule.Tune(ctx, alert) + results, err := rule.Tune(ctx, batch) if err != nil { return confidence, false, err } - if applies { + if results[0] { return 0, true, nil } } @@ -60,11 +61,11 @@ func ProcessTuningRules(ctx context.Context, alert alerts.Alert, rules []TuningR setByRule := false for _, rule := range rules { if rule.RuleType() == SetConfidence { - applies, err := rule.Tune(ctx, alert) + results, err := rule.Tune(ctx, batch) if err != nil { return confidence, false, err } - if applies { + if results[0] { if !setByRule || rule.Confidence() > confidence { confidence = rule.Confidence() setByRule = true @@ -79,11 +80,11 @@ func ProcessTuningRules(ctx context.Context, alert alerts.Alert, rules []TuningR for _, rule := range rules { if rule.RuleType() == IncreaseConfidence || rule.RuleType() == DecreaseConfidence { - applies, err := rule.Tune(ctx, alert) + results, err := rule.Tune(ctx, batch) if err != nil { return confidence, false, err } - if applies { + if results[0] { if rule.RuleType() == IncreaseConfidence && rule.Confidence() > confidence { confidence = rule.Confidence() } else if rule.RuleType() == DecreaseConfidence && rule.Confidence() < confidence { From 597a8aa6240d0b8ea7c9fa4bfd036b1bccb71aee Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Sun, 22 Mar 2026 21:33:18 +0100 Subject: [PATCH 04/14] removing old artefacts --- cmd/alert_enricher/enricher/enricher.go | 6 +- cmd/alert_merger/merger/merger.go | 19 ++-- examples/matchers/allow-all/main.go | 2 +- examples/rules/test-login-alert.yaml | 2 +- internal/configuration/configuration.go | 2 +- internal/pools/routing.go | 12 +-- internal/services/pipeline.go | 109 ----------------------- internal/services/service.go | 5 ++ 
pkg/enrichments/enrichment.go | 2 +- pkg/enrichments/launcher.go | 3 + pkg/enrichments/pool/pool.go | 32 ++++--- pkg/enrichments/rpc_enrichment.go | 8 +- pkg/formatters/formatter.go | 2 +- pkg/formatters/launcher.go | 3 + pkg/formatters/pool/pool.go | 42 +++++---- pkg/formatters/rpc_formatter.go | 6 +- pkg/matchers/launcher.go | 3 + pkg/matchers/pool/pool.go | 32 ++++--- pkg/rules/launcher.go | 27 ++++++ pkg/rules/pool/pool.go | 40 +++++---- pkg/rules/testdata/crashing_rule/main.go | 2 +- pkg/tuning_rules/launcher.go | 3 + pkg/tuning_rules/pool/pool.go | 35 +++++--- pkg/tuning_rules/rpc_tuning_rule.go | 6 +- pkg/tuning_rules/tuning_rule.go | 59 +----------- 25 files changed, 196 insertions(+), 266 deletions(-) delete mode 100644 internal/services/pipeline.go diff --git a/cmd/alert_enricher/enricher/enricher.go b/cmd/alert_enricher/enricher/enricher.go index 32388e7..3c3d3e7 100644 --- a/cmd/alert_enricher/enricher/enricher.go +++ b/cmd/alert_enricher/enricher/enricher.go @@ -138,15 +138,15 @@ func (service *EnricherService) processBatch(ctx context.Context, msgs []broker. 
go func(name string, idxs []int) { defer wg.Done() - alrts := make([]*alerts.Alert, len(idxs)) + alerts := make([]*alerts.Alert, len(idxs)) for j, idx := range idxs { - alrts[j] = states[idx].alert + alerts[j] = states[idx].alert } cctx, cancel := context.WithTimeout(ctx, defaultEnrichmentTimeout) defer cancel() start := time.Now() - absent, removed, errs := service.pool.Enrich(cctx, name, alrts, "") + absent, removed, errs := service.pool.Enrich(cctx, name, alerts, "") enrichmentLatency.WithLabelValues(name).Observe(time.Since(start).Seconds()) mu.Lock() diff --git a/cmd/alert_merger/merger/merger.go b/cmd/alert_merger/merger/merger.go index 88b6dac..2552810 100644 --- a/cmd/alert_merger/merger/merger.go +++ b/cmd/alert_merger/merger/merger.go @@ -17,14 +17,14 @@ import ( ) var ( - alertsIn = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "alerts_in_total"}) - alertsOut = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "alerts_out_total"}) - alertsMerged = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "alerts_merged_total"}) - groupsFlushed = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "groups_flushed_total"}) - groupsEvicted = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "groups_evicted_total"}) - parseErrors = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "parse_errors_total"}) - writeErrors = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "write_errors_total"}) - activeGroups = promauto.NewGauge(prometheus.GaugeOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "active_groups"}) + alertsIn = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "alerts_in_total"}) + 
alertsOut = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "alerts_out_total"}) + alertsMerged = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "alerts_merged_total"}) + groupsFlushed = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "groups_flushed_total"}) + groupsEvicted = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "groups_evicted_total"}) + parseErrors = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "parse_errors_total"}) + writeErrors = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "write_errors_total"}) + activeGroups = promauto.NewGauge(prometheus.GaugeOpts{Namespace: "blink", Subsystem: "alert_merger", Name: "active_groups"}) ) // mergeGroup holds a set of alerts that share the same rule and merge-by key values and are within each other's merge window. @@ -122,7 +122,7 @@ func (s *MergerService) accumulate(ctx context.Context, alert *alerts.Alert) { return } - // Either no existing group or the window has moved on — flush the old group + // Either no existing group or the window has moved on - flush the old group // (if any) and start a new one. 
toFlush := make([]*mergeGroup, 0, 2) if exists { @@ -251,4 +251,3 @@ func (s *MergerService) writeAlert(ctx context.Context, alert *alerts.Alert) { } alertsOut.Inc() } - diff --git a/examples/matchers/allow-all/main.go b/examples/matchers/allow-all/main.go index 6b213e4..8ec3c21 100644 --- a/examples/matchers/allow-all/main.go +++ b/examples/matchers/allow-all/main.go @@ -14,7 +14,7 @@ func (allowAll) Metadata() sdk.MatcherMetadata { return sdk.MatcherMetadata{ ID: "allow-all", Name: "Allow All", - Description: "Matches every event — use for testing only.", + Description: "Matches every event - use for testing only.", Enabled: true, Version: "1.0.0", } diff --git a/examples/rules/test-login-alert.yaml b/examples/rules/test-login-alert.yaml index f2f6ba0..3546983 100644 --- a/examples/rules/test-login-alert.yaml +++ b/examples/rules/test-login-alert.yaml @@ -1,7 +1,7 @@ id: "00000000-0000-0000-0000-000000000001" name: "test_login_alert" display_name: "Test Login Alert" -description: "Test rule — fires on any application event. Use for end-to-end pipeline testing only." +description: "Test rule - fires on any application event. Use for end-to-end pipeline testing only." enabled: true version: "1.0.0" diff --git a/internal/configuration/configuration.go b/internal/configuration/configuration.go index dce4794..fe43b16 100644 --- a/internal/configuration/configuration.go +++ b/internal/configuration/configuration.go @@ -83,6 +83,6 @@ type MergerConfig struct { // MaxGroups caps the number of live merge groups held in memory per replica. // When the cap is exceeded the oldest group (earliest expiry) is flushed // immediately rather than waiting for its window to close. - // 0 means unlimited — only safe when merge_by_keys have low cardinality. + // 0 means unlimited - only safe when merge_by_keys have low cardinality. 
MaxGroups int `env:"MERGER_MAX_GROUPS,optional"` } diff --git a/internal/pools/routing.go b/internal/pools/routing.go index 352741b..cc2f11c 100644 --- a/internal/pools/routing.go +++ b/internal/pools/routing.go @@ -2,27 +2,27 @@ package pools import "sync" -// PluginRouting holds the routing configuration for a single plugin. -type PluginRouting struct { +// RoutingEntry holds the routing configuration for a single plugin. +type RoutingEntry struct { Mode RolloutMode RolloutPct float64 } -// RoutingTable is a thread-safe map from pluginID to PluginRouting. +// RoutingTable is a thread-safe map from pluginID to RoutingEntry. // Pass RoutingTable.Config() to NewProcessPool to enable live routing control. // An empty table is valid - missing entries default to blue-green. type RoutingTable struct { mu sync.RWMutex - entries map[string]PluginRouting + entries map[string]RoutingEntry } // Creates an empty RoutingTable. func NewRoutingTable() *RoutingTable { - return &RoutingTable{entries: make(map[string]PluginRouting)} + return &RoutingTable{entries: make(map[string]RoutingEntry)} } // Updates the routing config for pluginID. Reflected immediately on the next Call. -func (t *RoutingTable) Set(pluginID string, r PluginRouting) { +func (t *RoutingTable) Set(pluginID string, r RoutingEntry) { t.mu.Lock() t.entries[pluginID] = r t.mu.Unlock() diff --git a/internal/services/pipeline.go b/internal/services/pipeline.go deleted file mode 100644 index 0f3363c..0000000 --- a/internal/services/pipeline.go +++ /dev/null @@ -1,109 +0,0 @@ -package services - -import ( - "context" - - "github.com/harishhary/blink/internal/broker" - "github.com/harishhary/blink/internal/errors" - "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/pkg/alerts" -) - -// MaxPluginAttempts is the number of DLQ round-trips an alert makes when a referenced -// plugin is missing before the stage passes the alert through without that plugin. 
-// This prevents infinite DLQ loops while still retrying transient gaps. -const MaxPluginAttempts = 3 - -type PipelineCounters struct { - In func() // called after successful unmarshal - Out func() // called after successful write - ParseError func() // called when alerts.Unmarshal fails - WriteError func() // called when Marshal or WriteMessages fails - DLQ func() // called when an alert is dead-lettered -} - -// RunAlertPipeline is the shared Kafka read => process => write => commit loop for alert pipeline stages (tuner, enricher, formatter). -// -// process mutates alert in-place and returns: -// - skip=true to suppress the downstream write (e.g. tuning rule marked alert ignored) -// - deadLetter=true to route the alert to the DLQ writer instead of the forward topic -func RunAlertPipeline( - ctx context.Context, - log *logger.Logger, - reader broker.Reader, - writer broker.Writer, - dlq broker.Writer, - batchSize int, - counters PipelineCounters, - process func(ctx context.Context, key []byte, alert *alerts.Alert) (skip bool, deadLetter bool), -) errors.Error { - incr := func(f func()) { - if f != nil { - f() - } - } - - for { - msgs, err := reader.ReadBatch(ctx, batchSize) - if err != nil { - if ctx.Err() != nil { - return nil - } - log.Error(errors.NewE(err)) - continue - } - - for _, m := range msgs { - alert, err := alerts.Unmarshal(m.Value) - if err != nil { - incr(counters.ParseError) - log.Error(errors.NewE(err)) - continue - } - incr(counters.In) - - skip, deadLetter := process(ctx, m.Key, alert) - - if deadLetter && dlq != nil { - payload, merr := alerts.Marshal(alert) - if merr != nil { - log.Error(errors.NewE(merr)) - continue - } - if werr := dlq.WriteMessages(ctx, broker.Message{Key: m.Key, Value: payload}); werr != nil { - log.Error(errors.NewE(werr)) - } else { - incr(counters.DLQ) - } - continue - } - - if skip { - continue - } - - // Reset per-stage retry counter before forwarding so the next stage - // starts with a clean slate independent of 
retries in this stage. - alert.Attempts = 0 - payload, merr := alerts.Marshal(alert) - if merr != nil { - incr(counters.WriteError) - log.Error(errors.NewE(merr)) - continue - } - if werr := writer.WriteMessages(ctx, broker.Message{Key: m.Key, Value: payload}); werr != nil { - incr(counters.WriteError) - log.Error(errors.NewE(werr)) - continue - } - incr(counters.Out) - } - - if err := reader.CommitMessages(ctx, msgs...); err != nil { - if ctx.Err() != nil { - return nil - } - log.Error(errors.NewE(err)) - } - } -} diff --git a/internal/services/service.go b/internal/services/service.go index c21ca89..0e1c22d 100644 --- a/internal/services/service.go +++ b/internal/services/service.go @@ -6,6 +6,11 @@ import ( "github.com/harishhary/blink/internal/errors" ) +// MaxPluginAttempts is the number of DLQ round-trips an alert makes when a referenced +// plugin is missing before the stage passes the alert through without that plugin. +// This prevents infinite DLQ loops while still retrying transient gaps. +const MaxPluginAttempts = 3 + type Service interface { Name() string Run(ctx context.Context) errors.Error diff --git a/pkg/enrichments/enrichment.go b/pkg/enrichments/enrichment.go index 6b326d0..c3fcecf 100644 --- a/pkg/enrichments/enrichment.go +++ b/pkg/enrichments/enrichment.go @@ -52,7 +52,7 @@ func ValidateDependencyGraph(enrichments []IEnrichment) error { } type IEnrichment interface { - Enrich(ctx context.Context, alrts []*alerts.Alert) errors.Error + Enrich(ctx context.Context, alerts []*alerts.Alert) errors.Error // DependsOn returns plugin names that must run before this enrichment. 
DependsOn() []string diff --git a/pkg/enrichments/launcher.go b/pkg/enrichments/launcher.go index b54e569..cc9c439 100644 --- a/pkg/enrichments/launcher.go +++ b/pkg/enrichments/launcher.go @@ -42,6 +42,9 @@ func (l *EnrichmentAdapter) Handshake(ctx context.Context, raw interface{}, _ st return e, &enrichmentLifecycle{rpc: rpc}, meta.GetId(), meta.GetName(), nil } +// IsReady always returns true - enrichments have no YAML sidecar prerequisite. +func (l *EnrichmentAdapter) IsReady(_ string) bool { return true } +func (l *EnrichmentAdapter) IsShadow(_ string) bool { return false } func (l *EnrichmentAdapter) IsEnabled(_ *pluginmgr.PluginHandle) bool { return true } func (l *EnrichmentAdapter) Workers(_ string) int { return 1 } diff --git a/pkg/enrichments/pool/pool.go b/pkg/enrichments/pool/pool.go index 2f51cff..6206b06 100644 --- a/pkg/enrichments/pool/pool.go +++ b/pkg/enrichments/pool/pool.go @@ -23,12 +23,20 @@ func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { } } -func (p *Pool) Enrich(ctx context.Context, enrichmentID string, alert *alerts.Alert, canaryHashKey string) (absent bool, removed bool, _ errors.Error) { - err := p.Call(ctx, enrichmentID, canaryHashKey, func(ctx context.Context, e enrichments.IEnrichment) error { +// Enrich calls enrichmentID once with all alerts, applying enrichment sequentially. +// absent/removed refer to the plugin state. errs contains per-alert errors (nil on success). 
+func (p *Pool) Enrich(ctx context.Context, enrichmentID string, alerts []*alerts.Alert, canaryHashKey string) (absent bool, removed bool, errs []errors.Error) { + errs = make([]errors.Error, len(alerts)) + err := p.Call(ctx, enrichmentID, canaryHashKey, func(callCtx context.Context, e enrichments.IEnrichment) error { if !e.Enabled() { return nil } - return e.Enrich(ctx, alert) + if err := e.Enrich(callCtx, alerts); err != nil { + for i := range errs { + errs[i] = errors.NewE(err) + } + } + return nil }) if err != nil { if stderrors.Is(err, internal.ErrPluginNotFound) { @@ -37,18 +45,22 @@ func (p *Pool) Enrich(ctx context.Context, enrichmentID string, alert *alerts.Al if stderrors.Is(err, internal.ErrPluginRemoved) { return false, true, nil } - return false, false, errors.NewE(err) + return false, false, []errors.Error{errors.NewE(err)} } - return false, false, nil + return false, false, errs +} + +func poolKey(e enrichments.IEnrichment) internal.PoolKey { + version := e.Version() + if cs := e.Checksum(); cs != "" { + version = version + "@" + cs + } + return internal.PoolKey{PluginID: e.Id(), Version: version} } func (p *Pool) Sync(msg messaging.Message) { register := func(onDrained func(), items []enrichments.IEnrichment, maxProcs int) { - version := items[0].Checksum() - if version == "" { - version = "1.0.0" - } - p.Register(internal.PoolKey{PluginID: items[0].Id(), Version: version}, items, maxProcs, onDrained) + p.Register(poolKey(items[0]), items, maxProcs, onDrained) } switch m := msg.(type) { case pluginmgr.RegisterMessage[enrichments.IEnrichment]: diff --git a/pkg/enrichments/rpc_enrichment.go b/pkg/enrichments/rpc_enrichment.go index 3373ef9..017b2c9 100644 --- a/pkg/enrichments/rpc_enrichment.go +++ b/pkg/enrichments/rpc_enrichment.go @@ -35,9 +35,9 @@ func (r *rpcEnrichment) String() string { return "RpcEnrichment '" + r.meta.GetName() + "' id:'" + r.meta.GetId() + "'" } -func (r *rpcEnrichment) Enrich(ctx context.Context, alrts []*alerts.Alert) 
errors.Error { - protoAlerts := make([]*rpc_enrichments.Alert, 0, len(alrts)) - for _, alrt := range alrts { +func (r *rpcEnrichment) Enrich(ctx context.Context, alerts []*alerts.Alert) errors.Error { + protoAlerts := make([]*rpc_enrichments.Alert, 0, len(alerts)) + for _, alrt := range alerts { b, err := json.Marshal(alrt.Event) if err != nil { return errors.New(err) @@ -54,7 +54,7 @@ func (r *rpcEnrichment) Enrich(ctx context.Context, alrts []*alerts.Alert) error return errors.New(err) } for k, v := range enriched { - alrts[i].Event[k] = v + alerts[i].Event[k] = v } } return nil diff --git a/pkg/formatters/formatter.go b/pkg/formatters/formatter.go index 2fe78e9..1285298 100644 --- a/pkg/formatters/formatter.go +++ b/pkg/formatters/formatter.go @@ -8,7 +8,7 @@ import ( ) type IFormatter interface { - Format(ctx context.Context, alrts []*alerts.Alert) ([]map[string]any, errors.Error) + Format(ctx context.Context, alerts []*alerts.Alert) ([]map[string]any, errors.Error) Id() string Name() string diff --git a/pkg/formatters/launcher.go b/pkg/formatters/launcher.go index 126b5f3..2536274 100644 --- a/pkg/formatters/launcher.go +++ b/pkg/formatters/launcher.go @@ -43,6 +43,9 @@ func (l *FormatterAdapter) Handshake(ctx context.Context, raw interface{}, _ str return f, &formatterLifecycle{rpc: rpc}, meta.GetId(), meta.GetName(), nil } +// IsReady always returns true - formatters have no YAML sidecar prerequisite. 
+func (l *FormatterAdapter) IsReady(_ string) bool { return true } +func (l *FormatterAdapter) IsShadow(_ string) bool { return false } func (l *FormatterAdapter) IsEnabled(_ *pluginmgr.PluginHandle) bool { return true } func (l *FormatterAdapter) Workers(_ string) int { return 1 } diff --git a/pkg/formatters/pool/pool.go b/pkg/formatters/pool/pool.go index b0d69fd..1181339 100644 --- a/pkg/formatters/pool/pool.go +++ b/pkg/formatters/pool/pool.go @@ -23,18 +23,26 @@ func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { } } -// Format runs the formatter identified by formatterID against alert. -// It respects kill switches and the rollout mode from the current catalog snapshot. -// - absent=true: no active pool - plugin transiently missing, caller should dead-letter. -// - removed=true: plugin was explicitly deregistered, caller should drop permanently. -func (p *Pool) Format(ctx context.Context, formatterID string, alert *alerts.Alert, canaryHashKey string) (out map[string]any, absent bool, removed bool, _ errors.Error) { +// Format runs the formatter identified by formatterID against all alerts in a single pool call. +// - absent=true: plugin transiently missing, caller should dead-letter. +// - removed=true: plugin deregistered, caller should drop permanently. +// - outs/errs are per-alert (same length as alerts). 
+func (p *Pool) Format(ctx context.Context, formatterID string, alerts []*alerts.Alert, canaryHashKey string) (outs []map[string]any, absent bool, removed bool, errs []errors.Error) { + outs = make([]map[string]any, len(alerts)) + errs = make([]errors.Error, len(alerts)) err := p.Call(ctx, formatterID, canaryHashKey, func(callCtx context.Context, f formatters.IFormatter) error { if !f.Enabled() { return nil } - var e errors.Error - out, e = f.Format(callCtx, alert) - return e + batchOuts, e := f.Format(callCtx, alerts) + if e != nil { + for i := range errs { + errs[i] = e + } + return nil + } + copy(outs, batchOuts) + return nil }) if err != nil { if stderrors.Is(err, internal.ErrPluginNotFound) { @@ -43,18 +51,22 @@ func (p *Pool) Format(ctx context.Context, formatterID string, alert *alerts.Ale if stderrors.Is(err, internal.ErrPluginRemoved) { return nil, false, true, nil } - return nil, false, false, errors.NewE(err) + return nil, false, false, []errors.Error{errors.NewE(err)} + } + return outs, false, false, errs +} + +func poolKey(f formatters.IFormatter) internal.PoolKey { + version := f.Version() + if cs := f.Checksum(); cs != "" { + version = version + "@" + cs } - return out, false, false, nil + return internal.PoolKey{PluginID: f.Id(), Version: version} } func (p *Pool) Sync(msg messaging.Message) { register := func(onDrained func(), items []formatters.IFormatter, maxProcs int) { - version := items[0].Checksum() - if version == "" { - version = "1.0.0" - } - p.Register(internal.PoolKey{PluginID: items[0].Id(), Version: version}, items, maxProcs, onDrained) + p.Register(poolKey(items[0]), items, maxProcs, onDrained) } switch m := msg.(type) { case pluginmgr.RegisterMessage[formatters.IFormatter]: diff --git a/pkg/formatters/rpc_formatter.go b/pkg/formatters/rpc_formatter.go index 90b0d91..f1c4fc5 100644 --- a/pkg/formatters/rpc_formatter.go +++ b/pkg/formatters/rpc_formatter.go @@ -35,9 +35,9 @@ func (f *rpcFormatter) String() string { return 
fmt.Sprintf("Formatter '%s' (id:%s, enabled:%t)", f.meta.GetName(), f.meta.GetId(), f.meta.GetEnabled()) } -func (f *rpcFormatter) Format(ctx context.Context, alrts []*alerts.Alert) ([]map[string]any, errors.Error) { - alertJSONs := make([][]byte, 0, len(alrts)) - for _, alrt := range alrts { +func (f *rpcFormatter) Format(ctx context.Context, alerts []*alerts.Alert) ([]map[string]any, errors.Error) { + alertJSONs := make([][]byte, 0, len(alerts)) + for _, alrt := range alerts { b, err := json.Marshal(alrt) if err != nil { return nil, errors.NewE(err) diff --git a/pkg/matchers/launcher.go b/pkg/matchers/launcher.go index ab18dcd..ab6bca3 100644 --- a/pkg/matchers/launcher.go +++ b/pkg/matchers/launcher.go @@ -42,6 +42,9 @@ func (l *MatcherAdapter) Handshake(ctx context.Context, raw interface{}, _ strin return m, &matcherLifecycle{rpc: rpc}, meta.GetId(), meta.GetName(), nil } +// IsReady always returns true - matchers have no YAML sidecar prerequisite. +func (l *MatcherAdapter) IsReady(_ string) bool { return true } +func (l *MatcherAdapter) IsShadow(_ string) bool { return false } func (l *MatcherAdapter) IsEnabled(_ *pluginmgr.PluginHandle) bool { return true } func (l *MatcherAdapter) Workers(_ string) int { return 1 } diff --git a/pkg/matchers/pool/pool.go b/pkg/matchers/pool/pool.go index 9945846..06e07a2 100644 --- a/pkg/matchers/pool/pool.go +++ b/pkg/matchers/pool/pool.go @@ -22,32 +22,40 @@ func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { } } -// Runs the matcher identified by matcherID against event. -func (p *Pool) Match(ctx context.Context, matcherID string, event events.Event, canaryHashKey string) (bool, errors.Error) { - var matched bool +// Match runs the matcher identified by matcherID against all events in a single pool call. +// Disabled matchers are treated as pass-through (all results true). 
+func (p *Pool) Match(ctx context.Context, matcherID string, evts []events.Event, canaryHashKey string) ([]bool, errors.Error) { + var results []bool err := p.Call(ctx, matcherID, canaryHashKey, func(callCtx context.Context, m matchers.Matcher) error { if !m.Enabled() { - matched = true // treat disabled matcher as pass-through + results = make([]bool, len(evts)) + for i := range results { + results[i] = true + } return nil } var e errors.Error - matched, e = m.Match(callCtx, event) + results, e = m.Match(callCtx, evts) return e }) if err != nil { - return false, errors.NewE(err) + return nil, errors.NewE(err) } - return matched, nil + return results, nil } // Handles plugin lifecycle messages from the plugin manager bus, registering or deregistering matchers in the pool. +func poolKey(m matchers.Matcher) internal.PoolKey { + version := m.Version() + if cs := m.Checksum(); cs != "" { + version = version + "@" + cs + } + return internal.PoolKey{PluginID: m.Id(), Version: version} +} + func (p *Pool) Sync(msg messaging.Message) { register := func(onDrained func(), items []matchers.Matcher, maxProcs int) { - version := items[0].Checksum() - if version == "" { - version = "1.0.0" - } - p.Register(internal.PoolKey{PluginID: items[0].Id(), Version: version}, items, maxProcs, onDrained) + p.Register(poolKey(items[0]), items, maxProcs, onDrained) } switch m := msg.(type) { case pluginmgr.RegisterMessage[matchers.Matcher]: diff --git a/pkg/rules/launcher.go b/pkg/rules/launcher.go index a5128af..1a14b79 100644 --- a/pkg/rules/launcher.go +++ b/pkg/rules/launcher.go @@ -10,6 +10,7 @@ import ( "github.com/harishhary/blink/internal/helpers" "github.com/harishhary/blink/internal/pluginmgr" + internal "github.com/harishhary/blink/internal/pools" "github.com/harishhary/blink/pkg/rules/config" "github.com/harishhary/blink/pkg/rules/rpc_rules" ) @@ -46,6 +47,32 @@ func (l *RuleAdapter) Handshake(ctx context.Context, raw interface{}, binPath st return rule, &ruleLifecycle{rpc: rpc}, 
cfg.Id(), cfg.Name(), nil } +// Reports whether this binary is safe to start: +// 1. Its YAML sidecar exists in the current registry (prevents crash loops when binary arrives on disk before YAML is flushed). +// 2. Its plugin ID has no blocking validation errors (missing/invalid version, all-shadow +// group with no stable baseline, etc.). Validation runs fresh on every call - not from +// a cached set - so there is no race between the config watcher's reload debounce and +// the manager's reconcile reacting to the same fsnotify event. +func (l *RuleAdapter) IsReady(binPath string) bool { + cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if cfg == nil { + return false + } + return !l.Watcher.HasBlockingErrorFor(cfg.Id(), cfg.FileName()+".yaml") +} + +// IsShadow reports whether this binary's YAML declares it as a shadow or canary version. +// reconcile() starts non-shadow binaries first so the stable version always wins the active +// pool slot on a fresh start, regardless of filename alphabetical order. +func (l *RuleAdapter) IsShadow(binPath string) bool { + cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if cfg == nil { + return false + } + m := cfg.RolloutMode() + return m == internal.RolloutModeCanary || m == internal.RolloutModeShadow +} + // IsEnabled reports whether the rule's YAML sidecar still exists and is enabled. // Called during every reconcile func so process-zombies (binary running but YAML removed/disabled) are stopped without waiting for a binary change. 
func (l *RuleAdapter) IsEnabled(h *pluginmgr.PluginHandle) bool { diff --git a/pkg/rules/pool/pool.go b/pkg/rules/pool/pool.go index fc10bf0..537799c 100644 --- a/pkg/rules/pool/pool.go +++ b/pkg/rules/pool/pool.go @@ -19,12 +19,9 @@ type Pool struct { } func NewPool(watcher *config.Watcher, drainTimeout time.Duration) *Pool { - routing := func(id string) (bool, internal.RolloutMode, float64) { - meta := watcher.Current().ByID(id) - if meta == nil { - return false, internal.RolloutModeBlueGreen, 0 - } - return meta.KillSwitch(), meta.RolloutMode(), meta.RolloutPct() + routing := func(id string) (internal.RolloutMode, float64) { + re := watcher.Current().RoutingByID(id) + return re.Mode, re.RolloutPct } return &Pool{ ProcessPool: internal.NewProcessPool[rules.Rule](routing, internal.NewPoolMetrics("rules"), drainTimeout), @@ -32,32 +29,43 @@ func NewPool(watcher *config.Watcher, drainTimeout time.Duration) *Pool { } } -// Runs the rule identified by ruleID against event. -func (p *Pool) Evaluate(ctx context.Context, ruleID string, event events.Event, canaryHashKey string) (bool, errors.Error) { - var matched bool +// Evaluate runs all evts against the rule identified by ruleID in a single pool call. +func (p *Pool) Evaluate(ctx context.Context, ruleID string, evts []events.Event, canaryHashKey string) ([]bool, errors.Error) { + var results []bool err := p.Call(ctx, ruleID, canaryHashKey, func(ctx context.Context, r rules.Rule) error { if !r.Enabled() { + results = make([]bool, len(evts)) return nil } var e errors.Error - matched, e = r.Evaluate(ctx, event) + results, e = r.Evaluate(ctx, evts) return e }) if err != nil { - return false, errors.NewE(err) + return nil, errors.NewE(err) + } + return results, nil +} + +// poolKey builds a PoolKey that is unique per binary deployment. 
+// Combining the YAML version with the binary checksum means a binary change +// always produces a distinct key even if the operator forgot to bump the version +// string in the rule config - preventing silent same-key overwrites in the pool. +func poolKey(r rules.Rule) internal.PoolKey { + version := r.Version() + if cs := r.Checksum(); cs != "" { + version = version + "@" + cs } - return matched, nil + return internal.PoolKey{PluginID: r.Id(), Version: version} } // Handles plugin lifecycle messages from the plugin manager bus, registering or deregistering rules in the pool. func (p *Pool) Sync(msg messaging.Message) { switch m := msg.(type) { case pluginmgr.RegisterMessage[rules.Rule]: - r := m.Items[0] - p.Register(internal.PoolKey{PluginID: r.Id(), Version: r.Version()}, m.Items, m.MaxProcs, nil) + p.Register(poolKey(m.Items[0]), m.Items, m.MaxProcs, nil) case pluginmgr.UpdateMessage[rules.Rule]: - r := m.Items[0] - p.Register(internal.PoolKey{PluginID: r.Id(), Version: r.Version()}, m.Items, m.MaxProcs, m.OnDrained) + p.Register(poolKey(m.Items[0]), m.Items, m.MaxProcs, m.OnDrained) case pluginmgr.UnregisterMessage[rules.Rule]: p.Unregister(m.ItemID) case pluginmgr.RemoveMessage[rules.Rule]: diff --git a/pkg/rules/testdata/crashing_rule/main.go b/pkg/rules/testdata/crashing_rule/main.go index 2a85587..bf32d04 100644 --- a/pkg/rules/testdata/crashing_rule/main.go +++ b/pkg/rules/testdata/crashing_rule/main.go @@ -17,7 +17,7 @@ func (crashingRule) Evaluate(_ context.Context, _ events.Event) (bool, errors.Er } func main() { - // Exit 300ms after startup — long enough for the manager to complete the + // Exit 300ms after startup - long enough for the manager to complete the // Init handshake (~50ms), short enough for crash tests to run quickly. 
go func() { time.Sleep(300 * time.Millisecond) diff --git a/pkg/tuning_rules/launcher.go b/pkg/tuning_rules/launcher.go index 752176d..aa6ab0b 100644 --- a/pkg/tuning_rules/launcher.go +++ b/pkg/tuning_rules/launcher.go @@ -42,6 +42,9 @@ func (l *TuningRuleAdapter) Handshake(ctx context.Context, raw interface{}, _ st return tr, &tuningLifecycle{rpc: rpc}, meta.GetId(), meta.GetName(), nil } +func (l *TuningRuleAdapter) IsReady(_ string) bool { return true } +func (l *TuningRuleAdapter) IsShadow(_ string) bool { return false } + // IsEnabled always returns true - tuning rules have no YAML sidecar. func (l *TuningRuleAdapter) IsEnabled(_ *pluginmgr.PluginHandle) bool { return true } diff --git a/pkg/tuning_rules/pool/pool.go b/pkg/tuning_rules/pool/pool.go index 2f6616c..a34942c 100644 --- a/pkg/tuning_rules/pool/pool.go +++ b/pkg/tuning_rules/pool/pool.go @@ -9,6 +9,7 @@ import ( "github.com/harishhary/blink/internal/pluginmgr" internal "github.com/harishhary/blink/internal/pools" "github.com/harishhary/blink/pkg/alerts" + "github.com/harishhary/blink/pkg/scoring" tuning "github.com/harishhary/blink/pkg/tuning_rules" ) @@ -22,28 +23,40 @@ func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { } } -// Runs the tuning rule identified by tuningRuleID against alert. -func (p *Pool) Tune(ctx context.Context, tuningRuleID string, alert alerts.Alert, canaryHashKey string) (bool, errors.Error) { - var matched bool +// Tune calls tuningRuleID once with all alerts, returning per-alert apply results. +// ruleType and confidence are rule metadata - the same for every alert in the batch. 
+func (p *Pool) Tune(ctx context.Context, tuningRuleID string, alerts []alerts.Alert, canaryHashKey string) ( + ruleType tuning.RuleType, confidence scoring.Confidence, applies []bool, _ errors.Error, +) { + applies = make([]bool, len(alerts)) err := p.Call(ctx, tuningRuleID, canaryHashKey, func(callCtx context.Context, t tuning.TuningRule) error { + if !t.Enabled() { + return nil + } + ruleType = t.RuleType() + confidence = t.Confidence() var e errors.Error - matched, e = t.Tune(callCtx, alert) + applies, e = t.Tune(callCtx, alerts) return e }) if err != nil { - return false, errors.NewE(err) + return 0, 0, nil, errors.NewE(err) } - return matched, nil + return ruleType, confidence, applies, nil } // Handles plugin lifecycle messages from the plugin manager bus, registering or deregistering tuning rules in the pool. +func poolKey(t tuning.TuningRule) internal.PoolKey { + version := t.Version() + if cs := t.Checksum(); cs != "" { + version = version + "@" + cs + } + return internal.PoolKey{PluginID: t.Id(), Version: version} +} + func (p *Pool) Sync(msg messaging.Message) { register := func(onDrained func(), items []tuning.TuningRule, maxProcs int) { - version := items[0].Checksum() - if version == "" { - version = "1.0.0" - } - p.Register(internal.PoolKey{PluginID: items[0].Id(), Version: version}, items, maxProcs, onDrained) + p.Register(poolKey(items[0]), items, maxProcs, onDrained) } switch m := msg.(type) { case pluginmgr.RegisterMessage[tuning.TuningRule]: diff --git a/pkg/tuning_rules/rpc_tuning_rule.go b/pkg/tuning_rules/rpc_tuning_rule.go index b7b1c89..eac6448 100644 --- a/pkg/tuning_rules/rpc_tuning_rule.go +++ b/pkg/tuning_rules/rpc_tuning_rule.go @@ -64,9 +64,9 @@ func (r *rpcTuningRule) Confidence() scoring.Confidence { return conf } -func (r *rpcTuningRule) Tune(ctx context.Context, alrts []alerts.Alert) ([]bool, errors.Error) { - alertJSONs := make([][]byte, 0, len(alrts)) - for _, alrt := range alrts { +func (r *rpcTuningRule) Tune(ctx 
context.Context, alerts []alerts.Alert) ([]bool, errors.Error) { + alertJSONs := make([][]byte, 0, len(alerts)) + for _, alrt := range alerts { b, err := json.Marshal(alrt) if err != nil { return nil, errors.NewE(err) diff --git a/pkg/tuning_rules/tuning_rule.go b/pkg/tuning_rules/tuning_rule.go index 5a122f4..4392149 100644 --- a/pkg/tuning_rules/tuning_rule.go +++ b/pkg/tuning_rules/tuning_rule.go @@ -27,7 +27,7 @@ func IsValidRuleType(ruleType RuleType) bool { } type TuningRule interface { - Tune(ctx context.Context, alrts []alerts.Alert) ([]bool, errors.Error) + Tune(ctx context.Context, alerts []alerts.Alert) ([]bool, errors.Error) Id() string Name() string @@ -39,60 +39,3 @@ type TuningRule interface { Confidence() scoring.Confidence Checksum() string } - -// ProcessTuningRules applies tuning rules in priority order: Ignore > SetConfidence > Increase/Decrease. -// Returns (confidence, ignored, err). When ignored=true the alert should be discarded. -func ProcessTuningRules(ctx context.Context, alert alerts.Alert, rules []TuningRule) (scoring.Confidence, bool, errors.Error) { - confidence := alert.Confidence - batch := []alerts.Alert{alert} - - for _, rule := range rules { - if rule.RuleType() == Ignore { - results, err := rule.Tune(ctx, batch) - if err != nil { - return confidence, false, err - } - if results[0] { - return 0, true, nil - } - } - } - - setByRule := false - for _, rule := range rules { - if rule.RuleType() == SetConfidence { - results, err := rule.Tune(ctx, batch) - if err != nil { - return confidence, false, err - } - if results[0] { - if !setByRule || rule.Confidence() > confidence { - confidence = rule.Confidence() - setByRule = true - } - } - } - } - - if setByRule { - return confidence, false, nil - } - - for _, rule := range rules { - if rule.RuleType() == IncreaseConfidence || rule.RuleType() == DecreaseConfidence { - results, err := rule.Tune(ctx, batch) - if err != nil { - return confidence, false, err - } - if results[0] { - if 
rule.RuleType() == IncreaseConfidence && rule.Confidence() > confidence { - confidence = rule.Confidence() - } else if rule.RuleType() == DecreaseConfidence && rule.Confidence() < confidence { - confidence = rule.Confidence() - } - } - } - } - - return confidence, false, nil -} From 24da05e407804e72bb33048efad05a0f5b7b641c Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Sun, 22 Mar 2026 21:46:29 +0100 Subject: [PATCH 05/14] fixing artefacts --- pkg/alerts/convert.go | 60 ++++++++++++++++++------------------- pkg/rules/config/watcher.go | 44 +++++++++++++++++++++++++++ pkg/rules/manager_test.go | 1 + 3 files changed, 75 insertions(+), 30 deletions(-) diff --git a/pkg/alerts/convert.go b/pkg/alerts/convert.go index da78c26..1907ae4 100644 --- a/pkg/alerts/convert.go +++ b/pkg/alerts/convert.go @@ -37,22 +37,22 @@ func AlertToProto(a *Alert) (*pb.Alert, error) { return nil, err } p := &pb.Alert{ - AlertId: a.AlertID, - Attempts: int32(a.Attempts), - Cluster: a.Cluster, - CreatedNs: a.Created.UnixNano(), - DispatchedNs: a.Dispatched.UnixNano(), - Event: eventStruct, - Staged: a.Staged, - OutputsSent: a.OutputsSent, - EnrichmentsApplied: a.EnrichmentsApplied, - LogSource: a.LogSource, - LogType: a.LogType, - SourceEntity: a.SourceEntity, - SourceService: a.SourceService, - Confidence: a.Confidence.String(), - Severity: a.Severity.String(), - Rule: ruleToProto(a.Rule), + AlertId: a.AlertID, + Attempts: int32(a.Attempts), + Cluster: a.Cluster, + CreatedNs: a.Created.UnixNano(), + DispatchedNs: a.Dispatched.UnixNano(), + Event: eventStruct, + Staged: a.Staged, + OutputsSent: a.OutputsSent, + EnrichmentsApplied: a.EnrichmentsApplied, + LogSource: a.LogSource, + LogType: a.LogType, + SourceEntity: a.SourceEntity, + SourceService: a.SourceService, + Confidence: a.Confidence.String(), + Severity: a.Severity.String(), + Rule: ruleToProto(a.Rule), } return p, nil } @@ -67,22 +67,22 @@ func ProtoToAlert(p *pb.Alert) (*Alert, error) { sev, _ := 
scoring.ParseSeverity(p.GetSeverity()) a := &Alert{ - AlertID: p.GetAlertId(), - Attempts: int(p.GetAttempts()), - Cluster: p.GetCluster(), - Created: time.Unix(0, p.GetCreatedNs()).UTC(), - Dispatched: time.Unix(0, p.GetDispatchedNs()).UTC(), - Event: event, + AlertID: p.GetAlertId(), + Attempts: int(p.GetAttempts()), + Cluster: p.GetCluster(), + Created: time.Unix(0, p.GetCreatedNs()).UTC(), + Dispatched: time.Unix(0, p.GetDispatchedNs()).UTC(), + Event: event, Staged: p.GetStaged(), OutputsSent: p.GetOutputsSent(), EnrichmentsApplied: p.GetEnrichmentsApplied(), - LogSource: p.GetLogSource(), - LogType: p.GetLogType(), - SourceEntity: p.GetSourceEntity(), - SourceService: p.GetSourceService(), - Confidence: conf, - Severity: sev, - Rule: protoToRuleMetadata(p.GetRule()), + LogSource: p.GetLogSource(), + LogType: p.GetLogType(), + SourceEntity: p.GetSourceEntity(), + SourceService: p.GetSourceService(), + Confidence: conf, + Severity: sev, + Rule: protoToRuleMetadata(p.GetRule()), } return a, nil } @@ -125,7 +125,7 @@ func protoToRuleMetadata(m *pb.RuleMetadata) *config.RuleMetadata { return &config.RuleMetadata{} } cfg, _ := config.New(config.RuleMetadata{ - IDField: m.GetId(), + IdField: m.GetId(), NameField: m.GetName(), DisplayNameField: m.GetDisplayName(), DescriptionField: m.GetDescription(), diff --git a/pkg/rules/config/watcher.go b/pkg/rules/config/watcher.go index d0fb2c0..df1a2ce 100644 --- a/pkg/rules/config/watcher.go +++ b/pkg/rules/config/watcher.go @@ -28,6 +28,11 @@ func NewWatcher(dir string) (*Watcher, error) { w := &Watcher{ServiceContext: sc, dir: dir} + errs := Validate(dir) + for _, err := range errs { + w.ErrorF("config validation: %v", err) + } + reg, err := NewRegistry(dir) if err != nil && reg == nil { return nil, err @@ -39,6 +44,40 @@ func NewWatcher(dir string) (*Watcher, error) { return w, nil } +// HasBlockingError reports whether pluginID currently has any blocking validation error. 
+// It runs Validate() fresh on every call so that IsReady() in the rule adapter always +// sees the current disk state - avoiding the race between the config watcher's reload +// debounce and the manager's reconcile firing from the same fsnotify event. +func (w *Watcher) HasBlockingError(pluginID string) bool { + if pluginID == "" { + return false + } + for _, err := range Validate(w.dir) { + if err.Blocking() && err.PluginID == pluginID { + return true + } + } + return false +} + +// HasBlockingErrorFor is like HasBlockingError but also matches by YAML file name +// (e.g. "brute_force.yaml"). This catches rules whose id: field is missing - those +// errors carry no PluginID, but do carry File set to the YAML filename. +func (w *Watcher) HasBlockingErrorFor(pluginID, yamlFile string) bool { + for _, e := range Validate(w.dir) { + if !e.Blocking() { + continue + } + if pluginID != "" && e.PluginID == pluginID { + return true + } + if yamlFile != "" && e.File == yamlFile { + return true + } + } + return false +} + // Returns the most recently loaded Registry. 
func (w *Watcher) Current() *Registry { return w.current.Load() @@ -88,6 +127,11 @@ func (w *Watcher) Run(ctx context.Context) errors.Error { } func (w *Watcher) reload() { + errs := Validate(w.dir) + for _, err := range errs { + w.ErrorF("config validation: %v", err) + } + reg, err := NewRegistry(w.dir) if err != nil { w.ErrorF("reload error: %v", err) diff --git a/pkg/rules/manager_test.go b/pkg/rules/manager_test.go index b228cbf..f4668e9 100644 --- a/pkg/rules/manager_test.go +++ b/pkg/rules/manager_test.go @@ -29,6 +29,7 @@ name: "simple-rule" file_name: "simple_rule" description: "always matches - used for integration tests" enabled: true +version: "1.0.0" severity: "info" confidence: "low" log_types: ["test"] From 2371d2cc913e137bb88b34aa8862a39769ddbf7c Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Sun, 22 Mar 2026 22:44:16 +0100 Subject: [PATCH 06/14] adding batching for alert_formatter and dlq handling --- .gitignore | 2 +- cmd/alert_enricher/enricher/enricher.go | 22 +-- cmd/alert_formatter/formatter/formatter.go | 202 +++++++++++++++++---- cmd/rule_tuner/tuner/tuner.go | 6 +- 4 files changed, 184 insertions(+), 48 deletions(-) diff --git a/.gitignore b/.gitignore index 0bf1064..5136a31 100644 --- a/.gitignore +++ b/.gitignore @@ -50,7 +50,7 @@ terraform.rc ### VisualStudioCode ### .vscode/* -!.vscode/settings.json +.vscode/settings.json !.vscode/tasks.json !.vscode/launch.json !.vscode/extensions.json diff --git a/cmd/alert_enricher/enricher/enricher.go b/cmd/alert_enricher/enricher/enricher.go index 3c3d3e7..199758f 100644 --- a/cmd/alert_enricher/enricher/enricher.go +++ b/cmd/alert_enricher/enricher/enricher.go @@ -31,11 +31,11 @@ var ( writeErrors = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_enricher", Name: "write_errors_total"}) ) -// enrichAlertState holds a decoded alert and its enrichment outcome for a batch entry. 
-type enrichAlertState struct { +// alertState holds a decoded alert and its enrichment outcome for a batch entry. +type alertState struct { key []byte alert *alerts.Alert - anyMissing bool + deadLetter bool } // EnricherService reads alerts from Kafka, enriches them, and writes to the formatter topic. @@ -99,7 +99,7 @@ func (service *EnricherService) Run(ctx context.Context) errors.Error { func (service *EnricherService) processBatch(ctx context.Context, msgs []broker.Message) { // Decode all alerts. - states := make([]*enrichAlertState, 0, len(msgs)) + states := make([]*alertState, 0, len(msgs)) for _, m := range msgs { alert, err := alerts.Unmarshal(m.Value) if err != nil { @@ -108,7 +108,7 @@ func (service *EnricherService) processBatch(ctx context.Context, msgs []broker. continue } alertsIn.Inc() - states = append(states, &enrichAlertState{key: m.Key, alert: alert}) + states = append(states, &alertState{key: m.Key, alert: alert}) } if len(states) == 0 { return @@ -138,15 +138,15 @@ func (service *EnricherService) processBatch(ctx context.Context, msgs []broker. go func(name string, idxs []int) { defer wg.Done() - alerts := make([]*alerts.Alert, len(idxs)) + batch := make([]*alerts.Alert, len(idxs)) for j, idx := range idxs { - alerts[j] = states[idx].alert + batch[j] = states[idx].alert } cctx, cancel := context.WithTimeout(ctx, defaultEnrichmentTimeout) defer cancel() start := time.Now() - absent, removed, errs := service.pool.Enrich(cctx, name, alerts, "") + absent, removed, errs := service.pool.Enrich(cctx, name, batch, "") enrichmentLatency.WithLabelValues(name).Observe(time.Since(start).Seconds()) mu.Lock() @@ -155,12 +155,12 @@ func (service *EnricherService) processBatch(ctx context.Context, msgs []broker. 
case removed: service.Error(errors.NewF("enrichment %s removed", name)) for _, idx := range idxs { - states[idx].anyMissing = true + states[idx].deadLetter = true } case absent: service.Error(errors.NewF("enrichment %s not found", name)) for _, idx := range idxs { - states[idx].anyMissing = true + states[idx].deadLetter = true } default: for j, idx := range idxs { @@ -179,7 +179,7 @@ func (service *EnricherService) processBatch(ctx context.Context, msgs []broker. // Write results. for _, s := range states { - if s.anyMissing { + if s.deadLetter { s.alert.Attempts++ if s.alert.Attempts >= services.MaxPluginAttempts || service.dlq == nil { service.Info("alert %s passed through after %d attempts (enrichment unavailable)", s.alert.AlertID, s.alert.Attempts) diff --git a/cmd/alert_formatter/formatter/formatter.go b/cmd/alert_formatter/formatter/formatter.go index c359b25..75835fa 100644 --- a/cmd/alert_formatter/formatter/formatter.go +++ b/cmd/alert_formatter/formatter/formatter.go @@ -2,6 +2,7 @@ package formatter import ( "context" + "sync" "github.com/harishhary/blink/internal/broker" "github.com/harishhary/blink/internal/broker/kafka" @@ -26,6 +27,14 @@ var ( writeErrors = promauto.NewCounter(prometheus.CounterOpts{Namespace: "blink", Subsystem: "alert_formatter", Name: "write_errors_total"}) ) +// alertState holds a decoded alert and its formatting outcome. +type alertState struct { + key []byte + alert *alerts.Alert + snapshot []byte // pre-format serialization for rollback on error + deadLetter bool +} + type FormatterService struct { svcctx.ServiceContext reader broker.Reader @@ -62,49 +71,176 @@ func NewFormatterService(pool *fmtcatalog.Pool) (*FormatterService, error) { func (service *FormatterService) Name() string { return "alert-formatter" } -// Reads alerts from Kafka, applies formatters, and writes to the dispatcher topic. 
func (service *FormatterService) Run(ctx context.Context) errors.Error { - return services.RunAlertPipeline(ctx, service.Logger, service.reader, service.writer, service.dlq, 50, 4, - services.PipelineCounters{ - In: alertsIn.Inc, Out: alertsOut.Inc, DLQ: alertsDLQ.Inc, - ParseError: parseErrors.Inc, WriteError: writeErrors.Inc, - }, - func(ctx context.Context, _ []byte, alert *alerts.Alert) (skip bool, deadLetter bool) { - service.Info("applying formatters for alert %s", alert.AlertID) - - snapshot, merr := alerts.Marshal(alert) - if merr != nil { - service.Error(errors.NewE(merr)) - return false, false + for { + msgs, err := service.reader.ReadBatch(ctx, 50) + if err != nil { + if ctx.Err() != nil { + return nil } + service.Error(errors.NewE(err)) + continue + } + + service.processBatch(ctx, msgs) + + if err := service.reader.CommitMessages(ctx, msgs...); err != nil { + if ctx.Err() != nil { + return nil + } + service.Error(errors.NewE(err)) + } + } +} - for _, name := range alert.Rule.Formatters() { - _, absent, removed, err := service.pool.Format(ctx, name, alert, "") +func (service *FormatterService) processBatch(ctx context.Context, msgs []broker.Message) { + // Decode all alerts. + states := make([]*alertState, 0, len(msgs)) + for _, m := range msgs { + alert, err := alerts.Unmarshal(m.Value) + if err != nil { + parseErrors.Inc() + service.Error(errors.NewE(err)) + continue + } + alertsIn.Inc() + snapshot, _ := alerts.Marshal(alert) + states = append(states, &alertState{key: m.Key, alert: alert, snapshot: snapshot}) + } + if len(states) == 0 { + return + } + + // Formatters are applied sequentially per alert but alerts sharing the same + // formatter at each stage are batched together into one pool call. + // + // We collect all unique formatter names across all alerts, then process them + // stage by stage. Because different rules may declare different formatter + // sequences, we track per-alert progress via a pointer into their formatter list. 
+ fmtProgress := make([]int, len(states)) // current formatter index per alert + + // Determine the maximum number of formatter stages across all alerts. + maxStages := 0 + for _, s := range states { + if n := len(s.alert.Rule.Formatters()); n > maxStages { + maxStages = n + } + } + + for stage := 0; stage < maxStages; stage++ { + // Collect alerts at this stage: those whose next formatter is at index `stage`. + type stageItem struct { + stateIdx int + fmtName string + } + // Group by formatter name for this stage. + byFormatter := make(map[string][]int) // fmtName → stateIdxs + for i, s := range states { + if s.deadLetter { + continue + } + fmts := s.alert.Rule.Formatters() + if fmtProgress[i] >= len(fmts) { + continue + } + name := fmts[fmtProgress[i]] + byFormatter[name] = append(byFormatter[name], i) + } + if len(byFormatter) == 0 { + break + } + + // Call each formatter once with all its alerts at this stage. + var mu sync.Mutex + var wg sync.WaitGroup + for name, idxs := range byFormatter { + wg.Add(1) + go func(name string, idxs []int) { + defer wg.Done() + + batch := make([]*alerts.Alert, len(idxs)) + for j, idx := range idxs { + batch[j] = states[idx].alert + } + + _, absent, removed, errs := service.pool.Format(ctx, name, batch, "") + + mu.Lock() + defer mu.Unlock() switch { - case removed || absent: + case absent, removed: label := "not found" if removed { label = "removed" } - service.Error(errors.NewF("formatter %s %s - alert %s missing formatter", name, label, alert.AlertID)) - alert.Attempts++ - if alert.Attempts >= services.MaxPluginAttempts { - service.Info("alert %s passed through after %d attempts (formatter unavailable)", alert.AlertID, alert.Attempts) - continue - } - return false, true - case err != nil: - formatterErrors.WithLabelValues(name).Inc() - service.Error(err) - if restored, uerr := alerts.Unmarshal(snapshot); uerr == nil { - *alert = *restored + for _, idx := range idxs { + s := states[idx] + service.Error(errors.NewF("formatter %s 
%s - alert %s missing formatter", name, label, s.alert.AlertID)) + s.alert.Attempts++ + if s.alert.Attempts >= services.MaxPluginAttempts { + service.Info("alert %s passed through after %d attempts (formatter unavailable)", s.alert.AlertID, s.alert.Attempts) + fmtProgress[idx] = len(s.alert.Rule.Formatters()) + } else { + s.deadLetter = true + } } - return false, false default: - formattersApplied.WithLabelValues(name).Inc() + for j, idx := range idxs { + s := states[idx] + if errs[j] != nil { + formatterErrors.WithLabelValues(name).Inc() + service.Error(errs[j]) + // Rollback to pre-format state before DLQ retry. + if restored, uerr := alerts.Unmarshal(s.snapshot); uerr == nil { + *s.alert = *restored + } + s.alert.Attempts++ + if s.alert.Attempts >= services.MaxPluginAttempts { + service.Info("alert %s passed through after %d attempts (formatter %s errored)", s.alert.AlertID, s.alert.Attempts, name) + fmtProgress[idx] = len(s.alert.Rule.Formatters()) + } else { + s.deadLetter = true + } + } else { + formattersApplied.WithLabelValues(name).Inc() + fmtProgress[idx]++ + } + } } + }(name, idxs) + } + wg.Wait() + } + + // Write results. 
+ for _, s := range states { + if s.deadLetter && service.dlq != nil { + payload, err := alerts.Marshal(s.alert) + if err != nil { + writeErrors.Inc() + service.Error(errors.NewE(err)) + continue } - return false, false - }, - ) + if err := service.dlq.WriteMessages(ctx, broker.Message{Key: s.key, Value: payload}); err != nil { + writeErrors.Inc() + service.Error(errors.NewE(err)) + } else { + alertsDLQ.Inc() + } + continue + } + + payload, merr := alerts.Marshal(s.alert) + if merr != nil { + writeErrors.Inc() + service.Error(errors.NewE(merr)) + continue + } + if err := service.writer.WriteMessages(ctx, broker.Message{Key: s.key, Value: payload}); err != nil { + writeErrors.Inc() + service.Error(errors.NewE(err)) + continue + } + alertsOut.Inc() + } } diff --git a/cmd/rule_tuner/tuner/tuner.go b/cmd/rule_tuner/tuner/tuner.go index 2a45379..a01b986 100644 --- a/cmd/rule_tuner/tuner/tuner.go +++ b/cmd/rule_tuner/tuner/tuner.go @@ -44,7 +44,7 @@ type alertState struct { key []byte alert *alerts.Alert results []tuneResult - anyMissing bool + deadLetter bool } // TunerService reads alerts from Kafka, applies tuning rules, and writes to the enricher topic. @@ -154,7 +154,7 @@ func (service *TunerService) processBatch(ctx context.Context, msgs []broker.Mes service.Error(errors.NewF("tuning rule %s %s", name, label)) mu.Lock() for _, idx := range idxs { - states[idx].anyMissing = true + states[idx].deadLetter = true } mu.Unlock() return @@ -179,7 +179,7 @@ func (service *TunerService) processBatch(ctx context.Context, msgs []broker.Mes // Apply results and write. 
for _, s := range states { - if s.anyMissing { + if s.deadLetter { s.alert.Attempts++ if s.alert.Attempts >= services.MaxPluginAttempts || service.dlq == nil { service.Info("alert %s passed through after %d attempts (tuning rule unavailable)", s.alert.AlertID, s.alert.Attempts) From 14b114dd5669d6a47b6e4da5f23a29150d1a0a66 Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Tue, 24 Mar 2026 01:15:53 +0100 Subject: [PATCH 07/14] generics for metadata config --- cmd/alert_enricher/main.go | 14 +- cmd/alert_formatter/main.go | 14 +- cmd/event_matcher/main.go | 16 +- cmd/rule_executor/main.go | 4 +- cmd/rule_tuner/main.go | 14 +- examples/enrichments/add-geo/add-geo.yaml | 14 + examples/enrichments/add-geo/main.go | 73 +++ examples/formatters/slack/main.go | 51 ++ examples/formatters/slack/slack.yaml | 11 + examples/matchers/allow-all/allow-all.yaml | 15 + examples/matchers/allow-all/main.go | 13 +- examples/rules/failed-login/main.go | 54 ++ examples/rules/failed-login/rule.yaml | 19 + .../boost-external-ip/boost-external-ip.yaml | 23 + .../tuning_rules/boost-external-ip/main.go | 61 ++ internal/backends/backend.go | 4 +- internal/backends/dynamodb/dynamodb.go | 15 +- internal/pluginmgr/manager.go | 555 ------------------ internal/pluginmgr/messages.go | 55 -- internal/pluginmgr/metrics.go | 47 -- internal/services/plugin_sync.go | 6 +- pkg/alerts/alert.go | 42 +- pkg/alerts/convert.go | 13 +- pkg/alerts/pb/alert.pb.go | 122 ++-- pkg/alerts/pb/alert.proto | 14 +- pkg/enrichments/enrichment.go | 58 +- pkg/enrichments/launcher.go | 80 ++- pkg/enrichments/manager.go | 9 +- pkg/enrichments/pool/pool.go | 27 +- pkg/enrichments/rpc_enrichment.go | 49 +- pkg/enrichments/sdk/serve.go | 46 +- pkg/formatters/formatter.go | 15 +- pkg/formatters/launcher.go | 76 ++- pkg/formatters/manager.go | 9 +- pkg/formatters/pool/pool.go | 27 +- pkg/formatters/rpc_formatter.go | 54 +- pkg/formatters/sdk/serve.go | 39 +- pkg/matchers/manager.go | 9 +- pkg/matchers/matcher.go | 14 +- 
pkg/matchers/pool/pool.go | 17 +- pkg/matchers/rpc_matcher.go | 59 +- pkg/matchers/sdk/serve.go | 38 +- pkg/rules/helpers.go | 9 +- pkg/rules/launcher.go | 16 +- pkg/rules/manager.go | 8 +- pkg/rules/pool/pool.go | 23 +- pkg/rules/rpc_rules.go | 224 +------ pkg/rules/rpc_rules/rule.pb.go | 452 ++++++-------- pkg/rules/rpc_rules/rule.proto | 16 +- pkg/rules/rpc_rules/rule_grpc.pb.go | 42 +- pkg/rules/rule.go | 84 +-- pkg/tuning_rules/manager.go | 9 +- pkg/tuning_rules/pool/pool.go | 17 +- pkg/tuning_rules/rpc_tuning_rule.go | 79 ++- pkg/tuning_rules/sdk/serve.go | 45 +- pkg/tuning_rules/tuning_rule.go | 22 +- 56 files changed, 1139 insertions(+), 1802 deletions(-) create mode 100644 examples/enrichments/add-geo/add-geo.yaml create mode 100644 examples/enrichments/add-geo/main.go create mode 100644 examples/formatters/slack/main.go create mode 100644 examples/formatters/slack/slack.yaml create mode 100644 examples/matchers/allow-all/allow-all.yaml create mode 100644 examples/rules/failed-login/main.go create mode 100644 examples/rules/failed-login/rule.yaml create mode 100644 examples/tuning_rules/boost-external-ip/boost-external-ip.yaml create mode 100644 examples/tuning_rules/boost-external-ip/main.go delete mode 100644 internal/pluginmgr/manager.go delete mode 100644 internal/pluginmgr/messages.go delete mode 100644 internal/pluginmgr/metrics.go diff --git a/cmd/alert_enricher/main.go b/cmd/alert_enricher/main.go index a8f97db..4689a0a 100644 --- a/cmd/alert_enricher/main.go +++ b/cmd/alert_enricher/main.go @@ -10,9 +10,10 @@ import ( "github.com/harishhary/blink/cmd/alert_enricher/enricher" "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/internal/services" "github.com/harishhary/blink/pkg/enrichments" + enrichmentconfig "github.com/harishhary/blink/pkg/enrichments/config" pools "github.com/harishhary/blink/internal/pools" enrichcatalog 
"github.com/harishhary/blink/pkg/enrichments/pool" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -29,6 +30,12 @@ func main() { ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) defer stop() + pluginDir := os.Getenv("ENRICHER_PLUGIN_DIR") + cfgWatcher, err := enrichmentconfig.NewWatcher(pluginDir) + if err != nil { + log.Fatalf("enrichment config watcher: %v", err) + } + routingTable := pools.NewRoutingTable() enricherPool := enrichcatalog.NewPool(routingTable, 0) @@ -36,8 +43,8 @@ func main() { "alert-enricher-sync", "BLINK-ALERT-ENRICHER - SYNC", "ENRICHER_PLUGIN_DIR", - func(log *logger.Logger, dir string) pluginmgr.Plugin { - return enrichments.NewManager(log, enricherPool.Sync, dir) + func(log *logger.Logger, dir string) plugin.Plugin { + return enrichments.NewManager(log, enricherPool.Sync, dir, cfgWatcher) }, ) if err != nil { @@ -50,6 +57,7 @@ func main() { runner := services.New() runner.Register( + cfgWatcher, syncSvc, enricherSvc, ) diff --git a/cmd/alert_formatter/main.go b/cmd/alert_formatter/main.go index ba8a605..ccb7c27 100644 --- a/cmd/alert_formatter/main.go +++ b/cmd/alert_formatter/main.go @@ -10,9 +10,10 @@ import ( "github.com/harishhary/blink/cmd/alert_formatter/formatter" "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/internal/services" "github.com/harishhary/blink/pkg/formatters" + formatterconfig "github.com/harishhary/blink/pkg/formatters/config" pools "github.com/harishhary/blink/internal/pools" fmtcatalog "github.com/harishhary/blink/pkg/formatters/pool" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -29,6 +30,12 @@ func main() { ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) defer stop() + pluginDir := os.Getenv("FORMATTER_PLUGIN_DIR") + cfgWatcher, err := formatterconfig.NewWatcher(pluginDir) + if err != 
nil { + log.Fatalf("formatter config watcher: %v", err) + } + routingTable := pools.NewRoutingTable() formatterPool := fmtcatalog.NewPool(routingTable, 0) @@ -36,8 +43,8 @@ func main() { "alert-formatter-sync", "BLINK-ALERT-FORMATTER - SYNC", "FORMATTER_PLUGIN_DIR", - func(log *logger.Logger, dir string) pluginmgr.Plugin { - return formatters.NewManager(log, formatterPool.Sync, dir) + func(log *logger.Logger, dir string) plugin.Plugin { + return formatters.NewManager(log, formatterPool.Sync, dir, cfgWatcher) }, ) if err != nil { @@ -50,6 +57,7 @@ func main() { runner := services.New() runner.Register( + cfgWatcher, syncSvc, formatterSvc, ) diff --git a/cmd/event_matcher/main.go b/cmd/event_matcher/main.go index 9edf299..8456702 100644 --- a/cmd/event_matcher/main.go +++ b/cmd/event_matcher/main.go @@ -10,9 +10,10 @@ import ( "github.com/harishhary/blink/cmd/event_matcher/matcher" "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/internal/services" "github.com/harishhary/blink/pkg/matchers" + matcherconfig "github.com/harishhary/blink/pkg/matchers/config" pools "github.com/harishhary/blink/internal/pools" matchcatalog "github.com/harishhary/blink/pkg/matchers/pool" "github.com/harishhary/blink/pkg/rules/config" @@ -30,6 +31,7 @@ func main() { ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) defer stop() + // Rule config watcher (used by the matcher service to look up rules). ruleConfigDir := os.Getenv("RULE_CONFIG_DIR") if ruleConfigDir == "" { log.Fatal("RULE_CONFIG_DIR is required") @@ -39,6 +41,13 @@ func main() { log.Fatalf("config watcher: %v", err) } + // Matcher plugin config watcher (YAML sidecars for matcher binaries). 
+ matcherPluginDir := os.Getenv("MATCHER_PLUGIN_DIR") + matcherCfgWatcher, err := matcherconfig.NewWatcher(matcherPluginDir) + if err != nil { + log.Fatalf("matcher config watcher: %v", err) + } + routingTable := pools.NewRoutingTable() matcherPool := matchcatalog.NewPool(routingTable, 0) @@ -46,8 +55,8 @@ func main() { "event-matcher-sync", "BLINK-EVENT-MATCHER - SYNC", "MATCHER_PLUGIN_DIR", - func(log *logger.Logger, dir string) pluginmgr.Plugin { - return matchers.NewManager(log, matcherPool.Sync, dir) + func(log *logger.Logger, dir string) plugin.Plugin { + return matchers.NewManager(log, matcherPool.Sync, dir, matcherCfgWatcher) }, ) if err != nil { @@ -61,6 +70,7 @@ func main() { runner := services.New() runner.Register( cfgWatcherSvc, + matcherCfgWatcher, syncSvc, matcherSvc, ) diff --git a/cmd/rule_executor/main.go b/cmd/rule_executor/main.go index 8494e63..7b14c09 100644 --- a/cmd/rule_executor/main.go +++ b/cmd/rule_executor/main.go @@ -10,7 +10,7 @@ import ( "github.com/harishhary/blink/cmd/rule_executor/executor" "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/internal/services" "github.com/harishhary/blink/pkg/rules" "github.com/harishhary/blink/pkg/rules/config" @@ -47,7 +47,7 @@ func main() { "rule-executor-sync", "BLINK-RULE-EXECUTOR - SYNC", "RULE_PLUGIN_DIR", - func(log *logger.Logger, dir string) pluginmgr.Plugin { + func(log *logger.Logger, dir string) plugin.Plugin { return rules.NewManager(log, rulePool.Sync, dir, cfgWatcher) }, ) diff --git a/cmd/rule_tuner/main.go b/cmd/rule_tuner/main.go index 2ec712f..097de3f 100644 --- a/cmd/rule_tuner/main.go +++ b/cmd/rule_tuner/main.go @@ -10,9 +10,10 @@ import ( "github.com/harishhary/blink/cmd/rule_tuner/tuner" "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" 
"github.com/harishhary/blink/internal/services" "github.com/harishhary/blink/pkg/tuning_rules" + tuningconfig "github.com/harishhary/blink/pkg/tuning_rules/config" pools "github.com/harishhary/blink/internal/pools" tuningcatalog "github.com/harishhary/blink/pkg/tuning_rules/pool" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -29,6 +30,12 @@ func main() { ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) defer stop() + pluginDir := os.Getenv("TUNER_PLUGIN_DIR") + cfgWatcher, err := tuningconfig.NewWatcher(pluginDir) + if err != nil { + log.Fatalf("tuning config watcher: %v", err) + } + routingTable := pools.NewRoutingTable() tuningPool := tuningcatalog.NewPool(routingTable, 0) @@ -36,8 +43,8 @@ func main() { "rule-tuner-sync", "BLINK-RULE-TUNER - SYNC", "TUNER_PLUGIN_DIR", - func(log *logger.Logger, dir string) pluginmgr.Plugin { - return tuning_rules.NewManager(log, tuningPool.Sync, dir) + func(log *logger.Logger, dir string) plugin.Plugin { + return tuning_rules.NewManager(log, tuningPool.Sync, dir, cfgWatcher) }, ) if err != nil { @@ -50,6 +57,7 @@ func main() { runner := services.New() runner.Register( + cfgWatcher, syncSvc, tunerSvc, ) diff --git a/examples/enrichments/add-geo/add-geo.yaml b/examples/enrichments/add-geo/add-geo.yaml new file mode 100644 index 0000000..c8db4f3 --- /dev/null +++ b/examples/enrichments/add-geo/add-geo.yaml @@ -0,0 +1,14 @@ +id: "550e8400-e29b-41d4-a716-446655440010" +name: "add_geo" +display_name: "Add Geo" +description: "Annotates alerts with geo_country and geo_is_internal derived from source_ip." +enabled: true +version: "1.0.0" + +# Dependency ordering: enrichments listed here must run before this one. +depends_on: [] + +# Rollout control (optional — defaults to blue-green). 
+# mode: "blue-green" # blue-green | canary | shadow +# min_procs: 1 +# max_procs: 2 diff --git a/examples/enrichments/add-geo/main.go b/examples/enrichments/add-geo/main.go new file mode 100644 index 0000000..78f9251 --- /dev/null +++ b/examples/enrichments/add-geo/main.go @@ -0,0 +1,73 @@ +package main + +import ( + "context" + "net" + + "github.com/harishhary/blink/internal/errors" + sdk "github.com/harishhary/blink/pkg/enrichments/sdk" +) + +// addGeo annotates each alert with geo_country and geo_is_internal derived +// from the source_ip field in the alert event. In production, replace the +// stub lookup with a real GeoIP database (e.g. MaxMind GeoLite2). +// +// All static metadata (name, id, enabled, depends_on, etc.) is declared in +// the companion add-geo.yaml sidecar file. +type addGeo struct{ sdk.BaseEnrichment } + +var privateNets = mustParseCIDRs([]string{ + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", + "127.0.0.0/8", +}) + +func mustParseCIDRs(cidrs []string) []*net.IPNet { + out := make([]*net.IPNet, 0, len(cidrs)) + for _, cidr := range cidrs { + _, network, err := net.ParseCIDR(cidr) + if err != nil { + panic(err) + } + out = append(out, network) + } + return out +} + +func isPrivate(ipStr string) bool { + ip := net.ParseIP(ipStr) + if ip == nil { + return false + } + for _, network := range privateNets { + if network.Contains(ip) { + return true + } + } + return false +} + +// Enrich receives the full alerts.Alert struct serialised to JSON (PascalCase +// field names, no struct tags). Return only the new fields to add; the host +// merges them into alert.Event. 
+func (addGeo) Enrich(_ context.Context, alert map[string]any) (map[string]any, errors.Error) { + event, _ := alert["Event"].(map[string]any) + sourceIP, _ := event["source_ip"].(string) + + internal := isPrivate(sourceIP) + + country := "external" + if internal { + country = "internal" + } + + return map[string]any{ + "geo_country": country, + "geo_is_internal": internal, + }, nil +} + +func main() { + sdk.Serve(addGeo{}) +} diff --git a/examples/formatters/slack/main.go b/examples/formatters/slack/main.go new file mode 100644 index 0000000..82812d9 --- /dev/null +++ b/examples/formatters/slack/main.go @@ -0,0 +1,51 @@ +package main + +import ( + "context" + "fmt" + + "github.com/harishhary/blink/internal/errors" + sdk "github.com/harishhary/blink/pkg/formatters/sdk" +) + +// slackFormatter converts an alert dict into a Slack Block Kit payload. +// The host serialises the returned map to JSON and forwards it to the +// configured Slack output. +// +// All static metadata (name, id, enabled, etc.) is declared in +// the companion slack.yaml sidecar file. +type slackFormatter struct{ sdk.BaseFormatter } + +// Format receives the full alerts.Alert struct serialised to JSON. +// alerts.Alert has no JSON struct tags, so all field names are PascalCase. +// Event fields (source_ip etc.) are nested under "Event". +func (slackFormatter) Format(_ context.Context, alert map[string]any) (map[string]any, errors.Error) { + alertID, _ := alert["AlertID"].(string) + created, _ := alert["Created"].(string) + + // Rule fields are available via alert["Rule"] (a *config.RuleMetadata). + // Cast it if you need structured access; common fields are already in the event. 
+ event, _ := alert["Event"].(map[string]any) + sourceName, _ := event["source_name"].(string) + + header := ":rotating_light: *Alert fired*" + body := fmt.Sprintf("*Source:* %s\n*Alert ID:* `%s` • *Time:* %s", sourceName, alertID, created) + + return map[string]any{ + "text": fmt.Sprintf("Alert fired — %s", alertID), + "blocks": []map[string]any{ + { + "type": "header", + "text": map[string]any{"type": "plain_text", "text": header, "emoji": true}, + }, + { + "type": "section", + "text": map[string]any{"type": "mrkdwn", "text": body}, + }, + }, + }, nil +} + +func main() { + sdk.Serve(slackFormatter{}) +} diff --git a/examples/formatters/slack/slack.yaml b/examples/formatters/slack/slack.yaml new file mode 100644 index 0000000..2ec91f0 --- /dev/null +++ b/examples/formatters/slack/slack.yaml @@ -0,0 +1,11 @@ +id: "550e8400-e29b-41d4-a716-446655440011" +name: "slack" +display_name: "Slack Formatter" +description: "Converts an alert into a Slack Block Kit payload." +enabled: true +version: "1.0.0" + +# Rollout control (optional — defaults to blue-green). +# mode: "blue-green" # blue-green | canary | shadow +# min_procs: 1 +# max_procs: 2 diff --git a/examples/matchers/allow-all/allow-all.yaml b/examples/matchers/allow-all/allow-all.yaml new file mode 100644 index 0000000..a8e89fd --- /dev/null +++ b/examples/matchers/allow-all/allow-all.yaml @@ -0,0 +1,15 @@ +id: "550e8400-e29b-41d4-a716-446655440012" +name: "allow_all" +display_name: "Allow All" +description: "Matches every event. Use for testing only." +enabled: true +version: "1.0.0" + +# global: true means this matcher runs on all rules regardless of their +# matchers list. Set to false (or omit) for opt-in use only. +global: false + +# Rollout control (optional — defaults to blue-green). 
+# mode: "blue-green" # blue-green | canary | shadow +# min_procs: 1 +# max_procs: 2 diff --git a/examples/matchers/allow-all/main.go b/examples/matchers/allow-all/main.go index 8ec3c21..73f1f6d 100644 --- a/examples/matchers/allow-all/main.go +++ b/examples/matchers/allow-all/main.go @@ -8,18 +8,11 @@ import ( "github.com/harishhary/blink/pkg/matchers/sdk" ) +// allowAll matches every event. Use for testing only. +// All static metadata (name, id, enabled, global, etc.) is declared in +// the companion allow-all.yaml sidecar file. type allowAll struct{ sdk.BaseMatcher } -func (allowAll) Metadata() sdk.MatcherMetadata { - return sdk.MatcherMetadata{ - ID: "allow-all", - Name: "Allow All", - Description: "Matches every event - use for testing only.", - Enabled: true, - Version: "1.0.0", - } -} - func (allowAll) Match(_ context.Context, _ events.Event) (bool, errors.Error) { return true, nil } diff --git a/examples/rules/failed-login/main.go b/examples/rules/failed-login/main.go new file mode 100644 index 0000000..db3ce1b --- /dev/null +++ b/examples/rules/failed-login/main.go @@ -0,0 +1,54 @@ +package main + +import ( + "context" + "fmt" + "strings" + + "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/pkg/events" + "github.com/harishhary/blink/pkg/rules/sdk" +) + +// failedLogin fires when a login attempt is recorded as failed. +// Rule metadata (severity, log_types, matchers, etc.) lives in rule.yaml. +// +// It overrides AlertTitle, AlertContext, and AlertSeverity to produce +// richer alerts. All other sdk.BaseRule methods use their default (no-op) values. +type failedLogin struct{ sdk.BaseRule } + +func (failedLogin) Evaluate(_ context.Context, event events.Event) (bool, errors.Error) { + action, _ := event["action"].(string) + status, _ := event["status"].(string) + return strings.EqualFold(action, "login") && strings.EqualFold(status, "failed"), nil +} + +// AlertTitle produces "Failed login: " using the event's user field. 
+func (failedLogin) AlertTitle(event events.Event) string { + user, _ := event["user"].(string) + if user == "" { + return "Failed login attempt" + } + return fmt.Sprintf("Failed login: %s", user) +} + +// AlertContext adds structured fields that enrich the alert for downstream rules and outputs. +func (failedLogin) AlertContext(event events.Event) map[string]any { + return map[string]any{ + "login_action": event["action"], + "login_status": event["status"], + "source_ip": event["source_ip"], + } +} + +// AlertSeverity escalates to "high" when a failure count field is present and large. +func (failedLogin) AlertSeverity(event events.Event) string { + if count, ok := event["failure_count"].(float64); ok && count >= 10 { + return "high" + } + return "" // "" → use YAML default ("medium") +} + +func main() { + sdk.Serve(failedLogin{}) +} diff --git a/examples/rules/failed-login/rule.yaml b/examples/rules/failed-login/rule.yaml new file mode 100644 index 0000000..c10bde6 --- /dev/null +++ b/examples/rules/failed-login/rule.yaml @@ -0,0 +1,19 @@ +id: "00000000-0000-0000-0000-000000000002" +name: "failed_login" +display_name: "Failed Login" +description: "Fires on any failed login attempt." +enabled: true +version: "1.0.0" + +severity: "medium" +confidence: "high" + +log_types: ["application"] +matchers: ["allow-all"] + +req_subkeys: [] +merge_by_keys: ["source_ip"] +merge_window_mins: 5 + +signal: false +tags: ["authentication", "brute-force"] diff --git a/examples/tuning_rules/boost-external-ip/boost-external-ip.yaml b/examples/tuning_rules/boost-external-ip/boost-external-ip.yaml new file mode 100644 index 0000000..a08373d --- /dev/null +++ b/examples/tuning_rules/boost-external-ip/boost-external-ip.yaml @@ -0,0 +1,23 @@ +id: "550e8400-e29b-41d4-a716-446655440013" +name: "boost_external_ip" +display_name: "Boost External IP" +description: "Raises alert confidence when source_ip is not in RFC 1918 address space." 
+enabled: true +version: "1.0.0" + +# global: true applies this tuning rule to every alert, not just those whose +# rule lists it explicitly. +global: false + +# rule_type controls what the tuning rule does when Tune() returns true. +# ignore — suppress the alert entirely +# set_confidence — set confidence to the value in `confidence` +# increase_confidence — add `confidence` to the current score +# decrease_confidence — subtract `confidence` from the current score +rule_type: "increase_confidence" +confidence: "medium" + +# Rollout control (optional — defaults to blue-green). +# mode: "blue-green" # blue-green | canary | shadow +# min_procs: 1 +# max_procs: 2 diff --git a/examples/tuning_rules/boost-external-ip/main.go b/examples/tuning_rules/boost-external-ip/main.go new file mode 100644 index 0000000..2dc2324 --- /dev/null +++ b/examples/tuning_rules/boost-external-ip/main.go @@ -0,0 +1,61 @@ +package main + +import ( + "context" + "net" + + "github.com/harishhary/blink/internal/errors" + sdk "github.com/harishhary/blink/pkg/tuning_rules/sdk" +) + +// boostExternalIP raises alert confidence when the source_ip is not in +// RFC 1918 address space — external origin is a stronger signal. +// +// All static metadata (name, id, enabled, global, rule_type, confidence, etc.) +// is declared in the companion boost-external-ip.yaml sidecar file. 
+type boostExternalIP struct{ sdk.BaseTuningRule } + +var privateNets = mustParseCIDRs([]string{ + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", + "127.0.0.0/8", +}) + +func mustParseCIDRs(cidrs []string) []*net.IPNet { + out := make([]*net.IPNet, 0, len(cidrs)) + for _, cidr := range cidrs { + _, network, err := net.ParseCIDR(cidr) + if err != nil { + panic(err) + } + out = append(out, network) + } + return out +} + +func isPrivate(ipStr string) bool { + ip := net.ParseIP(ipStr) + if ip == nil { + return false + } + for _, network := range privateNets { + if network.Contains(ip) { + return true + } + } + return false +} + +// Tune returns true when the rule applies — i.e. the source IP is external. +// alert is the full alerts.Alert struct serialised to JSON (no struct tags, +// so field names are PascalCase). The event fields live under "Event". +func (boostExternalIP) Tune(_ context.Context, alert map[string]any) (bool, errors.Error) { + event, _ := alert["Event"].(map[string]any) + sourceIP, _ := event["source_ip"].(string) + return !isPrivate(sourceIP), nil +} + +func main() { + sdk.Serve(boostExternalIP{}) +} diff --git a/internal/backends/backend.go b/internal/backends/backend.go index f5cdaf0..10d6ec2 100644 --- a/internal/backends/backend.go +++ b/internal/backends/backend.go @@ -4,7 +4,7 @@ import ( "context" "github.com/harishhary/blink/pkg/alerts" - "github.com/harishhary/blink/pkg/rules" + "github.com/harishhary/blink/pkg/rules/config" ) type Record map[string]any @@ -26,7 +26,7 @@ type IAlertStore interface { // IRuleStore covers rule-level queries (distinct rule names + bulk rule fetch). type IRuleStore interface { RuleNamesGenerator() <-chan string - FetchAllRules() (<-chan rules.Metadata, error) + FetchAllRules() (<-chan *config.RuleMetadata, error) } // IBackend is the full backend capability: alert store + rule store. 
diff --git a/internal/backends/dynamodb/dynamodb.go b/internal/backends/dynamodb/dynamodb.go index b4cde60..d3d6343 100644 --- a/internal/backends/dynamodb/dynamodb.go +++ b/internal/backends/dynamodb/dynamodb.go @@ -16,7 +16,7 @@ import ( "github.com/harishhary/blink/internal/backends" "github.com/harishhary/blink/internal/helpers" "github.com/harishhary/blink/pkg/alerts" - "github.com/harishhary/blink/pkg/rules" + rulesconfig "github.com/harishhary/blink/pkg/rules/config" ) type DynamoDBBackend struct { @@ -356,13 +356,13 @@ func (at *DynamoDBBackend) ToRecord(alert *alerts.Alert) (backends.Record, error return result, nil } -func (at *DynamoDBBackend) FetchAllRules() (<-chan rules.Metadata, error) { +func (at *DynamoDBBackend) FetchAllRules() (<-chan *rulesconfig.RuleMetadata, error) { input := &dynamodb.ScanInput{ TableName: aws.String(at.dbName), Select: types.SelectAllAttributes, } - out := make(chan rules.Metadata) + out := make(chan *rulesconfig.RuleMetadata) go func() { defer close(out) generator := at.paginateScan(at.db.Scan, input) @@ -379,11 +379,10 @@ func (at *DynamoDBBackend) FetchAllRules() (<-chan rules.Metadata, error) { return out, nil } -func (at *DynamoDBBackend) unmarshalRule(item map[string]types.AttributeValue) (rules.Metadata, error) { - var rule rules.Metadata - err := attributevalue.UnmarshalMap(item, &rule) - if err != nil { +func (at *DynamoDBBackend) unmarshalRule(item map[string]types.AttributeValue) (*rulesconfig.RuleMetadata, error) { + var rule rulesconfig.RuleMetadata + if err := attributevalue.UnmarshalMap(item, &rule); err != nil { return nil, fmt.Errorf("failed to unmarshal item to rule: %w", err) } - return rule, nil + return &rule, nil } diff --git a/internal/pluginmgr/manager.go b/internal/pluginmgr/manager.go deleted file mode 100644 index 45c7b30..0000000 --- a/internal/pluginmgr/manager.go +++ /dev/null @@ -1,555 +0,0 @@ -package pluginmgr - -import ( - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "sync" - 
"time" - - "github.com/fsnotify/fsnotify" - plugin "github.com/hashicorp/go-plugin" - "google.golang.org/grpc" - - "github.com/harishhary/blink/internal/errors" - "github.com/harishhary/blink/internal/helpers" - "github.com/harishhary/blink/internal/logger" -) - -// Plugin is implemented by every plugin Manager - it can be started. -type Plugin interface { - Start(ctx context.Context) error -} - -// ISyncable is the type constraint for all plugin types managed by a Manager. -type ISyncable interface { - Name() string - Description() string - Enabled() bool - Checksum() string -} - -// PluginLifecycle provides the health-check and graceful-shutdown primitives the Manager uses in ping loops and kill paths. -type PluginLifecycle interface { - Ping(ctx context.Context) error - Shutdown(ctx context.Context) error -} - -// PluginHandle tracks everything the Manager needs for one running plugin subprocess. -type PluginHandle struct { - Client *plugin.Client - Lifecycle PluginLifecycle - BinPath string - ID string // stable plugin identifier (e.g. UUID); used for bus messages and pool ops - Name string // human-readable display name; used for logging - Hash string // SHA-256 of the binary at launch time - killOnce sync.Once - stopped chan struct{} -} - -// PluginAdapter[T] encapsulates every piece of type-specific plugin logic. -// Implement once per plugin type and inject into NewManager. -type PluginAdapter[T ISyncable] interface { - // This is the go-plugin dispense key, e.g. "rule", "enrichment". - PluginKey() string - // This is the HandshakeConfig cookie value, e.g. "rule_v1". - MagicValue() string - // GRPCPlugin returns the go-plugin.Plugin that constructs the gRPC client stub. - GRPCPlugin() plugin.Plugin - // Handshake type-asserts the dispensed raw interface, calls Init (and optionally GetMetadata), - // and returns the wrapped public T, a PluginLifecycle, the plugin stable ID, the display name, and any error. 
- Handshake(ctx context.Context, raw interface{}, binPath string, hash string) (T, PluginLifecycle, string, string, error) - // IsEnabled reports whether a running handle should continue running. - IsEnabled(handle *PluginHandle) bool - // Returns how many subprocess instances to spawn for this binary. - // Return 1 (or ≤ 0) for the default single-worker behaviour. - Workers(binPath string) int -} - -// startFailure tracks consecutive start failures for a binary path. -type startFailure struct { - count int - nextRetry time.Time - hash string // hash at time of last failure; reset backoff if binary changes -} - -// PluginManager[T] is the generic plugin subprocess manager. -// It watches a directory for executable binaries, manages their subprocess lifecycle, and calls notify for Register/Update/Unregister events so the caller can update pools. -type PluginManager[T ISyncable] struct { - log *logger.Logger - notify Notify - dir string - adapter PluginAdapter[T] - metrics *PluginManagerMetrics - mu sync.RWMutex - plugin_handles map[string][]*PluginHandle - failures map[string]*startFailure - restarting map[string]struct{} // paths mid-restart; reconcile skips these to prevent double-start -} - -func NewPluginManager[T ISyncable]( - log *logger.Logger, - notify Notify, - dir string, - adapter PluginAdapter[T], - metrics *PluginManagerMetrics, -) *PluginManager[T] { - return &PluginManager[T]{ - log: log, - notify: notify, - dir: dir, - adapter: adapter, - metrics: metrics, - plugin_handles: make(map[string][]*PluginHandle), - failures: make(map[string]*startFailure), - restarting: make(map[string]struct{}), - } -} - -// Performs an initial reconcile then watches the plugin directory for changes. 
-func (m *PluginManager[T]) Start(ctx context.Context) error { - if err := m.reconcile("initial"); err != nil { - return err - } - - w, err := fsnotify.NewWatcher() - if err != nil { - return err - } - if err := w.Add(m.dir); err != nil { - w.Close() - return err - } - - go func() { - defer w.Close() - var timer *time.Timer - debounce := 400 * time.Millisecond - // Periodic fallback: on macOS/kqueue, REMOVE events may not fire while a running - // subprocess holds the binary's fd open. A 5-second poll catches those gaps, - // and also picks up YAML sidecar changes that disable/remove rules. - poll := time.NewTicker(5 * time.Second) - defer poll.Stop() - - trigger := func(reason string) { - if err := m.reconcile(reason); err != nil { - m.log.ErrorF("reconcile error: %v", err) - } - } - - for { - select { - case evt, ok := <-w.Events: - if !ok { - return - } - info, _ := os.Stat(evt.Name) - if info != nil && info.Mode()&0111 == 0 { - continue // skip non-executables - } - // AfterFunc timers have no drainable C channel - just Stop and replace. 
- if timer != nil { - timer.Stop() - } - timer = time.AfterFunc(debounce, func() { trigger("debounce") }) - case <-poll.C: - trigger("poll") - case err := <-w.Errors: - m.log.ErrorF("fsnotify error: %v", err) - trigger("overflow") - case <-ctx.Done(): - return - } - } - }() - - return nil -} - -func (m *PluginManager[T]) reconcile(reason string) error { - m.log.Info("reconciling %s plugins (%s)...", m.adapter.PluginKey(), reason) - - entries, err := os.ReadDir(m.dir) - if err != nil { - return err - } - - seen := make(map[string]struct{}) - for _, e := range entries { - if e.IsDir() { - continue - } - path := filepath.Join(m.dir, e.Name()) - info, err := e.Info() - if err != nil || info.Mode()&0111 == 0 { - continue // skip non-executables - } - h, err := helpers.BinaryChecksum(path) - if err != nil { - m.log.ErrorF("hash %s: %v", path, err) - continue - } - seen[path] = struct{}{} - - m.mu.RLock() - handles, exists := m.plugin_handles[path] - _, pending := m.restarting[path] - m.mu.RUnlock() - - if pending { - continue // pingLoop is already handling the restart - } - - if exists { - if handles[0].Hash == h { - continue // binary unchanged - } - if err := m.update(path, handles, h); err != nil { - m.log.ErrorF("update %s %s: %v", m.adapter.PluginKey(), path, err) - } - continue - } - - if err := m.startWithBackoff(path, h); err != nil { - m.log.ErrorF("start %s %s: %v", m.adapter.PluginKey(), path, err) - } - } - - // Collect plugins that need to be stopped or removed, then act outside the lock - // so that kill() (gRPC Shutdown, up to 3s) does not block readers. 
- type pendingAction struct { - key string - handles []*PluginHandle - perm bool // true = binary deleted (remove); false = disabled (stop) - } - var pending []pendingAction - m.mu.RLock() - for key, handles := range m.plugin_handles { - _, present := seen[key] - if !present { - pending = append(pending, pendingAction{key, handles, true}) - } else if !m.adapter.IsEnabled(handles[0]) { - pending = append(pending, pendingAction{key, handles, false}) - } - } - m.mu.RUnlock() - - for _, p := range pending { - if p.perm { - m.remove(p.key, p.handles) - } else { - m.stop(p.key, p.handles) - } - } - return nil -} - -// This is a gRPC service config that retries UNAVAILABLE responses with exponential backoff. This absorbs the startup race where the subprocess hasn't yet -// bound its port when the first RPC arrives. maxAttempts=3 means 1 attempt + 2 retries. -const pluginRetryPolicy = `{ - "methodConfig": [{ - "name": [{}], - "retryPolicy": { - "maxAttempts": 3, - "initialBackoff": "0.1s", - "maxBackoff": "1s", - "backoffMultiplier": 2.0, - "retryableStatusCodes": ["UNAVAILABLE"] - } - }] -}` - -// spawn ONE subprocess, runs the PluginAdapter handshake, and returns the -// wrapped handle. It does NOT store the handle in plugin_handles or start pingLoop - -// spawnN handles that after all worker instances are ready. 
-func (m *PluginManager[T]) spawn(path, hash string) (T, *PluginHandle, error) { - startedAt := time.Now() - - cfg := &plugin.ClientConfig{ - HandshakeConfig: plugin.HandshakeConfig{ - ProtocolVersion: 1, - MagicCookieKey: "BLINK_PLUGIN", - MagicCookieValue: m.adapter.MagicValue(), - }, - Cmd: exec.Command(path), - AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, - Plugins: map[string]plugin.Plugin{ - m.adapter.PluginKey(): m.adapter.GRPCPlugin(), - }, - GRPCDialOptions: []grpc.DialOption{ - grpc.WithDefaultServiceConfig(pluginRetryPolicy), - }, - } - - cl := plugin.NewClient(cfg) - rpcClient, err := cl.Client() - if err != nil { - cl.Kill() - var zero T - return zero, nil, fmt.Errorf("connect: %w", err) - } - - raw, err := rpcClient.Dispense(m.adapter.PluginKey()) - if err != nil { - cl.Kill() - var zero T - return zero, nil, fmt.Errorf("dispense: %w", err) - } - - wrapped, lifecycle, id, name, err := m.adapter.Handshake(context.Background(), raw, path, hash) - if err != nil { - cl.Kill() - var zero T - return zero, nil, err - } - - handle := &PluginHandle{Client: cl, Lifecycle: lifecycle, BinPath: path, ID: id, Name: name, Hash: hash, stopped: make(chan struct{})} - - m.metrics.StartLatency.Observe(time.Since(startedAt).Seconds()) - m.metrics.ActiveSubprocesses.WithLabelValues(m.adapter.PluginKey()).Inc() - m.metrics.Starts.Inc() - m.log.Info("%s started: %s [%s] (%s)", m.adapter.PluginKey(), name, id, path) - - return wrapped, handle, nil -} - -// spawnN spawns n worker subprocess instances for the same binary, stores the full -// slice in plugin_handles, and starts a pingLoop for each. If any spawn fails, all -// already-started subprocesses are killed and an error is returned. 
-func (m *PluginManager[T]) spawnN(path, hash string, n int) ([]T, []*PluginHandle, error) { - if n <= 0 { - n = 1 - } - wrapped := make([]T, 0, n) - handles := make([]*PluginHandle, 0, n) - - for i := 0; i < n; i++ { - w, h, err := m.spawn(path, hash) - if err != nil { - for _, h := range handles { - m.kill(h) - } - return nil, nil, err - } - wrapped = append(wrapped, w) - handles = append(handles, h) - } - - m.mu.Lock() - m.plugin_handles[path] = handles - m.mu.Unlock() - - for _, h := range handles { - go m.pingLoop(h) - } - return wrapped, handles, nil -} - -// wraps start() with exponential backoff on consecutive failures. -func (m *PluginManager[T]) startWithBackoff(path, hash string) error { - m.mu.Lock() - f := m.failures[path] - if f != nil { - if f.hash != hash { - // Binary changed — reset backoff immediately. - delete(m.failures, path) - f = nil - } else if time.Now().Before(f.nextRetry) { - m.mu.Unlock() - m.log.Info("%s %s start deferred (backoff, retry in %v)", m.adapter.PluginKey(), path, time.Until(f.nextRetry).Round(time.Second)) - return nil - } - } - m.mu.Unlock() - - err := m.start(path, hash) - if err != nil { - m.mu.Lock() - f = m.failures[path] // re-fetch: another goroutine may have updated this key between the two lock acquisitions - if f == nil { - f = &startFailure{hash: hash} - m.failures[path] = f - } - f.count++ - backoff := time.Duration(10<<f.count) * time.Second - if backoff > 5*time.Minute { - backoff = 5 * time.Minute - } - f.nextRetry = time.Now().Add(backoff) - m.mu.Unlock() - m.log.ErrorF("%s %s start failed (attempt %d), next retry in %v", m.adapter.PluginKey(), path, f.count, backoff) - return err - } - - // Success — clear any failure state. - m.mu.Lock() - delete(m.failures, path) - m.mu.Unlock() - return nil -} - -// spawns n worker subprocesses and notifies the pool to register them. 
-func (m *PluginManager[T]) start(path, hash string) error { - n := m.adapter.Workers(path) - wrapped, handles, err := m.spawnN(path, hash, n) - if err != nil { - return err - } - m.notify(NewRegisterMessage[T](wrapped, len(handles))) - return nil -} - -// spawns new worker subprocesses and notifies the pool with an onDrained callback. -// The old subprocesses are only killed after all in-flight calls on the old VersionedPool -// complete - ensuring no call ever hits a dead gRPC connection. -func (m *PluginManager[T]) update(path string, oldHandles []*PluginHandle, newHash string) error { - n := m.adapter.Workers(path) - wrapped, newHandles, err := m.spawnN(path, newHash, n) - if err != nil { - return err - } - m.notify(NewUpdateMessage[T](wrapped, len(newHandles), func() { - for _, h := range oldHandles { - m.kill(h) - } - })) - m.metrics.Updates.Inc() - m.log.Info("%s updated: %s (%d worker(s))", m.adapter.PluginKey(), path, len(newHandles)) - return nil -} - -// kill gracefully shuts down the subprocess exactly once (safe for concurrent calls). -// It does NOT touch plugin_handles - callers that own the map entry call evict instead. -func (m *PluginManager[T]) kill(handle *PluginHandle) { - handle.killOnce.Do(func() { - close(handle.stopped) - defer func() { - if r := recover(); r != nil { - m.log.ErrorF("panic during shutdown of %s [%s]: %v", handle.Name, handle.ID, r) - } - }() - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - _ = handle.Lifecycle.Shutdown(ctx) - cancel() - handle.Client.Kill() - m.metrics.ActiveSubprocesses.WithLabelValues(m.adapter.PluginKey()).Dec() - }) -} - -// kills all handles in the group and removes the group from plugin_handles. -// It acquires the write lock only for the map delete, so kill() (gRPC Shutdown) -// runs outside the lock. Guards against a concurrent handle replacement at the same key -// by checking that the stored slice still begins with the same pointer. 
-func (m *PluginManager[T]) evict(key string, handles []*PluginHandle) { - for _, h := range handles { - m.kill(h) - } - m.mu.Lock() - current := m.plugin_handles[key] - if len(current) > 0 && len(handles) > 0 && current[0] == handles[0] { - delete(m.plugin_handles, key) - } - m.mu.Unlock() -} - -// evicts the subprocesses transiently (crash restart, config disable) and -// sends UnregisterMessage - pool removes the active entry but does NOT tombstone. -func (m *PluginManager[T]) stop(key string, handles []*PluginHandle) { - m.evict(key, handles) - m.notify(NewUnregisterMessage[T](handles[0].ID)) - m.log.Info("%s stopped: %s [%s]", m.adapter.PluginKey(), handles[0].Name, handles[0].ID) -} - -// evicts the subprocesses permanently (binary deleted from disk) and -// sends RemoveMessage - pool removes the active entry AND tombstones the plugin ID. -func (m *PluginManager[T]) remove(key string, handles []*PluginHandle) { - m.evict(key, handles) - m.notify(NewRemoveMessage[T](handles[0].ID)) - m.log.Info("%s removed: %s [%s]", m.adapter.PluginKey(), handles[0].Name, handles[0].ID) -} - -// stops the subprocesses and restarts them with backoff. -// Sets restarting[path] before stop() so reconcile() does not race to fill the -// now-empty plugin_handles slot while the new spawn is in progress. -// If restarting[path] is already set, another pingLoop worker beat us to it — bail. 
-func (m *PluginManager[T]) restart(key string, handles []*PluginHandle) error { - path := handles[0].BinPath - hash := handles[0].Hash - - m.mu.Lock() - if _, already := m.restarting[path]; already { - m.mu.Unlock() - return nil // another pingLoop worker is already handling this restart - } - m.restarting[path] = struct{}{} - m.mu.Unlock() - - m.stop(key, handles) - - err := m.startWithBackoff(path, hash) - - m.mu.Lock() - delete(m.restarting, path) - m.mu.Unlock() - - return err -} - -func (m *PluginManager[T]) pingLoop(handle *PluginHandle) { - t := time.NewTicker(15 * time.Second) - defer t.Stop() - for { - select { - case <-handle.stopped: - return // intentionally stopped - do not restart - case <-t.C: - // During a graceful update, spawnN stores the new handles in the map - // before notify() is called. If this handle is no longer in the active - // slice, it was replaced - exit without restarting. - m.mu.RLock() - current := m.plugin_handles[handle.BinPath] - m.mu.RUnlock() - active := false - for _, h := range current { - if h == handle { - active = true - break - } - } - if !active { - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - err := handle.Lifecycle.Ping(ctx) - cancel() - if err != nil { - m.metrics.Crashes.Inc() - m.log.ErrorF("%s crash/health fail %s: %v - restarting", m.adapter.PluginKey(), handle.Name, err) - // Fetch the full current group so restart kills all workers, not just this one. - m.mu.RLock() - group := m.plugin_handles[handle.BinPath] - m.mu.RUnlock() - // Guard: if our handle is no longer in the group, update() replaced it - // while Ping() was running. The new workers' pingLoops own any future restarts. 
- inGroup := false - for _, h := range group { - if h == handle { - inGroup = true - break - } - } - if !inGroup { - return - } - if restartErr := m.restart(handle.BinPath, group); restartErr != nil { - m.log.Error(errors.NewF("restart failed for %s: %v", handle.BinPath, restartErr)) - } - m.metrics.Restarts.Inc() - return - } - } - } -} diff --git a/internal/pluginmgr/messages.go b/internal/pluginmgr/messages.go deleted file mode 100644 index 9004e3f..0000000 --- a/internal/pluginmgr/messages.go +++ /dev/null @@ -1,55 +0,0 @@ -package pluginmgr - -import "github.com/harishhary/blink/internal/messaging" - -// Notify is the callback a PluginManager calls when a plugin starts, updates, or stops. -// Implementations are typically pool.Sync methods that register/deregister plugin handles. -type Notify = func(messaging.Message) - -// Delivered when a new plugin subprocess is ready. -// Items holds all N worker instances for the binary; MaxProcs is the pool capacity hint. -type RegisterMessage[T ISyncable] struct { - messaging.IsMessage - Items []T - MaxProcs int -} - -// Delivered when a plugin subprocess is stopped transiently aka a crash being restarted, or a plugin disabled via config. The plugin may come back. -// Pool removes the active entry but does NOT tombstone the plugin ID. -type UnregisterMessage[T ISyncable] struct { - messaging.IsMessage - ItemID string -} - -// Delivered when a plugin binary is permanently deleted from disk. -// The plugin is not expected to return. Pool removes the active entry AND tombstones the plugin ID. -type RemoveMessage[T ISyncable] struct { - messaging.IsMessage - ItemID string -} - -// Delivered when a plugin binary changes in-place. -// Items holds all N worker instances for the new binary version. -// OnDrained is called by ProcessPool.drain once all in-flight calls on the old VersionedPool complete - the PluginManager uses it to kill the old subprocesses only after the pool has finished draining. 
-type UpdateMessage[T ISyncable] struct { - messaging.IsMessage - Items []T - MaxProcs int - OnDrained func() -} - -func NewRegisterMessage[T ISyncable](items []T, maxProcs int) RegisterMessage[T] { - return RegisterMessage[T]{Items: items, MaxProcs: maxProcs} -} - -func NewUnregisterMessage[T ISyncable](itemID string) UnregisterMessage[T] { - return UnregisterMessage[T]{ItemID: itemID} -} - -func NewRemoveMessage[T ISyncable](itemID string) RemoveMessage[T] { - return RemoveMessage[T]{ItemID: itemID} -} - -func NewUpdateMessage[T ISyncable](items []T, maxProcs int, onDrained func()) UpdateMessage[T] { - return UpdateMessage[T]{Items: items, MaxProcs: maxProcs, OnDrained: onDrained} -} diff --git a/internal/pluginmgr/metrics.go b/internal/pluginmgr/metrics.go deleted file mode 100644 index eea7b50..0000000 --- a/internal/pluginmgr/metrics.go +++ /dev/null @@ -1,47 +0,0 @@ -package pluginmgr - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" -) - -// Holds the Prometheus metrics shared by all plugin managers. -type PluginManagerMetrics struct { - Starts prometheus.Counter - Crashes prometheus.Counter - Restarts prometheus.Counter - Updates prometheus.Counter - StartLatency prometheus.Histogram - ActiveSubprocesses *prometheus.GaugeVec -} - -// Registers and returns a metric set for the given subsystem. 
-func NewPluginManagerMetrics(subsystem string) *PluginManagerMetrics { - return &PluginManagerMetrics{ - Starts: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: "blink", Subsystem: "plugin_manager" + subsystem, Name: "plugin_starts_total", - Help: "Total plugin subprocess starts.", - }), - Crashes: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: "blink", Subsystem: "plugin_manager" + subsystem, Name: "plugin_crashes_total", - Help: "Total plugin subprocess crashes detected by ping loop.", - }), - Restarts: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: "blink", Subsystem: "plugin_manager" + subsystem, Name: "plugin_restarts_total", - Help: "Total plugin subprocess restarts after crash.", - }), - Updates: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: "blink", Subsystem: "plugin_manager" + subsystem, Name: "plugin_updates_total", - Help: "Total plugin subprocess hot-updates (binary replacement).", - }), - StartLatency: promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: "blink", Subsystem: "plugin_manager" + subsystem, Name: "plugin_start_latency_seconds", - Help: "Time from plugin launch start to first bus publish.", - Buckets: prometheus.DefBuckets, - }), - ActiveSubprocesses: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "blink", Subsystem: "plugin_manager" + subsystem, Name: "plugin_active_subprocesses", - Help: "Number of currently active plugin subprocesses.", - }, []string{"type"}), - } -} diff --git a/internal/services/plugin_sync.go b/internal/services/plugin_sync.go index 322f938..69de457 100644 --- a/internal/services/plugin_sync.go +++ b/internal/services/plugin_sync.go @@ -9,13 +9,13 @@ import ( "github.com/harishhary/blink/internal/configuration" "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" ) type PluginSyncService struct { svcctx.ServiceContext serviceName string - plugin pluginmgr.Plugin + 
plugin plugin.Plugin } // NewPluginSyncService creates a service that starts the plugin manager and waits for @@ -23,7 +23,7 @@ type PluginSyncService struct { // callback so lifecycle events flow directly to the pool with no intermediate bus. func NewPluginSyncService( name, displayName, envVar string, - newPluginManager func(*logger.Logger, string) pluginmgr.Plugin, + newPluginManager func(*logger.Logger, string) plugin.Plugin, ) (*PluginSyncService, error) { sc := svcctx.New(displayName) if err := configuration.LoadFromEnvironment(&sc); err != nil { diff --git a/pkg/alerts/alert.go b/pkg/alerts/alert.go index 9f6f930..19f38dc 100644 --- a/pkg/alerts/alert.go +++ b/pkg/alerts/alert.go @@ -12,7 +12,7 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/helpers" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/rules" + "github.com/harishhary/blink/pkg/rules/config" "github.com/harishhary/blink/pkg/scoring" ) @@ -35,20 +35,32 @@ type Alert struct { SourceService string Confidence scoring.Confidence // coming from base rule but changed by tuning rules - Severity scoring.Severity // coming from base rule but changed by asset tagging and dynamicSeverity + Severity scoring.Severity // coming from base rule but changed by asset tagging and AlertSeverity - Rule rules.Metadata + Rule *config.RuleMetadata + OverrideMergeByKeys []string // set by plugin's AlertMergeByKeys; overrides Rule.MergeByKeys() when non-nil +} + +// MergeByKeys returns the effective merge keys for this alert. +// The plugin's AlertMergeByKeys return value takes precedence over the YAML value. 
+func (a *Alert) MergeByKeys() []string {
+	if len(a.OverrideMergeByKeys) > 0 {
+		return a.OverrideMergeByKeys
+	}
+	return a.Rule.MergeByKeys()
+}
 
 // Creates a new Alert
-func NewAlert(rule rules.Metadata, event events.Event, optFns ...AlertOptions) (*Alert, errors.Error) {
+func NewAlert(rule *config.RuleMetadata, event events.Event, optFns ...AlertOptions) (*Alert, errors.Error) {
 	alert := &Alert{
-		AlertID:  uuid.NewString(),
-		Created:  time.Now().UTC(),
-		Attempts: 0,
-		Event:    event,
-		Rule:     rule,
-		Staged:   false,
+		AlertID:    uuid.NewString(),
+		Created:    time.Now().UTC(),
+		Attempts:   0,
+		Event:      event,
+		Rule:       rule,
+		Staged:     false,
+		Severity:   rule.Severity(),
+		Confidence: rule.Confidence(),
 	}
 	for _, optFn := range optFns {
 		optFn(alert)
@@ -66,7 +78,7 @@ func Merge(alerts []*Alert) (*Alert, errors.Error) {
 		return alerts[i].Created.Before(alerts[j].Created)
 	})
 
-	mergeKeys := alerts[0].Rule.MergeByKeys()
+	mergeKeys := alerts[0].MergeByKeys()
 	cleanedEvents := make([]events.Event, len(alerts))
 	for i, alert := range alerts {
 		cleanedEvents[i] = alert.Event.CleanEvent(mergeKeys)
@@ -194,11 +206,11 @@ func (a *Alert) CanMerge(other *Alert) bool {
 		return false
 	}
 
-	if !helpers.EqualStringSlices(a.Rule.MergeByKeys(), other.Rule.MergeByKeys()) {
+	if !helpers.EqualStringSlices(a.MergeByKeys(), other.MergeByKeys()) {
 		return false
 	}
 
-	for _, key := range a.Rule.MergeByKeys() {
+	for _, key := range a.MergeByKeys() {
 		if a.Event.GetFirstKey(key, "n/a") != other.Event.GetFirstKey(key, "n/a2") {
 			return false
 		}
@@ -208,13 +220,13 @@ func (a *Alert) CanMerge(other *Alert) bool {
 }
 
 func (a *Alert) MergeEnabled() bool {
-	return len(a.Rule.MergeByKeys()) > 0 && a.Rule.MergeWindowMins() > 0
+	return len(a.MergeByKeys()) > 0 && a.Rule.MergeWindowMins() > 0
 }
 
 // MergePartitionKey returns a stable Kafka partition key for this alert so that alerts belonging to the same merge group always land on the same partition and therefore the same alert-merger replica.
// The key is "rule_name|key1=val1|key2=val2" with merge-by fields sorted alphabetically. When merge is not enabled the rule name alone is returned, which is still a stable key - the merger will pass those alerts straight through on whichever partition they arrive. func (a *Alert) MergePartitionKey() string { - keys := a.Rule.MergeByKeys() + keys := a.MergeByKeys() sort.Strings(keys) merged := a.Event.GetMergedKeys(keys) parts := make([]string, 0, len(keys)+1) diff --git a/pkg/alerts/convert.go b/pkg/alerts/convert.go index 1907ae4..d574520 100644 --- a/pkg/alerts/convert.go +++ b/pkg/alerts/convert.go @@ -5,7 +5,6 @@ import ( "github.com/harishhary/blink/pkg/alerts/pb" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/rules" "github.com/harishhary/blink/pkg/rules/config" "github.com/harishhary/blink/pkg/scoring" proto "google.golang.org/protobuf/proto" @@ -45,7 +44,8 @@ func AlertToProto(a *Alert) (*pb.Alert, error) { Event: eventStruct, Staged: a.Staged, OutputsSent: a.OutputsSent, - EnrichmentsApplied: a.EnrichmentsApplied, + EnrichmentsApplied: a.EnrichmentsApplied, + OverrideMergeByKeys: a.OverrideMergeByKeys, LogSource: a.LogSource, LogType: a.LogType, SourceEntity: a.SourceEntity, @@ -75,7 +75,8 @@ func ProtoToAlert(p *pb.Alert) (*Alert, error) { Event: event, Staged: p.GetStaged(), OutputsSent: p.GetOutputsSent(), - EnrichmentsApplied: p.GetEnrichmentsApplied(), + EnrichmentsApplied: p.GetEnrichmentsApplied(), + OverrideMergeByKeys: p.GetOverrideMergeByKeys(), LogSource: p.GetLogSource(), LogType: p.GetLogType(), SourceEntity: p.GetSourceEntity(), @@ -87,8 +88,8 @@ func ProtoToAlert(p *pb.Alert) (*Alert, error) { return a, nil } -// Converts a Metadata value to its protobuf representation for embedding in an alert payload. -func ruleToProto(r rules.Metadata) *pb.RuleMetadata { +// Converts a *config.RuleMetadata to its protobuf representation for embedding in an alert payload. 
+func ruleToProto(r *config.RuleMetadata) *pb.RuleMetadata { if r == nil { return nil } @@ -112,7 +113,6 @@ func ruleToProto(r rules.Metadata) *pb.RuleMetadata { Enrichments: r.Enrichments(), TuningRules: r.TuningRules(), Version: r.Version(), - Checksum: r.Checksum(), FileName: r.FileName(), DisplayName: r.DisplayName(), References: r.References(), @@ -132,7 +132,6 @@ func protoToRuleMetadata(m *pb.RuleMetadata) *config.RuleMetadata { EnabledField: m.GetEnabled(), VersionField: m.GetVersion(), FileNameField: m.GetFileName(), - ChecksumField: m.GetChecksum(), SeverityStr: m.GetSeverity(), ConfidenceStr: m.GetConfidence(), SignalThresholdStr: m.GetSignalThreshold(), diff --git a/pkg/alerts/pb/alert.pb.go b/pkg/alerts/pb/alert.pb.go index 161f43c..aad8f35 100644 --- a/pkg/alerts/pb/alert.pb.go +++ b/pkg/alerts/pb/alert.pb.go @@ -2,7 +2,7 @@ // versions: // protoc-gen-go v1.36.9 // protoc v7.34.0 -// source: pb/alert.proto +// source: pkg/alerts/pb/alert.proto package pb @@ -24,7 +24,7 @@ const ( // RuleMetadata carries rule configuration in the alert wire format. // Mirrors pkg/rules/rule.proto Metadata with additions for file_name, -// display_name, references, checksum, and risk_score. +// display_name, references, and risk_score. 
type RuleMetadata struct { state protoimpl.MessageState `protogen:"open.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` @@ -46,7 +46,6 @@ type RuleMetadata struct { Enrichments []string `protobuf:"bytes,17,rep,name=enrichments,proto3" json:"enrichments,omitempty"` TuningRules []string `protobuf:"bytes,18,rep,name=tuning_rules,json=tuningRules,proto3" json:"tuning_rules,omitempty"` Version string `protobuf:"bytes,19,opt,name=version,proto3" json:"version,omitempty"` - Checksum string `protobuf:"bytes,20,opt,name=checksum,proto3" json:"checksum,omitempty"` FileName string `protobuf:"bytes,21,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` DisplayName string `protobuf:"bytes,22,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` References []string `protobuf:"bytes,23,rep,name=references,proto3" json:"references,omitempty"` @@ -57,7 +56,7 @@ type RuleMetadata struct { func (x *RuleMetadata) Reset() { *x = RuleMetadata{} - mi := &file_pb_alert_proto_msgTypes[0] + mi := &file_pkg_alerts_pb_alert_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -69,7 +68,7 @@ func (x *RuleMetadata) String() string { func (*RuleMetadata) ProtoMessage() {} func (x *RuleMetadata) ProtoReflect() protoreflect.Message { - mi := &file_pb_alert_proto_msgTypes[0] + mi := &file_pkg_alerts_pb_alert_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -82,7 +81,7 @@ func (x *RuleMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use RuleMetadata.ProtoReflect.Descriptor instead. 
func (*RuleMetadata) Descriptor() ([]byte, []int) { - return file_pb_alert_proto_rawDescGZIP(), []int{0} + return file_pkg_alerts_pb_alert_proto_rawDescGZIP(), []int{0} } func (x *RuleMetadata) GetId() string { @@ -218,13 +217,6 @@ func (x *RuleMetadata) GetVersion() string { return "" } -func (x *RuleMetadata) GetChecksum() string { - if x != nil { - return x.Checksum - } - return "" -} - func (x *RuleMetadata) GetFileName() string { if x != nil { return x.FileName @@ -256,30 +248,31 @@ func (x *RuleMetadata) GetRiskScore() string { // Alert is the Kafka wire format for a single alert travelling through the // tuner → enricher → formatter → dispatcher pipeline. type Alert struct { - state protoimpl.MessageState `protogen:"open.v1"` - AlertId string `protobuf:"bytes,1,opt,name=alert_id,json=alertId,proto3" json:"alert_id,omitempty"` - Attempts int32 `protobuf:"varint,2,opt,name=attempts,proto3" json:"attempts,omitempty"` - Cluster string `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"` - CreatedNs int64 `protobuf:"varint,4,opt,name=created_ns,json=createdNs,proto3" json:"created_ns,omitempty"` // time.Time as Unix nanoseconds - DispatchedNs int64 `protobuf:"varint,5,opt,name=dispatched_ns,json=dispatchedNs,proto3" json:"dispatched_ns,omitempty"` // time.Time as Unix nanoseconds (0 = not yet dispatched) - Event *structpb.Struct `protobuf:"bytes,6,opt,name=event,proto3" json:"event,omitempty"` // events.Event as structured protobuf - Staged bool `protobuf:"varint,7,opt,name=staged,proto3" json:"staged,omitempty"` - OutputsSent []string `protobuf:"bytes,8,rep,name=outputs_sent,json=outputsSent,proto3" json:"outputs_sent,omitempty"` - LogSource string `protobuf:"bytes,9,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` - LogType string `protobuf:"bytes,10,opt,name=log_type,json=logType,proto3" json:"log_type,omitempty"` - SourceEntity string `protobuf:"bytes,11,opt,name=source_entity,json=sourceEntity,proto3" 
json:"source_entity,omitempty"` - SourceService string `protobuf:"bytes,12,opt,name=source_service,json=sourceService,proto3" json:"source_service,omitempty"` - Confidence string `protobuf:"bytes,13,opt,name=confidence,proto3" json:"confidence,omitempty"` - Severity string `protobuf:"bytes,14,opt,name=severity,proto3" json:"severity,omitempty"` - Rule *RuleMetadata `protobuf:"bytes,15,opt,name=rule,proto3" json:"rule,omitempty"` - EnrichmentsApplied []string `protobuf:"bytes,16,rep,name=enrichments_applied,json=enrichmentsApplied,proto3" json:"enrichments_applied,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + AlertId string `protobuf:"bytes,1,opt,name=alert_id,json=alertId,proto3" json:"alert_id,omitempty"` + Attempts int32 `protobuf:"varint,2,opt,name=attempts,proto3" json:"attempts,omitempty"` + Cluster string `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"` + CreatedNs int64 `protobuf:"varint,4,opt,name=created_ns,json=createdNs,proto3" json:"created_ns,omitempty"` // time.Time as Unix nanoseconds + DispatchedNs int64 `protobuf:"varint,5,opt,name=dispatched_ns,json=dispatchedNs,proto3" json:"dispatched_ns,omitempty"` // time.Time as Unix nanoseconds (0 = not yet dispatched) + Event *structpb.Struct `protobuf:"bytes,6,opt,name=event,proto3" json:"event,omitempty"` // events.Event as structured protobuf + Staged bool `protobuf:"varint,7,opt,name=staged,proto3" json:"staged,omitempty"` + OutputsSent []string `protobuf:"bytes,8,rep,name=outputs_sent,json=outputsSent,proto3" json:"outputs_sent,omitempty"` + LogSource string `protobuf:"bytes,9,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` + LogType string `protobuf:"bytes,10,opt,name=log_type,json=logType,proto3" json:"log_type,omitempty"` + SourceEntity string `protobuf:"bytes,11,opt,name=source_entity,json=sourceEntity,proto3" json:"source_entity,omitempty"` + SourceService string 
`protobuf:"bytes,12,opt,name=source_service,json=sourceService,proto3" json:"source_service,omitempty"` + Confidence string `protobuf:"bytes,13,opt,name=confidence,proto3" json:"confidence,omitempty"` + Severity string `protobuf:"bytes,14,opt,name=severity,proto3" json:"severity,omitempty"` + Rule *RuleMetadata `protobuf:"bytes,15,opt,name=rule,proto3" json:"rule,omitempty"` + EnrichmentsApplied []string `protobuf:"bytes,16,rep,name=enrichments_applied,json=enrichmentsApplied,proto3" json:"enrichments_applied,omitempty"` + OverrideMergeByKeys []string `protobuf:"bytes,17,rep,name=override_merge_by_keys,json=overrideMergeByKeys,proto3" json:"override_merge_by_keys,omitempty"` // set by plugin's AlertMergeByKeys; overrides rule.merge_by_keys when non-empty + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Alert) Reset() { *x = Alert{} - mi := &file_pb_alert_proto_msgTypes[1] + mi := &file_pkg_alerts_pb_alert_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -291,7 +284,7 @@ func (x *Alert) String() string { func (*Alert) ProtoMessage() {} func (x *Alert) ProtoReflect() protoreflect.Message { - mi := &file_pb_alert_proto_msgTypes[1] + mi := &file_pkg_alerts_pb_alert_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -304,7 +297,7 @@ func (x *Alert) ProtoReflect() protoreflect.Message { // Deprecated: Use Alert.ProtoReflect.Descriptor instead. 
func (*Alert) Descriptor() ([]byte, []int) { - return file_pb_alert_proto_rawDescGZIP(), []int{1} + return file_pkg_alerts_pb_alert_proto_rawDescGZIP(), []int{1} } func (x *Alert) GetAlertId() string { @@ -419,11 +412,18 @@ func (x *Alert) GetEnrichmentsApplied() []string { return nil } -var File_pb_alert_proto protoreflect.FileDescriptor +func (x *Alert) GetOverrideMergeByKeys() []string { + if x != nil { + return x.OverrideMergeByKeys + } + return nil +} + +var File_pkg_alerts_pb_alert_proto protoreflect.FileDescriptor -const file_pb_alert_proto_rawDesc = "" + +const file_pkg_alerts_pb_alert_proto_rawDesc = "" + "\n" + - "\x0epb/alert.proto\x12\x06alerts\x1a\x1cgoogle/protobuf/struct.proto\"\xe7\x05\n" + + "\x19pkg/alerts/pb/alert.proto\x12\x06alerts\x1a\x1cgoogle/protobuf/struct.proto\"\xcb\x05\n" + "\fRuleMetadata\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + "\x04name\x18\x02 \x01(\tR\x04name\x12 \n" + @@ -449,15 +449,14 @@ const file_pb_alert_proto_rawDesc = "" + "formatters\x12 \n" + "\venrichments\x18\x11 \x03(\tR\venrichments\x12!\n" + "\ftuning_rules\x18\x12 \x03(\tR\vtuningRules\x12\x18\n" + - "\aversion\x18\x13 \x01(\tR\aversion\x12\x1a\n" + - "\bchecksum\x18\x14 \x01(\tR\bchecksum\x12\x1b\n" + + "\aversion\x18\x13 \x01(\tR\aversion\x12\x1b\n" + "\tfile_name\x18\x15 \x01(\tR\bfileName\x12!\n" + "\fdisplay_name\x18\x16 \x01(\tR\vdisplayName\x12\x1e\n" + "\n" + "references\x18\x17 \x03(\tR\n" + "references\x12\x1d\n" + "\n" + - "risk_score\x18\x18 \x01(\tR\triskScore\"\xa3\x04\n" + + "risk_score\x18\x18 \x01(\tR\triskScore\"\xd8\x04\n" + "\x05Alert\x12\x19\n" + "\balert_id\x18\x01 \x01(\tR\aalertId\x12\x1a\n" + "\battempts\x18\x02 \x01(\x05R\battempts\x12\x18\n" + @@ -479,27 +478,28 @@ const file_pb_alert_proto_rawDesc = "" + "confidence\x12\x1a\n" + "\bseverity\x18\x0e \x01(\tR\bseverity\x12(\n" + "\x04rule\x18\x0f \x01(\v2\x14.alerts.RuleMetadataR\x04rule\x12/\n" + - "\x13enrichments_applied\x18\x10 
\x03(\tR\x12enrichmentsAppliedB\bZ\x06pb/;pbb\x06proto3" + "\x13enrichments_applied\x18\x10 \x03(\tR\x12enrichmentsApplied\x123\n" + + "\x16override_merge_by_keys\x18\x11 \x03(\tR\x13overrideMergeByKeysB\bZ\x06pb/;pbb\x06proto3" var ( - file_pb_alert_proto_rawDescOnce sync.Once - file_pb_alert_proto_rawDescData []byte + file_pkg_alerts_pb_alert_proto_rawDescOnce sync.Once + file_pkg_alerts_pb_alert_proto_rawDescData []byte ) -func file_pb_alert_proto_rawDescGZIP() []byte { - file_pb_alert_proto_rawDescOnce.Do(func() { - file_pb_alert_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_pb_alert_proto_rawDesc), len(file_pb_alert_proto_rawDesc))) +func file_pkg_alerts_pb_alert_proto_rawDescGZIP() []byte { + file_pkg_alerts_pb_alert_proto_rawDescOnce.Do(func() { + file_pkg_alerts_pb_alert_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_pkg_alerts_pb_alert_proto_rawDesc), len(file_pkg_alerts_pb_alert_proto_rawDesc))) }) - return file_pb_alert_proto_rawDescData + return file_pkg_alerts_pb_alert_proto_rawDescData } -var file_pb_alert_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_pb_alert_proto_goTypes = []any{ +var file_pkg_alerts_pb_alert_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_pkg_alerts_pb_alert_proto_goTypes = []any{ (*RuleMetadata)(nil), // 0: alerts.RuleMetadata (*Alert)(nil), // 1: alerts.Alert (*structpb.Struct)(nil), // 2: google.protobuf.Struct } -var file_pb_alert_proto_depIdxs = []int32{ +var file_pkg_alerts_pb_alert_proto_depIdxs = []int32{ 2, // 0: alerts.Alert.event:type_name -> google.protobuf.Struct 0, // 1: alerts.Alert.rule:type_name -> alerts.RuleMetadata 2, // [2:2] is the sub-list for method output_type @@ -509,26 +509,26 @@ var file_pb_alert_proto_depIdxs = []int32{ 0, // [0:2] is the sub-list for field type_name } -func init() { file_pb_alert_proto_init() } -func file_pb_alert_proto_init() { - if File_pb_alert_proto != nil { +func init() { 
file_pkg_alerts_pb_alert_proto_init() } +func file_pkg_alerts_pb_alert_proto_init() { + if File_pkg_alerts_pb_alert_proto != nil { return } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_pb_alert_proto_rawDesc), len(file_pb_alert_proto_rawDesc)), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_alerts_pb_alert_proto_rawDesc), len(file_pkg_alerts_pb_alert_proto_rawDesc)), NumEnums: 0, NumMessages: 2, NumExtensions: 0, NumServices: 0, }, - GoTypes: file_pb_alert_proto_goTypes, - DependencyIndexes: file_pb_alert_proto_depIdxs, - MessageInfos: file_pb_alert_proto_msgTypes, + GoTypes: file_pkg_alerts_pb_alert_proto_goTypes, + DependencyIndexes: file_pkg_alerts_pb_alert_proto_depIdxs, + MessageInfos: file_pkg_alerts_pb_alert_proto_msgTypes, }.Build() - File_pb_alert_proto = out.File - file_pb_alert_proto_goTypes = nil - file_pb_alert_proto_depIdxs = nil + File_pkg_alerts_pb_alert_proto = out.File + file_pkg_alerts_pb_alert_proto_goTypes = nil + file_pkg_alerts_pb_alert_proto_depIdxs = nil } diff --git a/pkg/alerts/pb/alert.proto b/pkg/alerts/pb/alert.proto index cbf2ad8..09bd725 100644 --- a/pkg/alerts/pb/alert.proto +++ b/pkg/alerts/pb/alert.proto @@ -7,7 +7,7 @@ option go_package = "pb/;pb"; // RuleMetadata carries rule configuration in the alert wire format. // Mirrors pkg/rules/rule.proto Metadata with additions for file_name, -// display_name, references, checksum, and risk_score. +// display_name, references, and risk_score. 
 message RuleMetadata {
   string id = 1;
   string name = 2;
@@ -28,11 +28,11 @@ message RuleMetadata {
   repeated string enrichments = 17;
   repeated string tuning_rules = 18;
   string version = 19;
-  string checksum = 20;
-  string file_name = 21;
-  string display_name = 22;
-  repeated string references = 23;
-  string risk_score = 24;
+  reserved 20; // was: checksum — never reuse this tag for a new field
+  string file_name = 21;
+  string display_name = 22;
+  repeated string references = 23;
+  string risk_score = 24;
 }
 
 // Alert is the Kafka wire format for a single alert travelling through the
@@ -53,5 +53,6 @@ message Alert {
   string confidence = 13;
   string severity = 14;
   RuleMetadata rule = 15;
-  repeated string enrichments_applied = 16;
+  repeated string enrichments_applied = 16;
+  repeated string override_merge_by_keys = 17; // set by plugin's AlertMergeByKeys; overrides rule.merge_by_keys when non-empty
 }
diff --git a/pkg/enrichments/enrichment.go b/pkg/enrichments/enrichment.go
index c3fcecf..5fe3913 100644
--- a/pkg/enrichments/enrichment.go
+++ b/pkg/enrichments/enrichment.go
@@ -2,65 +2,25 @@ package enrichments
 
 import (
 	"context"
-	"fmt"
 
 	"github.com/harishhary/blink/internal/errors"
+	"github.com/harishhary/blink/internal/plugin"
 	"github.com/harishhary/blink/pkg/alerts"
+	"github.com/harishhary/blink/pkg/enrichments/config"
 )
 
-func ValidateDependencyGraph(enrichments []IEnrichment) error {
-	index := make(map[string]IEnrichment, len(enrichments))
-	for _, e := range enrichments {
-		index[e.Name()] = e
-	}
+// PluginMetadata is re-exported from internal/plugin so plugin authors don't need to
+// import an internal package.
+type PluginMetadata = plugin.PluginMetadata - const ( - unvisited = iota - inProgress - done - ) - state := make(map[string]int, len(enrichments)) - - var visit func(name string, path []string) error - visit = func(name string, path []string) error { - switch state[name] { - case done: - return nil - case inProgress: - return fmt.Errorf("enrichment dependency cycle detected: %v → %s", path, name) - } - state[name] = inProgress - e, ok := index[name] - if !ok { - return fmt.Errorf("enrichment %q depends on unknown enrichment %q", path[len(path)-1], name) - } - for _, dep := range e.DependsOn() { - if err := visit(dep, append(path, name)); err != nil { - return err - } - } - state[name] = done - return nil - } - - for _, e := range enrichments { - if err := visit(e.Name(), []string{}); err != nil { - return err - } - } - return nil -} - -type IEnrichment interface { +type Enrichment interface { Enrich(ctx context.Context, alerts []*alerts.Alert) errors.Error // DependsOn returns plugin names that must run before this enrichment. + // Populated from the YAML sidecar depends_on field. 
DependsOn() []string - Id() string - Name() string - Description() string - Enabled() bool - Version() string + EnrichmentMetadata() *config.EnrichmentMetadata + PluginMetadata() PluginMetadata // satisfies plugin.Syncable Checksum() string String() string } diff --git a/pkg/enrichments/launcher.go b/pkg/enrichments/launcher.go index cc9c439..6614e05 100644 --- a/pkg/enrichments/launcher.go +++ b/pkg/enrichments/launcher.go @@ -5,49 +5,79 @@ import ( "fmt" "time" - plugin "github.com/hashicorp/go-plugin" + goplugin "github.com/hashicorp/go-plugin" "google.golang.org/grpc" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/helpers" + "github.com/harishhary/blink/internal/plugin" + internal "github.com/harishhary/blink/internal/pools" + "github.com/harishhary/blink/pkg/enrichments/config" "github.com/harishhary/blink/pkg/enrichments/rpc_enrichments" ) -type EnrichmentAdapter struct{} +type EnrichmentAdapter struct { + Watcher *config.Watcher +} -func (l *EnrichmentAdapter) PluginKey() string { return "enrichment" } -func (l *EnrichmentAdapter) MagicValue() string { return "enrichment_v1" } -func (l *EnrichmentAdapter) GRPCPlugin() plugin.Plugin { return &enrichmentPlugin{} } +func (l *EnrichmentAdapter) PluginKey() string { return "enrichment" } +func (l *EnrichmentAdapter) MagicValue() string { return "enrichment_v1" } +func (l *EnrichmentAdapter) GRPCPlugin() goplugin.Plugin { return &enrichmentPlugin{} } -func (l *EnrichmentAdapter) Handshake(ctx context.Context, raw interface{}, _ string, hash string) (IEnrichment, pluginmgr.PluginLifecycle, string, string, error) { +// Handshake connects to the enrichment subprocess, calls Init, and returns a +// ready rpcEnrichment. 
+func (l *EnrichmentAdapter) Handshake(ctx context.Context, raw interface{}, binPath string, hash string) (Enrichment, plugin.PluginLifecycle, string, string, error) { rpc, ok := raw.(rpc_enrichments.EnrichmentClient) if !ok { return nil, nil, "", "", fmt.Errorf("dispense: unexpected type %T", raw) } - metaCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - meta, err := rpc.GetMetadata(metaCtx, &rpc_enrichments.Empty{}) - cancel() - if err != nil { - return nil, nil, "", "", fmt.Errorf("metadata: %w", err) - } + fileName := helpers.BinaryBaseName(binPath) initCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - _, err = rpc.Init(initCtx, &rpc_enrichments.Empty{}) + _, err := rpc.Init(initCtx, &rpc_enrichments.Empty{}) cancel() if err != nil { return nil, nil, "", "", fmt.Errorf("init: %w", err) } - e := newRpcEnrichment(meta, rpc, hash) - return e, &enrichmentLifecycle{rpc: rpc}, meta.GetId(), meta.GetName(), nil + e := newRpcEnrichment(fileName, rpc, l.Watcher, hash) + cfg := l.Watcher.Current().ByFileName(fileName) + id, name := fileName, fileName + if cfg != nil { + id = cfg.Id() + name = cfg.Name() + } + return e, &enrichmentLifecycle{rpc: rpc}, id, name, nil +} + +// IsReady reports whether this binary's YAML sidecar exists in the current registry. +func (l *EnrichmentAdapter) IsReady(binPath string) bool { + return l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) != nil } -// IsReady always returns true - enrichments have no YAML sidecar prerequisite. -func (l *EnrichmentAdapter) IsReady(_ string) bool { return true } -func (l *EnrichmentAdapter) IsShadow(_ string) bool { return false } -func (l *EnrichmentAdapter) IsEnabled(_ *pluginmgr.PluginHandle) bool { return true } +// IsShadow reports whether this binary's YAML declares it as a shadow or canary version. 
+func (l *EnrichmentAdapter) IsShadow(binPath string) bool { + cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if cfg == nil { + return false + } + m := cfg.RolloutMode() + return m == internal.RolloutModeCanary || m == internal.RolloutModeShadow +} -func (l *EnrichmentAdapter) Workers(_ string) int { return 1 } +// IsEnabled reports whether the enrichment's YAML sidecar still exists and is enabled. +func (l *EnrichmentAdapter) IsEnabled(h *plugin.PluginHandle) bool { + cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) + return cfg != nil && cfg.Enabled() +} + +func (l *EnrichmentAdapter) Workers(binPath string) int { + cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if cfg == nil || cfg.MaxProcs() <= 0 { + return 1 + } + return cfg.MaxProcs() +} type enrichmentLifecycle struct { rpc rpc_enrichments.EnrichmentClient @@ -63,9 +93,11 @@ func (l *enrichmentLifecycle) Shutdown(ctx context.Context) error { return err } -type enrichmentPlugin struct{ plugin.NetRPCUnsupportedPlugin } +type enrichmentPlugin struct { + goplugin.NetRPCUnsupportedPlugin +} -func (p *enrichmentPlugin) GRPCServer(_ *plugin.GRPCBroker, _ *grpc.Server) error { return nil } -func (p *enrichmentPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { +func (p *enrichmentPlugin) GRPCServer(_ *goplugin.GRPCBroker, _ *grpc.Server) error { return nil } +func (p *enrichmentPlugin) GRPCClient(_ context.Context, _ *goplugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { return rpc_enrichments.NewEnrichmentClient(c), nil } diff --git a/pkg/enrichments/manager.go b/pkg/enrichments/manager.go index c939a36..2dbcd55 100644 --- a/pkg/enrichments/manager.go +++ b/pkg/enrichments/manager.go @@ -2,11 +2,12 @@ package enrichments import ( "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" + 
"github.com/harishhary/blink/pkg/enrichments/config" ) -var enrichmentManagerMetrics = pluginmgr.NewPluginManagerMetrics("enrichmentsvc") +var enrichmentManagerMetrics = plugin.NewPluginManagerMetrics("enrichmentsvc") -func NewManager(log *logger.Logger, notify pluginmgr.Notify, dir string) pluginmgr.Plugin { - return pluginmgr.NewPluginManager[IEnrichment](log, notify, dir, &EnrichmentAdapter{}, enrichmentManagerMetrics) +func NewManager(log *logger.Logger, notify plugin.Notify, dir string, watcher *config.Watcher) *plugin.PluginManager[Enrichment] { + return plugin.NewPluginManager[Enrichment](log, notify, dir, &EnrichmentAdapter{Watcher: watcher}, enrichmentManagerMetrics) } diff --git a/pkg/enrichments/pool/pool.go b/pkg/enrichments/pool/pool.go index 6206b06..2f25f70 100644 --- a/pkg/enrichments/pool/pool.go +++ b/pkg/enrichments/pool/pool.go @@ -7,19 +7,19 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/messaging" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" "github.com/harishhary/blink/pkg/alerts" "github.com/harishhary/blink/pkg/enrichments" ) type Pool struct { - *internal.ProcessPool[enrichments.IEnrichment] + *internal.ProcessPool[enrichments.Enrichment] } func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { return &Pool{ - ProcessPool: internal.NewProcessPool[enrichments.IEnrichment](routing.Config(), internal.NewPoolMetrics("enrichments"), drainTimeout), + ProcessPool: internal.NewProcessPool[enrichments.Enrichment](routing.Config(), internal.NewPoolMetrics("enrichments"), drainTimeout), } } @@ -27,8 +27,8 @@ func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { // absent/removed refer to the plugin state. errs contains per-alert errors (nil on success). 
func (p *Pool) Enrich(ctx context.Context, enrichmentID string, alerts []*alerts.Alert, canaryHashKey string) (absent bool, removed bool, errs []errors.Error) { errs = make([]errors.Error, len(alerts)) - err := p.Call(ctx, enrichmentID, canaryHashKey, func(callCtx context.Context, e enrichments.IEnrichment) error { - if !e.Enabled() { + err := p.Call(ctx, enrichmentID, canaryHashKey, func(callCtx context.Context, e enrichments.Enrichment) error { + if !e.EnrichmentMetadata().Enabled() { return nil } if err := e.Enrich(callCtx, alerts); err != nil { @@ -50,26 +50,27 @@ func (p *Pool) Enrich(ctx context.Context, enrichmentID string, alerts []*alerts return false, false, errs } -func poolKey(e enrichments.IEnrichment) internal.PoolKey { - version := e.Version() +func poolKey(e enrichments.Enrichment) internal.PoolKey { + cfg := e.EnrichmentMetadata() + version := cfg.Version() if cs := e.Checksum(); cs != "" { version = version + "@" + cs } - return internal.PoolKey{PluginID: e.Id(), Version: version} + return internal.PoolKey{PluginID: cfg.Id(), Version: version} } func (p *Pool) Sync(msg messaging.Message) { - register := func(onDrained func(), items []enrichments.IEnrichment, maxProcs int) { + register := func(onDrained func(), items []enrichments.Enrichment, maxProcs int) { p.Register(poolKey(items[0]), items, maxProcs, onDrained) } switch m := msg.(type) { - case pluginmgr.RegisterMessage[enrichments.IEnrichment]: + case plugin.RegisterMessage[enrichments.Enrichment]: register(nil, m.Items, m.MaxProcs) - case pluginmgr.UpdateMessage[enrichments.IEnrichment]: + case plugin.UpdateMessage[enrichments.Enrichment]: register(m.OnDrained, m.Items, m.MaxProcs) - case pluginmgr.UnregisterMessage[enrichments.IEnrichment]: + case plugin.UnregisterMessage[enrichments.Enrichment]: p.Unregister(m.ItemID) - case pluginmgr.RemoveMessage[enrichments.IEnrichment]: + case plugin.RemoveMessage[enrichments.Enrichment]: p.Remove(m.ItemID) } } diff --git 
a/pkg/enrichments/rpc_enrichment.go b/pkg/enrichments/rpc_enrichment.go index 017b2c9..6a30297 100644 --- a/pkg/enrichments/rpc_enrichment.go +++ b/pkg/enrichments/rpc_enrichment.go @@ -5,40 +5,57 @@ import ( "encoding/json" "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts" + "github.com/harishhary/blink/pkg/enrichments/config" "github.com/harishhary/blink/pkg/enrichments/rpc_enrichments" ) type rpcEnrichment struct { - meta *rpc_enrichments.EnrichmentMetadata - checksum string - client rpc_enrichments.EnrichmentClient + cfgWatcher *config.Watcher + fileName string + checksum string + client rpc_enrichments.EnrichmentClient } -func newRpcEnrichment(meta *rpc_enrichments.EnrichmentMetadata, client rpc_enrichments.EnrichmentClient, checksum string) *rpcEnrichment { - return &rpcEnrichment{meta: meta, checksum: checksum, client: client} +func newRpcEnrichment(fileName string, client rpc_enrichments.EnrichmentClient, watcher *config.Watcher, checksum string) *rpcEnrichment { + return &rpcEnrichment{ + cfgWatcher: watcher, + fileName: fileName, + checksum: checksum, + client: client, + } +} + +func (r *rpcEnrichment) cfg() *config.EnrichmentMetadata { + if r.cfgWatcher == nil { + return nil + } + return r.cfgWatcher.Current().ByFileName(r.fileName) } -func (r *rpcEnrichment) Id() string { - if id := r.meta.GetId(); id != "" { - return id +// EnrichmentMetadata returns the live YAML-derived enrichment configuration. 
+func (r *rpcEnrichment) EnrichmentMetadata() *config.EnrichmentMetadata { + if c := r.cfg(); c != nil { + return c } - return r.meta.GetName() + return &config.EnrichmentMetadata{FileNameField: r.fileName} } -func (r *rpcEnrichment) Name() string { return r.meta.GetName() } -func (r *rpcEnrichment) Description() string { return r.meta.GetDescription() } -func (r *rpcEnrichment) Enabled() bool { return r.meta.GetEnabled() } -func (r *rpcEnrichment) Version() string { return r.meta.GetVersion() } + +func (r *rpcEnrichment) PluginMetadata() plugin.PluginMetadata { + return r.EnrichmentMetadata().PluginMetadata() +} + +func (r *rpcEnrichment) DependsOn() []string { return r.EnrichmentMetadata().DependsOn() } func (r *rpcEnrichment) Checksum() string { return r.checksum } -func (r *rpcEnrichment) DependsOn() []string { return r.meta.GetDependsOn() } func (r *rpcEnrichment) String() string { - return "RpcEnrichment '" + r.meta.GetName() + "' id:'" + r.meta.GetId() + "'" + return "RpcEnrichment '" + r.EnrichmentMetadata().Name() + "' id:'" + r.EnrichmentMetadata().Id() + "'" } func (r *rpcEnrichment) Enrich(ctx context.Context, alerts []*alerts.Alert) errors.Error { protoAlerts := make([]*rpc_enrichments.Alert, 0, len(alerts)) for _, alrt := range alerts { - b, err := json.Marshal(alrt.Event) + b, err := json.Marshal(alrt) if err != nil { return errors.New(err) } diff --git a/pkg/enrichments/sdk/serve.go b/pkg/enrichments/sdk/serve.go index 2b54fab..e03ed34 100644 --- a/pkg/enrichments/sdk/serve.go +++ b/pkg/enrichments/sdk/serve.go @@ -20,18 +20,12 @@ const ( DefaultTimeout = 5 * time.Second ) -type EnrichmentMetadata struct { - ID string - Name string - Description string - Enabled bool - DependsOn []string - Version string -} - +// EnrichmentPlugin is the interface that all enrichment plugin binaries must implement. +// Embed sdk.BaseEnrichment to get no-op defaults for Init and Shutdown. +// +// All static metadata (name, id, enabled, depends_on, etc.) 
lives in the YAML +// sidecar file alongside the binary - the subprocess owns only enrichment logic. type EnrichmentPlugin interface { - // Metadata returns static enrichment configuration. Called once during handshake. - Metadata() EnrichmentMetadata // Init is called once after the plugin connects, before any Enrich calls. Init() error // Enrich enriches the alert event fields and returns the modified fields. @@ -40,6 +34,8 @@ type EnrichmentPlugin interface { Shutdown() error } +// BaseEnrichment provides no-op defaults for Init and Shutdown. +// Embed in your enrichment struct to avoid implementing them when not needed. type BaseEnrichment struct{} func (BaseEnrichment) Init() error { return nil } @@ -50,38 +46,10 @@ type server struct { enrichment EnrichmentPlugin } -func (s *server) GetMetadata(_ context.Context, _ *rpc_enrichments.Empty) (*rpc_enrichments.EnrichmentMetadata, error) { - m := s.enrichment.Metadata() - return &rpc_enrichments.EnrichmentMetadata{ - Id: m.ID, - Name: m.Name, - Description: m.Description, - Enabled: m.Enabled, - DependsOn: m.DependsOn, - Version: m.Version, - }, nil -} - func (s *server) Init(_ context.Context, _ *rpc_enrichments.Empty) (*rpc_enrichments.Empty, error) { return &rpc_enrichments.Empty{}, s.enrichment.Init() } -func (s *server) Enrich(ctx context.Context, req *rpc_enrichments.EnrichRequest) (*rpc_enrichments.EnrichResponse, error) { - var alert map[string]any - if err := json.Unmarshal(req.GetAlert().GetJson(), &alert); err != nil { - return nil, err - } - enriched, err := s.enrichment.Enrich(ctx, alert) - if err != nil { - return nil, err - } - b, err2 := json.Marshal(enriched) - if err2 != nil { - return nil, err2 - } - return &rpc_enrichments.EnrichResponse{Alert: &rpc_enrichments.Alert{Json: b}}, nil -} - func (s *server) EnrichBatch(ctx context.Context, req *rpc_enrichments.EnrichBatchRequest) (*rpc_enrichments.EnrichBatchResponse, error) { results := make([]*rpc_enrichments.Alert, 0, len(req.GetAlerts())) for 
_, a := range req.GetAlerts() { diff --git a/pkg/formatters/formatter.go b/pkg/formatters/formatter.go index 1285298..8a3ca18 100644 --- a/pkg/formatters/formatter.go +++ b/pkg/formatters/formatter.go @@ -4,17 +4,20 @@ import ( "context" "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts" + "github.com/harishhary/blink/pkg/formatters/config" ) -type IFormatter interface { +// PluginMetadata is re-exported from internal/plugin so plugin authors don't need to +// import an internal package. +type PluginMetadata = plugin.PluginMetadata + +type Formatter interface { Format(ctx context.Context, alerts []*alerts.Alert) ([]map[string]any, errors.Error) - Id() string - Name() string - Description() string - Enabled() bool - Version() string + FormatterMetadata() *config.FormatterMetadata + PluginMetadata() plugin.PluginMetadata // satisfies plugin.Syncable Checksum() string String() string } diff --git a/pkg/formatters/launcher.go b/pkg/formatters/launcher.go index 2536274..4b9b7b1 100644 --- a/pkg/formatters/launcher.go +++ b/pkg/formatters/launcher.go @@ -5,50 +5,80 @@ import ( "fmt" "time" - plugin "github.com/hashicorp/go-plugin" + goplugin "github.com/hashicorp/go-plugin" "google.golang.org/grpc" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/helpers" + "github.com/harishhary/blink/internal/plugin" + internal "github.com/harishhary/blink/internal/pools" + "github.com/harishhary/blink/pkg/formatters/config" "github.com/harishhary/blink/pkg/formatters/rpc_formatters" ) -// FormatterAdapter implements pluginmgr.PluginAdapter[IFormatter]. -type FormatterAdapter struct{} +// FormatterAdapter implements goplugin.PluginAdapter[Formatter]. 
+type FormatterAdapter struct { + Watcher *config.Watcher +} func (l *FormatterAdapter) PluginKey() string { return "formatter" } func (l *FormatterAdapter) MagicValue() string { return "formatter_v1" } -func (l *FormatterAdapter) GRPCPlugin() plugin.Plugin { return &formatterPlugin{} } +func (l *FormatterAdapter) GRPCPlugin() goplugin.Plugin { return &formatterPlugin{} } -func (l *FormatterAdapter) Handshake(ctx context.Context, raw interface{}, _ string, hash string) (IFormatter, pluginmgr.PluginLifecycle, string, string, error) { +// Handshake connects to the formatter subprocess, calls Init, and returns a +// ready rpcFormatter. Identity comes from the YAML sidecar, not from a GetMetadata RPC. +func (l *FormatterAdapter) Handshake(ctx context.Context, raw interface{}, binPath string, hash string) (Formatter, plugin.PluginLifecycle, string, string, error) { rpc, ok := raw.(rpc_formatters.FormatterClient) if !ok { return nil, nil, "", "", fmt.Errorf("dispense: unexpected type %T", raw) } - metaCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - meta, err := rpc.GetMetadata(metaCtx, &rpc_formatters.Empty{}) - cancel() - if err != nil { - return nil, nil, "", "", fmt.Errorf("metadata: %w", err) - } + fileName := helpers.BinaryBaseName(binPath) initCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - _, err = rpc.Init(initCtx, &rpc_formatters.Empty{}) + _, err := rpc.Init(initCtx, &rpc_formatters.Empty{}) cancel() if err != nil { return nil, nil, "", "", fmt.Errorf("init: %w", err) } - f := newRpcFormatter(meta, rpc, hash) - return f, &formatterLifecycle{rpc: rpc}, meta.GetId(), meta.GetName(), nil + f := newRpcFormatter(fileName, rpc, l.Watcher, hash) + cfg := l.Watcher.Current().ByFileName(fileName) + id, name := fileName, fileName + if cfg != nil { + id = cfg.Id() + name = cfg.Name() + } + return f, &formatterLifecycle{rpc: rpc}, id, name, nil } -// IsReady always returns true - formatters have no YAML sidecar prerequisite. 
-func (l *FormatterAdapter) IsReady(_ string) bool { return true } -func (l *FormatterAdapter) IsShadow(_ string) bool { return false } -func (l *FormatterAdapter) IsEnabled(_ *pluginmgr.PluginHandle) bool { return true } +// IsReady reports whether this binary's YAML sidecar exists in the current registry. +func (l *FormatterAdapter) IsReady(binPath string) bool { + return l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) != nil +} -func (l *FormatterAdapter) Workers(_ string) int { return 1 } +// IsShadow reports whether this binary's YAML declares it as a shadow or canary version. +func (l *FormatterAdapter) IsShadow(binPath string) bool { + cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if cfg == nil { + return false + } + m := cfg.RolloutMode() + return m == internal.RolloutModeCanary || m == internal.RolloutModeShadow +} + +// IsEnabled reports whether the formatter's YAML sidecar still exists and is enabled. +func (l *FormatterAdapter) IsEnabled(h *plugin.PluginHandle) bool { + cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) + return cfg != nil && cfg.Enabled() +} + +func (l *FormatterAdapter) Workers(binPath string) int { + cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if cfg == nil || cfg.MaxProcs() <= 0 { + return 1 + } + return cfg.MaxProcs() +} type formatterLifecycle struct { rpc rpc_formatters.FormatterClient @@ -64,9 +94,9 @@ func (l *formatterLifecycle) Shutdown(ctx context.Context) error { return err } -type formatterPlugin struct{ plugin.NetRPCUnsupportedPlugin } +type formatterPlugin struct{ goplugin.NetRPCUnsupportedPlugin } -func (p *formatterPlugin) GRPCServer(_ *plugin.GRPCBroker, _ *grpc.Server) error { return nil } -func (p *formatterPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { +func (p *formatterPlugin) GRPCServer(_ *goplugin.GRPCBroker, _ *grpc.Server) error { return nil } +func (p 
*formatterPlugin) GRPCClient(_ context.Context, _ *goplugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { return rpc_formatters.NewFormatterClient(c), nil } diff --git a/pkg/formatters/manager.go b/pkg/formatters/manager.go index 631c5ad..99e18c1 100644 --- a/pkg/formatters/manager.go +++ b/pkg/formatters/manager.go @@ -2,11 +2,12 @@ package formatters import ( "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" + "github.com/harishhary/blink/pkg/formatters/config" ) -var formatterManagerMetrics = pluginmgr.NewPluginManagerMetrics("formatters") +var formatterManagerMetrics = plugin.NewPluginManagerMetrics("formatters") -func NewManager(log *logger.Logger, notify pluginmgr.Notify, dir string) pluginmgr.Plugin { - return pluginmgr.NewPluginManager[IFormatter](log, notify, dir, &FormatterAdapter{}, formatterManagerMetrics) +func NewManager(log *logger.Logger, notify plugin.Notify, dir string, watcher *config.Watcher) *plugin.PluginManager[Formatter] { + return plugin.NewPluginManager[Formatter](log, notify, dir, &FormatterAdapter{Watcher: watcher}, formatterManagerMetrics) } diff --git a/pkg/formatters/pool/pool.go b/pkg/formatters/pool/pool.go index 1181339..6e192d8 100644 --- a/pkg/formatters/pool/pool.go +++ b/pkg/formatters/pool/pool.go @@ -7,19 +7,19 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/messaging" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" "github.com/harishhary/blink/pkg/alerts" "github.com/harishhary/blink/pkg/formatters" ) type Pool struct { - *internal.ProcessPool[formatters.IFormatter] + *internal.ProcessPool[formatters.Formatter] } func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { return &Pool{ - ProcessPool: 
internal.NewProcessPool[formatters.IFormatter](routing.Config(), internal.NewPoolMetrics("formatters"), drainTimeout), + ProcessPool: internal.NewProcessPool[formatters.Formatter](routing.Config(), internal.NewPoolMetrics("formatters"), drainTimeout), } } @@ -30,8 +30,8 @@ func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { func (p *Pool) Format(ctx context.Context, formatterID string, alerts []*alerts.Alert, canaryHashKey string) (outs []map[string]any, absent bool, removed bool, errs []errors.Error) { outs = make([]map[string]any, len(alerts)) errs = make([]errors.Error, len(alerts)) - err := p.Call(ctx, formatterID, canaryHashKey, func(callCtx context.Context, f formatters.IFormatter) error { - if !f.Enabled() { + err := p.Call(ctx, formatterID, canaryHashKey, func(callCtx context.Context, f formatters.Formatter) error { + if !f.FormatterMetadata().Enabled() { return nil } batchOuts, e := f.Format(callCtx, alerts) @@ -56,26 +56,27 @@ func (p *Pool) Format(ctx context.Context, formatterID string, alerts []*alerts. 
return outs, false, false, errs } -func poolKey(f formatters.IFormatter) internal.PoolKey { - version := f.Version() +func poolKey(f formatters.Formatter) internal.PoolKey { + cfg := f.FormatterMetadata() + version := cfg.Version() if cs := f.Checksum(); cs != "" { version = version + "@" + cs } - return internal.PoolKey{PluginID: f.Id(), Version: version} + return internal.PoolKey{PluginID: cfg.Id(), Version: version} } func (p *Pool) Sync(msg messaging.Message) { - register := func(onDrained func(), items []formatters.IFormatter, maxProcs int) { + register := func(onDrained func(), items []formatters.Formatter, maxProcs int) { p.Register(poolKey(items[0]), items, maxProcs, onDrained) } switch m := msg.(type) { - case pluginmgr.RegisterMessage[formatters.IFormatter]: + case plugin.RegisterMessage[formatters.Formatter]: register(nil, m.Items, m.MaxProcs) - case pluginmgr.UpdateMessage[formatters.IFormatter]: + case plugin.UpdateMessage[formatters.Formatter]: register(m.OnDrained, m.Items, m.MaxProcs) - case pluginmgr.UnregisterMessage[formatters.IFormatter]: + case plugin.UnregisterMessage[formatters.Formatter]: p.Unregister(m.ItemID) - case pluginmgr.RemoveMessage[formatters.IFormatter]: + case plugin.RemoveMessage[formatters.Formatter]: p.Remove(m.ItemID) } } diff --git a/pkg/formatters/rpc_formatter.go b/pkg/formatters/rpc_formatter.go index f1c4fc5..2157e74 100644 --- a/pkg/formatters/rpc_formatter.go +++ b/pkg/formatters/rpc_formatter.go @@ -6,33 +6,57 @@ import ( "fmt" "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts" + "github.com/harishhary/blink/pkg/formatters/config" "github.com/harishhary/blink/pkg/formatters/rpc_formatters" ) type rpcFormatter struct { - meta *rpc_formatters.FormatterMetadata - checksum string - client rpc_formatters.FormatterClient + cfgWatcher *config.Watcher + fileName string + checksum string + client rpc_formatters.FormatterClient } -func 
newRpcFormatter(meta *rpc_formatters.FormatterMetadata, client rpc_formatters.FormatterClient, checksum string) *rpcFormatter { - return &rpcFormatter{meta: meta, checksum: checksum, client: client} +func newRpcFormatter(fileName string, client rpc_formatters.FormatterClient, watcher *config.Watcher, checksum string) *rpcFormatter { + return &rpcFormatter{ + cfgWatcher: watcher, + fileName: fileName, + checksum: checksum, + client: client, + } +} + +func (f *rpcFormatter) cfg() *config.FormatterMetadata { + if f.cfgWatcher == nil { + return nil + } + return f.cfgWatcher.Current().ByFileName(f.fileName) } -func (f *rpcFormatter) Id() string { - if id := f.meta.GetId(); id != "" { - return id +// FormatterMetadata returns the live YAML-derived formatter configuration. +func (f *rpcFormatter) FormatterMetadata() *config.FormatterMetadata { + if c := f.cfg(); c != nil { + return c } - return f.meta.GetName() + return &config.FormatterMetadata{FileNameField: f.fileName} } -func (f *rpcFormatter) Name() string { return f.meta.GetName() } -func (f *rpcFormatter) Description() string { return f.meta.GetDescription() } -func (f *rpcFormatter) Enabled() bool { return f.meta.GetEnabled() } -func (f *rpcFormatter) Version() string { return f.meta.GetVersion() } -func (f *rpcFormatter) Checksum() string { return f.checksum } + +func (f *rpcFormatter) PluginMetadata() plugin.PluginMetadata { + c := f.FormatterMetadata() + return plugin.PluginMetadata{ + ID: c.Id(), + Name: c.Name(), + Description: c.Description(), + Enabled: c.Enabled(), + Version: c.Version(), + } +} + +func (f *rpcFormatter) Checksum() string { return f.checksum } func (f *rpcFormatter) String() string { - return fmt.Sprintf("Formatter '%s' (id:%s, enabled:%t)", f.meta.GetName(), f.meta.GetId(), f.meta.GetEnabled()) + return fmt.Sprintf("Formatter '%s' (id:%s)", f.FormatterMetadata().Name(), f.FormatterMetadata().Id()) } func (f *rpcFormatter) Format(ctx context.Context, alerts []*alerts.Alert) 
([]map[string]any, errors.Error) { diff --git a/pkg/formatters/sdk/serve.go b/pkg/formatters/sdk/serve.go index ca59e15..ecac91f 100644 --- a/pkg/formatters/sdk/serve.go +++ b/pkg/formatters/sdk/serve.go @@ -18,15 +18,12 @@ const ( MagicValue = "formatter_v1" ) -type FormatterMetadata struct { - ID string - Name string - Description string - Enabled bool -} - +// FormatterPlugin is the interface that all formatter plugin binaries must implement. +// Embed sdk.BaseFormatter to get no-op defaults for Init and Shutdown. +// +// All static metadata (name, id, enabled, etc.) lives in the YAML +// sidecar file alongside the binary — the subprocess owns only formatting logic. type FormatterPlugin interface { - Metadata() FormatterMetadata Init() error Format(ctx context.Context, alert map[string]any) (map[string]any, errors.Error) Shutdown() error @@ -43,36 +40,10 @@ type server struct { formatter FormatterPlugin } -func (s *server) GetMetadata(_ context.Context, _ *rpc_formatters.Empty) (*rpc_formatters.FormatterMetadata, error) { - m := s.formatter.Metadata() - return &rpc_formatters.FormatterMetadata{ - Id: m.ID, - Name: m.Name, - Description: m.Description, - Enabled: m.Enabled, - }, nil -} - func (s *server) Init(_ context.Context, _ *rpc_formatters.Empty) (*rpc_formatters.Empty, error) { return &rpc_formatters.Empty{}, s.formatter.Init() } -func (s *server) Format(ctx context.Context, req *rpc_formatters.FormatRequest) (*rpc_formatters.FormatResponse, error) { - var alert map[string]any - if err := json.Unmarshal(req.GetAlertJson(), &alert); err != nil { - return nil, err - } - result, err := s.formatter.Format(ctx, alert) - if err != nil { - return nil, err - } - b, err2 := json.Marshal(result) - if err2 != nil { - return nil, err2 - } - return &rpc_formatters.FormatResponse{ResultJson: b}, nil -} - func (s *server) FormatBatch(ctx context.Context, req *rpc_formatters.FormatBatchRequest) (*rpc_formatters.FormatBatchResponse, error) { results := make([][]byte, 0, 
len(req.GetAlertJson())) for _, raw := range req.GetAlertJson() { diff --git a/pkg/matchers/manager.go b/pkg/matchers/manager.go index 0c08133..a3c27cb 100644 --- a/pkg/matchers/manager.go +++ b/pkg/matchers/manager.go @@ -2,11 +2,12 @@ package matchers import ( "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" + "github.com/harishhary/blink/pkg/matchers/config" ) -var matcherManagerMetrics = pluginmgr.NewPluginManagerMetrics("matchersvc") +var matcherManagerMetrics = plugin.NewPluginManagerMetrics("matchersvc") -func NewManager(log *logger.Logger, notify pluginmgr.Notify, dir string) pluginmgr.Plugin { - return pluginmgr.NewPluginManager[Matcher](log, notify, dir, &MatcherAdapter{}, matcherManagerMetrics) +func NewManager(log *logger.Logger, notify plugin.Notify, dir string, watcher *config.Watcher) *plugin.PluginManager[Matcher] { + return plugin.NewPluginManager[Matcher](log, notify, dir, &MatcherAdapter{Watcher: watcher}, matcherManagerMetrics) } diff --git a/pkg/matchers/matcher.go b/pkg/matchers/matcher.go index 17c74b2..c8e0d64 100644 --- a/pkg/matchers/matcher.go +++ b/pkg/matchers/matcher.go @@ -4,15 +4,19 @@ import ( "context" "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/events" + "github.com/harishhary/blink/pkg/matchers/config" ) +// PluginMetadata is re-exported from internal/plugin so plugin authors don't need to +// import an internal package. 
+type PluginMetadata = plugin.PluginMetadata + type Matcher interface { - Id() string - Name() string - Description() string - Enabled() bool - Version() string + MatcherMetadata() *config.MatcherMetadata + PluginMetadata() plugin.PluginMetadata // satisfies plugin.Syncable + Global() bool Checksum() string String() string Match(ctx context.Context, evts []events.Event) ([]bool, errors.Error) diff --git a/pkg/matchers/pool/pool.go b/pkg/matchers/pool/pool.go index 06e07a2..685ec64 100644 --- a/pkg/matchers/pool/pool.go +++ b/pkg/matchers/pool/pool.go @@ -6,7 +6,7 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/messaging" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" "github.com/harishhary/blink/pkg/events" "github.com/harishhary/blink/pkg/matchers" @@ -27,7 +27,7 @@ func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { func (p *Pool) Match(ctx context.Context, matcherID string, evts []events.Event, canaryHashKey string) ([]bool, errors.Error) { var results []bool err := p.Call(ctx, matcherID, canaryHashKey, func(callCtx context.Context, m matchers.Matcher) error { - if !m.Enabled() { + if !m.MatcherMetadata().Enabled() { results = make([]bool, len(evts)) for i := range results { results[i] = true @@ -46,11 +46,12 @@ func (p *Pool) Match(ctx context.Context, matcherID string, evts []events.Event, // Handles plugin lifecycle messages from the plugin manager bus, registering or deregistering matchers in the pool. 
func poolKey(m matchers.Matcher) internal.PoolKey { - version := m.Version() + cfg := m.MatcherMetadata() + version := cfg.Version() if cs := m.Checksum(); cs != "" { version = version + "@" + cs } - return internal.PoolKey{PluginID: m.Id(), Version: version} + return internal.PoolKey{PluginID: cfg.Id(), Version: version} } func (p *Pool) Sync(msg messaging.Message) { @@ -58,13 +59,13 @@ func (p *Pool) Sync(msg messaging.Message) { p.Register(poolKey(items[0]), items, maxProcs, onDrained) } switch m := msg.(type) { - case pluginmgr.RegisterMessage[matchers.Matcher]: + case plugin.RegisterMessage[matchers.Matcher]: register(nil, m.Items, m.MaxProcs) - case pluginmgr.UpdateMessage[matchers.Matcher]: + case plugin.UpdateMessage[matchers.Matcher]: register(m.OnDrained, m.Items, m.MaxProcs) - case pluginmgr.UnregisterMessage[matchers.Matcher]: + case plugin.UnregisterMessage[matchers.Matcher]: p.Unregister(m.ItemID) - case pluginmgr.RemoveMessage[matchers.Matcher]: + case plugin.RemoveMessage[matchers.Matcher]: p.Remove(m.ItemID) } } diff --git a/pkg/matchers/rpc_matcher.go b/pkg/matchers/rpc_matcher.go index 05118a0..8c149f0 100644 --- a/pkg/matchers/rpc_matcher.go +++ b/pkg/matchers/rpc_matcher.go @@ -6,39 +6,60 @@ import ( "time" "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/events" + "github.com/harishhary/blink/pkg/matchers/config" "github.com/harishhary/blink/pkg/matchers/rpc_matchers" ) type rpcMatcher struct { - client rpc_matchers.MatcherClient - meta *rpc_matchers.MatcherMetadata - checksum string - timeout time.Duration + cfgWatcher *config.Watcher + fileName string + checksum string + client rpc_matchers.MatcherClient + timeout time.Duration } -func newRpcMatcher(meta *rpc_matchers.MatcherMetadata, client rpc_matchers.MatcherClient, timeout time.Duration, checksum string) *rpcMatcher { +func newRpcMatcher(fileName string, client rpc_matchers.MatcherClient, watcher 
*config.Watcher, timeout time.Duration, checksum string) *rpcMatcher { return &rpcMatcher{ - meta: meta, - checksum: checksum, - client: client, - timeout: timeout, + cfgWatcher: watcher, + fileName: fileName, + checksum: checksum, + client: client, + timeout: timeout, } } -func (r *rpcMatcher) Id() string { - if id := r.meta.GetId(); id != "" { - return id +func (r *rpcMatcher) cfg() *config.MatcherMetadata { + if r.cfgWatcher == nil { + return nil } - return r.meta.GetName() + return r.cfgWatcher.Current().ByFileName(r.fileName) } -func (r *rpcMatcher) Name() string { return r.meta.GetName() } -func (r *rpcMatcher) Description() string { return r.meta.GetDescription() } -func (r *rpcMatcher) Enabled() bool { return r.meta.GetEnabled() } -func (r *rpcMatcher) Version() string { return r.meta.GetVersion() } -func (r *rpcMatcher) Checksum() string { return r.checksum } + +// MatcherMetadata returns the live YAML-derived matcher configuration. +func (r *rpcMatcher) MatcherMetadata() *config.MatcherMetadata { + if c := r.cfg(); c != nil { + return c + } + return &config.MatcherMetadata{FileNameField: r.fileName} +} + +func (r *rpcMatcher) PluginMetadata() plugin.PluginMetadata { + c := r.MatcherMetadata() + return plugin.PluginMetadata{ + ID: c.Id(), + Name: c.Name(), + Description: c.Description(), + Enabled: c.Enabled(), + Version: c.Version(), + } +} + +func (r *rpcMatcher) Global() bool { return r.MatcherMetadata().Global() } +func (r *rpcMatcher) Checksum() string { return r.checksum } func (r *rpcMatcher) String() string { - return "RpcMatcher '" + r.meta.GetName() + "' id:'" + r.meta.GetId() + "'" + return "RpcMatcher '" + r.MatcherMetadata().Name() + "' id:'" + r.MatcherMetadata().Id() + "'" } func (r *rpcMatcher) Match(ctx context.Context, evts []events.Event) ([]bool, errors.Error) { diff --git a/pkg/matchers/sdk/serve.go b/pkg/matchers/sdk/serve.go index 0c43a54..2d8629b 100644 --- a/pkg/matchers/sdk/serve.go +++ b/pkg/matchers/sdk/serve.go @@ -21,20 +21,12 
@@ const ( DefaultTimeout = 5 * time.Second ) -// MatcherMetadata holds the static properties of a matcher, returned by MatcherPlugin.Metadata(). -type MatcherMetadata struct { - ID string - Name string - Description string - Enabled bool - Global bool - Version string -} - // MatcherPlugin is the interface that all matcher plugin binaries must implement. // Embed sdk.BaseMatcher to get no-op defaults for Init and Shutdown. +// +// All static metadata (name, id, enabled, global, etc.) lives in the YAML +// sidecar file alongside the binary — the subprocess owns only matching logic. type MatcherPlugin interface { - Metadata() MatcherMetadata Init() error Match(ctx context.Context, event events.Event) (bool, errors.Error) Shutdown() error @@ -53,34 +45,10 @@ type server struct { matcher MatcherPlugin } -func (s *server) GetMetadata(_ context.Context, _ *rpc_matchers.Empty) (*rpc_matchers.MatcherMetadata, error) { - m := s.matcher.Metadata() - return &rpc_matchers.MatcherMetadata{ - Id: m.ID, - Name: m.Name, - Description: m.Description, - Enabled: m.Enabled, - Global: m.Global, - Version: m.Version, - }, nil -} - func (s *server) Init(_ context.Context, _ *rpc_matchers.Empty) (*rpc_matchers.Empty, error) { return &rpc_matchers.Empty{}, s.matcher.Init() } -func (s *server) Match(ctx context.Context, req *rpc_matchers.MatchRequest) (*rpc_matchers.MatchResponse, error) { - var event events.Event - if err := json.Unmarshal(req.GetEvent().GetJson(), &event); err != nil { - return nil, err - } - matched, err := s.matcher.Match(ctx, event) - if err != nil { - return nil, err - } - return &rpc_matchers.MatchResponse{Matched: matched}, nil -} - func (s *server) MatchBatch(ctx context.Context, req *rpc_matchers.MatchBatchRequest) (*rpc_matchers.MatchBatchResponse, error) { results := make([]bool, 0, len(req.GetEvents())) for _, ev := range req.GetEvents() { diff --git a/pkg/rules/helpers.go b/pkg/rules/helpers.go index 25f6f3b..0ae3852 100644 --- a/pkg/rules/helpers.go +++ 
b/pkg/rules/helpers.go @@ -1,10 +1,13 @@ // pkg/rules/helpers.go package rules -import "github.com/harishhary/blink/pkg/events" +import ( + "github.com/harishhary/blink/pkg/events" + "github.com/harishhary/blink/pkg/rules/config" +) -// Checks that every required subkey is present in the event. Takes Metadata since it only needs static config: Enabled, ReqSubkeys. -func DefaultSubKeysInEvent(r Metadata, event events.Event) bool { +// DefaultSubKeysInEvent checks that every required subkey is present in the event. +func DefaultSubKeysInEvent(r *config.RuleMetadata, event events.Event) bool { if !r.Enabled() { return false } diff --git a/pkg/rules/launcher.go b/pkg/rules/launcher.go index 1a14b79..fc698e4 100644 --- a/pkg/rules/launcher.go +++ b/pkg/rules/launcher.go @@ -5,11 +5,11 @@ import ( "fmt" "time" - plugin "github.com/hashicorp/go-plugin" + goplugin "github.com/hashicorp/go-plugin" "google.golang.org/grpc" "github.com/harishhary/blink/internal/helpers" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" "github.com/harishhary/blink/pkg/rules/config" "github.com/harishhary/blink/pkg/rules/rpc_rules" @@ -21,10 +21,10 @@ type RuleAdapter struct { func (l *RuleAdapter) PluginKey() string { return "rule" } func (l *RuleAdapter) MagicValue() string { return "rule_v1" } -func (l *RuleAdapter) GRPCPlugin() plugin.Plugin { return &rulePlugin{} } +func (l *RuleAdapter) GRPCPlugin() goplugin.Plugin { return &rulePlugin{} } // Connects to the rule subprocess, reads the YAML sidecar for its metadata, calls Init, and returns a ready rpcRule. The rule binary's basename must match the YAML file_name field. 
-func (l *RuleAdapter) Handshake(ctx context.Context, raw interface{}, binPath string, hash string) (Rule, pluginmgr.PluginLifecycle, string, string, error) { +func (l *RuleAdapter) Handshake(ctx context.Context, raw interface{}, binPath string, hash string) (Rule, plugin.PluginLifecycle, string, string, error) { rpc, ok := raw.(rpc_rules.RuleClient) if !ok { return nil, nil, "", "", fmt.Errorf("dispense: unexpected type %T", raw) @@ -75,7 +75,7 @@ func (l *RuleAdapter) IsShadow(binPath string) bool { // IsEnabled reports whether the rule's YAML sidecar still exists and is enabled. // Called during every reconcile func so process-zombies (binary running but YAML removed/disabled) are stopped without waiting for a binary change. -func (l *RuleAdapter) IsEnabled(h *pluginmgr.PluginHandle) bool { +func (l *RuleAdapter) IsEnabled(h *plugin.PluginHandle) bool { cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) return cfg != nil && cfg.Enabled() } @@ -103,11 +103,11 @@ func (l *ruleLifecycle) Shutdown(ctx context.Context) error { } // rulePlugin is the go-plugin client-side stub. 
-type rulePlugin struct{ plugin.NetRPCUnsupportedPlugin } +type rulePlugin struct{ goplugin.NetRPCUnsupportedPlugin } -func (p *rulePlugin) GRPCServer(_ *plugin.GRPCBroker, _ *grpc.Server) error { +func (p *rulePlugin) GRPCServer(_ *goplugin.GRPCBroker, _ *grpc.Server) error { return nil } -func (p *rulePlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { +func (p *rulePlugin) GRPCClient(_ context.Context, _ *goplugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { return rpc_rules.NewRuleClient(c), nil } diff --git a/pkg/rules/manager.go b/pkg/rules/manager.go index fead3e4..45475b0 100644 --- a/pkg/rules/manager.go +++ b/pkg/rules/manager.go @@ -2,12 +2,12 @@ package rules import ( "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/rules/config" ) -var ruleManagerMetrics = pluginmgr.NewPluginManagerMetrics("rulesvc") +var ruleManagerMetrics = plugin.NewPluginManagerMetrics("rulesvc") -func NewManager(log *logger.Logger, notify pluginmgr.Notify, dir string, watcher *config.Watcher) *pluginmgr.PluginManager[Rule] { - return pluginmgr.NewPluginManager[Rule](log, notify, dir, &RuleAdapter{Watcher: watcher}, ruleManagerMetrics) +func NewManager(log *logger.Logger, notify plugin.Notify, dir string, watcher *config.Watcher) *plugin.PluginManager[Rule] { + return plugin.NewPluginManager[Rule](log, notify, dir, &RuleAdapter{Watcher: watcher}, ruleManagerMetrics) } diff --git a/pkg/rules/pool/pool.go b/pkg/rules/pool/pool.go index 537799c..61ce356 100644 --- a/pkg/rules/pool/pool.go +++ b/pkg/rules/pool/pool.go @@ -6,7 +6,7 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/messaging" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" 
"github.com/harishhary/blink/pkg/events" "github.com/harishhary/blink/pkg/rules" @@ -30,11 +30,11 @@ func NewPool(watcher *config.Watcher, drainTimeout time.Duration) *Pool { } // Evaluate runs all evts against the rule identified by ruleID in a single pool call. -func (p *Pool) Evaluate(ctx context.Context, ruleID string, evts []events.Event, canaryHashKey string) ([]bool, errors.Error) { - var results []bool +func (p *Pool) Evaluate(ctx context.Context, ruleID string, evts []events.Event, canaryHashKey string) ([]rules.EvalResult, errors.Error) { + var results []rules.EvalResult err := p.Call(ctx, ruleID, canaryHashKey, func(ctx context.Context, r rules.Rule) error { - if !r.Enabled() { - results = make([]bool, len(evts)) + if !r.RuleMetadata().Enabled() { + results = make([]rules.EvalResult, len(evts)) return nil } var e errors.Error @@ -52,23 +52,24 @@ func (p *Pool) Evaluate(ctx context.Context, ruleID string, evts []events.Event, // always produces a distinct key even if the operator forgot to bump the version // string in the rule config - preventing silent same-key overwrites in the pool. func poolKey(r rules.Rule) internal.PoolKey { - version := r.Version() + cfg := r.RuleMetadata() + version := cfg.Version() if cs := r.Checksum(); cs != "" { version = version + "@" + cs } - return internal.PoolKey{PluginID: r.Id(), Version: version} + return internal.PoolKey{PluginID: cfg.Id(), Version: version} } // Handles plugin lifecycle messages from the plugin manager bus, registering or deregistering rules in the pool. 
func (p *Pool) Sync(msg messaging.Message) { switch m := msg.(type) { - case pluginmgr.RegisterMessage[rules.Rule]: + case plugin.RegisterMessage[rules.Rule]: p.Register(poolKey(m.Items[0]), m.Items, m.MaxProcs, nil) - case pluginmgr.UpdateMessage[rules.Rule]: + case plugin.UpdateMessage[rules.Rule]: p.Register(poolKey(m.Items[0]), m.Items, m.MaxProcs, m.OnDrained) - case pluginmgr.UnregisterMessage[rules.Rule]: + case plugin.UnregisterMessage[rules.Rule]: p.Unregister(m.ItemID) - case pluginmgr.RemoveMessage[rules.Rule]: + case plugin.RemoveMessage[rules.Rule]: p.Remove(m.ItemID) } } diff --git a/pkg/rules/rpc_rules.go b/pkg/rules/rpc_rules.go index c8f9317..ac80154 100644 --- a/pkg/rules/rpc_rules.go +++ b/pkg/rules/rpc_rules.go @@ -3,13 +3,12 @@ package rules import ( "context" "encoding/json" - "time" "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/events" "github.com/harishhary/blink/pkg/rules/config" "github.com/harishhary/blink/pkg/rules/rpc_rules" - "github.com/harishhary/blink/pkg/scoring" ) // This is the executor-side wrapper for a live rule subprocess. @@ -17,7 +16,7 @@ type rpcRule struct { client rpc_rules.RuleClient cfgWatcher *config.Watcher fileName string - checksum string // SHA-256 ofthe binary + checksum string // SHA-256 of the binary } func newRpcRule(fileName string, client rpc_rules.RuleClient, watcher *config.Watcher, checksum string) *rpcRule { @@ -36,205 +35,26 @@ func (r *rpcRule) cfg() *config.RuleMetadata { return r.cfgWatcher.Current().ByFileName(r.fileName) } -func (r *rpcRule) Id() string { +// RuleMetadata returns the live YAML-derived rule configuration for this plugin. +func (r *rpcRule) RuleMetadata() *config.RuleMetadata { if c := r.cfg(); c != nil { - return c.Id() + return c } - return "" + // Return a minimal stub so callers don't need to nil-check. 
+ return &config.RuleMetadata{FileNameField: r.fileName} } -func (r *rpcRule) Name() string { - if c := r.cfg(); c != nil { - return c.Name() - } - return r.fileName -} - -func (r *rpcRule) Enabled() bool { - c := r.cfg() - return c != nil && c.Enabled() -} - -func (r *rpcRule) Description() string { - if c := r.cfg(); c != nil { - return c.Description() - } - return "" -} - -func (r *rpcRule) FileName() string { - return r.fileName -} - -func (r *rpcRule) DisplayName() string { - if c := r.cfg(); c != nil { - return c.DisplayName() - } - return "" -} - -func (r *rpcRule) References() []string { - if c := r.cfg(); c != nil { - return c.References() - } - return nil -} - -func (r *rpcRule) Severity() scoring.Severity { - if c := r.cfg(); c != nil { - return c.Severity() - } - return scoring.SeverityInfo -} - -func (r *rpcRule) Confidence() scoring.Confidence { - if c := r.cfg(); c != nil { - return c.Confidence() - } - return scoring.ConfidenceVeryLow -} - -func (r *rpcRule) RiskScore() scoring.RiskScore { - if c := r.cfg(); c != nil { - return c.RiskScore() - } - return scoring.ComputeRiskScore(scoring.ConfidenceVeryLow, scoring.SeverityInfo) -} - -func (r *rpcRule) MergeByKeys() []string { - if c := r.cfg(); c != nil { - return c.MergeByKeys() - } - return nil -} - -func (r *rpcRule) MergeWindowMins() time.Duration { - if c := r.cfg(); c != nil { - return c.MergeWindowMins() - } - return 0 -} - -func (r *rpcRule) ReqSubkeys() []string { - if c := r.cfg(); c != nil { - return c.ReqSubkeys() - } - return nil -} - -func (r *rpcRule) Signal() bool { - if c := r.cfg(); c != nil { - return c.Signal() - } - return false -} - -func (r *rpcRule) SignalThreshold() scoring.Confidence { - if c := r.cfg(); c != nil { - return c.SignalThreshold() - } - return scoring.ConfidenceVeryLow -} - -func (r *rpcRule) Tags() []string { - if c := r.cfg(); c != nil { - return c.Tags() - } - return nil -} - -func (r *rpcRule) Dispatchers() []string { - if c := r.cfg(); c != nil { - return 
c.Dispatchers() - } - return nil -} - -func (r *rpcRule) LogTypes() []string { - if c := r.cfg(); c != nil { - return c.LogTypes() - } - return nil -} - -func (r *rpcRule) Observables() []Observables { - return nil -} - -func (r *rpcRule) Matchers() []string { - if c := r.cfg(); c != nil { - return c.Matchers() - } - return nil -} - -func (r *rpcRule) Formatters() []string { - if c := r.cfg(); c != nil { - return c.Formatters() - } - return nil -} - -func (r *rpcRule) Enrichments() []string { - if c := r.cfg(); c != nil { - return c.Enrichments() - } - return nil -} - -func (r *rpcRule) TuningRules() []string { - if c := r.cfg(); c != nil { - return c.TuningRules() - } - return nil -} - -func (r *rpcRule) Version() string { - if c := r.cfg(); c != nil { - return c.Version() - } - return "" -} - -func (r *rpcRule) Checksum() string { - return r.checksum -} - -// --- Optional capability interfaces --- - -func (r *rpcRule) AlertTitle(_ events.Event) string { - if c := r.cfg(); c != nil { - return c.Name() - } - return r.fileName -} +func (r *rpcRule) Checksum() string { return r.checksum } -func (r *rpcRule) AlertDescription(_ events.Event) string { +func (r *rpcRule) PluginMetadata() plugin.PluginMetadata { if c := r.cfg(); c != nil { - return c.Description() + return c.PluginMetadata() } - return "" -} - -func (r *rpcRule) AlertContext(_ events.Event) map[string]any { - return nil -} - -func (r *rpcRule) DynamicSeverity(_ events.Event) scoring.Severity { - return r.Severity() -} - -func (r *rpcRule) Dedup(_ events.Event) []string { - return r.MergeByKeys() -} - -// SubKeyFilter uses the YAML config (via cfg()) so the subprocess is not invoked. -func (r *rpcRule) SubKeysInEvent(event events.Event) bool { - return DefaultSubKeysInEvent(r, event) + return plugin.PluginMetadata{Name: r.fileName} } // ctx carries the caller's deadline (e.g. the executor's per-event timeout). 
-func (r *rpcRule) Evaluate(ctx context.Context, evts []events.Event) ([]bool, errors.Error) { +func (r *rpcRule) Evaluate(ctx context.Context, evts []events.Event) ([]EvalResult, errors.Error) { protoEvents := make([]*rpc_rules.Event, 0, len(evts)) for _, ev := range evts { b, err := json.Marshal(ev) @@ -247,5 +67,23 @@ func (r *rpcRule) Evaluate(ctx context.Context, evts []events.Event) ([]bool, er if err != nil { return nil, errors.New(err) } - return resp.GetMatched(), nil + + out := make([]EvalResult, len(resp.GetResults())) + for i, r := range resp.GetResults() { + res := EvalResult{ + Matched: r.GetMatched(), + Title: r.GetTitle(), + Description: r.GetDescription(), + Severity: r.GetSeverity(), + MergeByKeys: r.GetMergeByKeys(), + } + if b := r.GetContextJson(); len(b) > 0 { + var ctx map[string]any + if err := json.Unmarshal(b, &ctx); err == nil { + res.Context = ctx + } + } + out[i] = res + } + return out, nil } diff --git a/pkg/rules/rpc_rules/rule.pb.go b/pkg/rules/rpc_rules/rule.pb.go index 2912d55..361feb0 100644 --- a/pkg/rules/rpc_rules/rule.pb.go +++ b/pkg/rules/rpc_rules/rule.pb.go @@ -2,7 +2,7 @@ // versions: // protoc-gen-go v1.36.9 // protoc v7.34.0 -// source: rule.proto +// source: pkg/rules/rpc_rules/rule.proto package rpc_rules @@ -29,7 +29,7 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - mi := &file_rule_proto_msgTypes[0] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -41,7 +41,7 @@ func (x *Empty) String() string { func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_rule_proto_msgTypes[0] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -54,195 +54,7 @@ func (x *Empty) ProtoReflect() protoreflect.Message { // Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
func (*Empty) Descriptor() ([]byte, []int) { - return file_rule_proto_rawDescGZIP(), []int{0} -} - -type Metadata struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // stable rule id (or host fills if empty) - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` - Severity string `protobuf:"bytes,5,opt,name=severity,proto3" json:"severity,omitempty"` // "info|low|medium|high|critical" - Confidence string `protobuf:"bytes,6,opt,name=confidence,proto3" json:"confidence,omitempty"` // "very_low|low|..." - MergeByKeys []string `protobuf:"bytes,7,rep,name=merge_by_keys,json=mergeByKeys,proto3" json:"merge_by_keys,omitempty"` - MergeWindowMins uint32 `protobuf:"varint,8,opt,name=merge_window_mins,json=mergeWindowMins,proto3" json:"merge_window_mins,omitempty"` - ReqSubkeys []string `protobuf:"bytes,9,rep,name=req_subkeys,json=reqSubkeys,proto3" json:"req_subkeys,omitempty"` - Signal bool `protobuf:"varint,10,opt,name=signal,proto3" json:"signal,omitempty"` - SignalThreshold string `protobuf:"bytes,11,opt,name=signal_threshold,json=signalThreshold,proto3" json:"signal_threshold,omitempty"` // same enum as confidence - Tags []string `protobuf:"bytes,12,rep,name=tags,proto3" json:"tags,omitempty"` - Dispatchers []string `protobuf:"bytes,13,rep,name=dispatchers,proto3" json:"dispatchers,omitempty"` - LogTypes []string `protobuf:"bytes,14,rep,name=log_types,json=logTypes,proto3" json:"log_types,omitempty"` - Matchers []string `protobuf:"bytes,15,rep,name=matchers,proto3" json:"matchers,omitempty"` - Formatters []string `protobuf:"bytes,16,rep,name=formatters,proto3" json:"formatters,omitempty"` - Enrichments []string `protobuf:"bytes,17,rep,name=enrichments,proto3" 
json:"enrichments,omitempty"` - TuningRules []string `protobuf:"bytes,18,rep,name=tuning_rules,json=tuningRules,proto3" json:"tuning_rules,omitempty"` - Version string `protobuf:"bytes,19,opt,name=version,proto3" json:"version,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Metadata) Reset() { - *x = Metadata{} - mi := &file_rule_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Metadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Metadata) ProtoMessage() {} - -func (x *Metadata) ProtoReflect() protoreflect.Message { - mi := &file_rule_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. -func (*Metadata) Descriptor() ([]byte, []int) { - return file_rule_proto_rawDescGZIP(), []int{1} -} - -func (x *Metadata) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *Metadata) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Metadata) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Metadata) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *Metadata) GetSeverity() string { - if x != nil { - return x.Severity - } - return "" -} - -func (x *Metadata) GetConfidence() string { - if x != nil { - return x.Confidence - } - return "" -} - -func (x *Metadata) GetMergeByKeys() []string { - if x != nil { - return x.MergeByKeys - } - return nil -} - -func (x *Metadata) GetMergeWindowMins() uint32 { - if x != nil { - return x.MergeWindowMins - } - return 0 -} - -func (x *Metadata) GetReqSubkeys() []string { - if x != nil { - return x.ReqSubkeys - } - return nil -} - -func (x *Metadata) 
GetSignal() bool { - if x != nil { - return x.Signal - } - return false -} - -func (x *Metadata) GetSignalThreshold() string { - if x != nil { - return x.SignalThreshold - } - return "" -} - -func (x *Metadata) GetTags() []string { - if x != nil { - return x.Tags - } - return nil -} - -func (x *Metadata) GetDispatchers() []string { - if x != nil { - return x.Dispatchers - } - return nil -} - -func (x *Metadata) GetLogTypes() []string { - if x != nil { - return x.LogTypes - } - return nil -} - -func (x *Metadata) GetMatchers() []string { - if x != nil { - return x.Matchers - } - return nil -} - -func (x *Metadata) GetFormatters() []string { - if x != nil { - return x.Formatters - } - return nil -} - -func (x *Metadata) GetEnrichments() []string { - if x != nil { - return x.Enrichments - } - return nil -} - -func (x *Metadata) GetTuningRules() []string { - if x != nil { - return x.TuningRules - } - return nil -} - -func (x *Metadata) GetVersion() string { - if x != nil { - return x.Version - } - return "" + return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{0} } type Event struct { @@ -254,7 +66,7 @@ type Event struct { func (x *Event) Reset() { *x = Event{} - mi := &file_rule_proto_msgTypes[2] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -266,7 +78,7 @@ func (x *Event) String() string { func (*Event) ProtoMessage() {} func (x *Event) ProtoReflect() protoreflect.Message { - mi := &file_rule_proto_msgTypes[2] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -279,7 +91,7 @@ func (x *Event) ProtoReflect() protoreflect.Message { // Deprecated: Use Event.ProtoReflect.Descriptor instead. 
func (*Event) Descriptor() ([]byte, []int) { - return file_rule_proto_rawDescGZIP(), []int{2} + return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{1} } func (x *Event) GetJson() []byte { @@ -298,7 +110,7 @@ type EvaluateRequest struct { func (x *EvaluateRequest) Reset() { *x = EvaluateRequest{} - mi := &file_rule_proto_msgTypes[3] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -310,7 +122,7 @@ func (x *EvaluateRequest) String() string { func (*EvaluateRequest) ProtoMessage() {} func (x *EvaluateRequest) ProtoReflect() protoreflect.Message { - mi := &file_rule_proto_msgTypes[3] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -323,7 +135,7 @@ func (x *EvaluateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use EvaluateRequest.ProtoReflect.Descriptor instead. 
func (*EvaluateRequest) Descriptor() ([]byte, []int) { - return file_rule_proto_rawDescGZIP(), []int{3} + return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{2} } func (x *EvaluateRequest) GetEvent() *Event { @@ -342,7 +154,7 @@ type EvaluateResponse struct { func (x *EvaluateResponse) Reset() { *x = EvaluateResponse{} - mi := &file_rule_proto_msgTypes[4] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -354,7 +166,7 @@ func (x *EvaluateResponse) String() string { func (*EvaluateResponse) ProtoMessage() {} func (x *EvaluateResponse) ProtoReflect() protoreflect.Message { - mi := &file_rule_proto_msgTypes[4] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -367,7 +179,7 @@ func (x *EvaluateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use EvaluateResponse.ProtoReflect.Descriptor instead. 
func (*EvaluateResponse) Descriptor() ([]byte, []int) { - return file_rule_proto_rawDescGZIP(), []int{4} + return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{3} } func (x *EvaluateResponse) GetMatched() bool { @@ -386,7 +198,7 @@ type EvaluateBatchRequest struct { func (x *EvaluateBatchRequest) Reset() { *x = EvaluateBatchRequest{} - mi := &file_rule_proto_msgTypes[5] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -398,7 +210,7 @@ func (x *EvaluateBatchRequest) String() string { func (*EvaluateBatchRequest) ProtoMessage() {} func (x *EvaluateBatchRequest) ProtoReflect() protoreflect.Message { - mi := &file_rule_proto_msgTypes[5] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -411,7 +223,7 @@ func (x *EvaluateBatchRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use EvaluateBatchRequest.ProtoReflect.Descriptor instead. func (*EvaluateBatchRequest) Descriptor() ([]byte, []int) { - return file_rule_proto_rawDescGZIP(), []int{5} + return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{4} } func (x *EvaluateBatchRequest) GetEvents() []*Event { @@ -421,16 +233,102 @@ func (x *EvaluateBatchRequest) GetEvents() []*Event { return nil } +// EventResult carries the match outcome and any optional per-event overrides. +// Empty string / nil fields mean "use YAML default". 
+type EventResult struct { + state protoimpl.MessageState `protogen:"open.v1"` + Matched bool `protobuf:"varint,1,opt,name=matched,proto3" json:"matched,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Severity string `protobuf:"bytes,4,opt,name=severity,proto3" json:"severity,omitempty"` // "info|low|medium|high|critical"; "" = YAML default + ContextJson []byte `protobuf:"bytes,5,opt,name=context_json,json=contextJson,proto3" json:"context_json,omitempty"` // JSON-encoded map[string]any; empty = nil + MergeByKeys []string `protobuf:"bytes,6,rep,name=merge_by_keys,json=mergeByKeys,proto3" json:"merge_by_keys,omitempty"` // empty = use YAML merge_by_keys + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EventResult) Reset() { + *x = EventResult{} + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EventResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventResult) ProtoMessage() {} + +func (x *EventResult) ProtoReflect() protoreflect.Message { + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventResult.ProtoReflect.Descriptor instead. 
+func (*EventResult) Descriptor() ([]byte, []int) { + return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{5} +} + +func (x *EventResult) GetMatched() bool { + if x != nil { + return x.Matched + } + return false +} + +func (x *EventResult) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *EventResult) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *EventResult) GetSeverity() string { + if x != nil { + return x.Severity + } + return "" +} + +func (x *EventResult) GetContextJson() []byte { + if x != nil { + return x.ContextJson + } + return nil +} + +func (x *EventResult) GetMergeByKeys() []string { + if x != nil { + return x.MergeByKeys + } + return nil +} + type EvaluateBatchResponse struct { state protoimpl.MessageState `protogen:"open.v1"` - Matched []bool `protobuf:"varint,1,rep,packed,name=matched,proto3" json:"matched,omitempty"` + Results []*EventResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *EvaluateBatchResponse) Reset() { *x = EvaluateBatchResponse{} - mi := &file_rule_proto_msgTypes[6] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -442,7 +340,7 @@ func (x *EvaluateBatchResponse) String() string { func (*EvaluateBatchResponse) ProtoMessage() {} func (x *EvaluateBatchResponse) ProtoReflect() protoreflect.Message { - mi := &file_rule_proto_msgTypes[6] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -455,49 +353,22 @@ func (x *EvaluateBatchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use EvaluateBatchResponse.ProtoReflect.Descriptor instead. 
func (*EvaluateBatchResponse) Descriptor() ([]byte, []int) { - return file_rule_proto_rawDescGZIP(), []int{6} + return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{6} } -func (x *EvaluateBatchResponse) GetMatched() []bool { +func (x *EvaluateBatchResponse) GetResults() []*EventResult { if x != nil { - return x.Matched + return x.Results } return nil } -var File_rule_proto protoreflect.FileDescriptor +var File_pkg_rules_rpc_rules_rule_proto protoreflect.FileDescriptor -const file_rule_proto_rawDesc = "" + - "\n" + - "\n" + - "rule.proto\x12\x05rules\"\a\n" + - "\x05Empty\"\xc8\x04\n" + - "\bMetadata\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\x12 \n" + - "\vdescription\x18\x03 \x01(\tR\vdescription\x12\x18\n" + - "\aenabled\x18\x04 \x01(\bR\aenabled\x12\x1a\n" + - "\bseverity\x18\x05 \x01(\tR\bseverity\x12\x1e\n" + - "\n" + - "confidence\x18\x06 \x01(\tR\n" + - "confidence\x12\"\n" + - "\rmerge_by_keys\x18\a \x03(\tR\vmergeByKeys\x12*\n" + - "\x11merge_window_mins\x18\b \x01(\rR\x0fmergeWindowMins\x12\x1f\n" + - "\vreq_subkeys\x18\t \x03(\tR\n" + - "reqSubkeys\x12\x16\n" + - "\x06signal\x18\n" + - " \x01(\bR\x06signal\x12)\n" + - "\x10signal_threshold\x18\v \x01(\tR\x0fsignalThreshold\x12\x12\n" + - "\x04tags\x18\f \x03(\tR\x04tags\x12 \n" + - "\vdispatchers\x18\r \x03(\tR\vdispatchers\x12\x1b\n" + - "\tlog_types\x18\x0e \x03(\tR\blogTypes\x12\x1a\n" + - "\bmatchers\x18\x0f \x03(\tR\bmatchers\x12\x1e\n" + +const file_pkg_rules_rpc_rules_rule_proto_rawDesc = "" + "\n" + - "formatters\x18\x10 \x03(\tR\n" + - "formatters\x12 \n" + - "\venrichments\x18\x11 \x03(\tR\venrichments\x12!\n" + - "\ftuning_rules\x18\x12 \x03(\tR\vtuningRules\x12\x18\n" + - "\aversion\x18\x13 \x01(\tR\aversion\"\x1b\n" + + "\x1epkg/rules/rpc_rules/rule.proto\x12\x05rules\"\a\n" + + "\x05Empty\"\x1b\n" + "\x05Event\x12\x12\n" + "\x04json\x18\x01 \x01(\fR\x04json\"5\n" + "\x0fEvaluateRequest\x12\"\n" + @@ -505,11 +376,17 @@ const 
file_rule_proto_rawDesc = "" + "\x10EvaluateResponse\x12\x18\n" + "\amatched\x18\x01 \x01(\bR\amatched\"<\n" + "\x14EvaluateBatchRequest\x12$\n" + - "\x06events\x18\x01 \x03(\v2\f.rules.EventR\x06events\"1\n" + - "\x15EvaluateBatchResponse\x12\x18\n" + - "\amatched\x18\x01 \x03(\bR\amatched2\xad\x02\n" + - "\x04Rule\x12,\n" + - "\vGetMetadata\x12\f.rules.Empty\x1a\x0f.rules.Metadata\x12\"\n" + + "\x06events\x18\x01 \x03(\v2\f.rules.EventR\x06events\"\xc2\x01\n" + + "\vEventResult\x12\x18\n" + + "\amatched\x18\x01 \x01(\bR\amatched\x12\x14\n" + + "\x05title\x18\x02 \x01(\tR\x05title\x12 \n" + + "\vdescription\x18\x03 \x01(\tR\vdescription\x12\x1a\n" + + "\bseverity\x18\x04 \x01(\tR\bseverity\x12!\n" + + "\fcontext_json\x18\x05 \x01(\fR\vcontextJson\x12\"\n" + + "\rmerge_by_keys\x18\x06 \x03(\tR\vmergeByKeys\"E\n" + + "\x15EvaluateBatchResponse\x12,\n" + + "\aresults\x18\x02 \x03(\v2\x12.rules.EventResultR\aresults2\xff\x01\n" + + "\x04Rule\x12\"\n" + "\x04Init\x12\f.rules.Empty\x1a\f.rules.Empty\x12;\n" + "\bEvaluate\x12\x16.rules.EvaluateRequest\x1a\x17.rules.EvaluateResponse\x12J\n" + "\rEvaluateBatch\x12\x1b.rules.EvaluateBatchRequest\x1a\x1c.rules.EvaluateBatchResponse\x12&\n" + @@ -517,69 +394,68 @@ const file_rule_proto_rawDesc = "" + "\x04Ping\x12\f.rules.Empty\x1a\f.rules.EmptyB\x16Z\x14rpc_rules/;rpc_rulesb\x06proto3" var ( - file_rule_proto_rawDescOnce sync.Once - file_rule_proto_rawDescData []byte + file_pkg_rules_rpc_rules_rule_proto_rawDescOnce sync.Once + file_pkg_rules_rpc_rules_rule_proto_rawDescData []byte ) -func file_rule_proto_rawDescGZIP() []byte { - file_rule_proto_rawDescOnce.Do(func() { - file_rule_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rule_proto_rawDesc), len(file_rule_proto_rawDesc))) +func file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP() []byte { + file_pkg_rules_rpc_rules_rule_proto_rawDescOnce.Do(func() { + file_pkg_rules_rpc_rules_rule_proto_rawDescData = 
protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_pkg_rules_rpc_rules_rule_proto_rawDesc), len(file_pkg_rules_rpc_rules_rule_proto_rawDesc))) }) - return file_rule_proto_rawDescData + return file_pkg_rules_rpc_rules_rule_proto_rawDescData } -var file_rule_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_rule_proto_goTypes = []any{ +var file_pkg_rules_rpc_rules_rule_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_pkg_rules_rpc_rules_rule_proto_goTypes = []any{ (*Empty)(nil), // 0: rules.Empty - (*Metadata)(nil), // 1: rules.Metadata - (*Event)(nil), // 2: rules.Event - (*EvaluateRequest)(nil), // 3: rules.EvaluateRequest - (*EvaluateResponse)(nil), // 4: rules.EvaluateResponse - (*EvaluateBatchRequest)(nil), // 5: rules.EvaluateBatchRequest + (*Event)(nil), // 1: rules.Event + (*EvaluateRequest)(nil), // 2: rules.EvaluateRequest + (*EvaluateResponse)(nil), // 3: rules.EvaluateResponse + (*EvaluateBatchRequest)(nil), // 4: rules.EvaluateBatchRequest + (*EventResult)(nil), // 5: rules.EventResult (*EvaluateBatchResponse)(nil), // 6: rules.EvaluateBatchResponse } -var file_rule_proto_depIdxs = []int32{ - 2, // 0: rules.EvaluateRequest.event:type_name -> rules.Event - 2, // 1: rules.EvaluateBatchRequest.events:type_name -> rules.Event - 0, // 2: rules.Rule.GetMetadata:input_type -> rules.Empty +var file_pkg_rules_rpc_rules_rule_proto_depIdxs = []int32{ + 1, // 0: rules.EvaluateRequest.event:type_name -> rules.Event + 1, // 1: rules.EvaluateBatchRequest.events:type_name -> rules.Event + 5, // 2: rules.EvaluateBatchResponse.results:type_name -> rules.EventResult 0, // 3: rules.Rule.Init:input_type -> rules.Empty - 3, // 4: rules.Rule.Evaluate:input_type -> rules.EvaluateRequest - 5, // 5: rules.Rule.EvaluateBatch:input_type -> rules.EvaluateBatchRequest + 2, // 4: rules.Rule.Evaluate:input_type -> rules.EvaluateRequest + 4, // 5: rules.Rule.EvaluateBatch:input_type -> rules.EvaluateBatchRequest 0, // 6: rules.Rule.Shutdown:input_type -> 
rules.Empty 0, // 7: rules.Rule.Ping:input_type -> rules.Empty - 1, // 8: rules.Rule.GetMetadata:output_type -> rules.Metadata - 0, // 9: rules.Rule.Init:output_type -> rules.Empty - 4, // 10: rules.Rule.Evaluate:output_type -> rules.EvaluateResponse - 6, // 11: rules.Rule.EvaluateBatch:output_type -> rules.EvaluateBatchResponse - 0, // 12: rules.Rule.Shutdown:output_type -> rules.Empty - 0, // 13: rules.Rule.Ping:output_type -> rules.Empty - 8, // [8:14] is the sub-list for method output_type - 2, // [2:8] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_rule_proto_init() } -func file_rule_proto_init() { - if File_rule_proto != nil { + 0, // 8: rules.Rule.Init:output_type -> rules.Empty + 3, // 9: rules.Rule.Evaluate:output_type -> rules.EvaluateResponse + 6, // 10: rules.Rule.EvaluateBatch:output_type -> rules.EvaluateBatchResponse + 0, // 11: rules.Rule.Shutdown:output_type -> rules.Empty + 0, // 12: rules.Rule.Ping:output_type -> rules.Empty + 8, // [8:13] is the sub-list for method output_type + 3, // [3:8] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_pkg_rules_rpc_rules_rule_proto_init() } +func file_pkg_rules_rpc_rules_rule_proto_init() { + if File_pkg_rules_rpc_rules_rule_proto != nil { return } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_rule_proto_rawDesc), len(file_rule_proto_rawDesc)), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_rules_rpc_rules_rule_proto_rawDesc), len(file_pkg_rules_rpc_rules_rule_proto_rawDesc)), NumEnums: 0, NumMessages: 7, NumExtensions: 0, NumServices: 
1, }, - GoTypes: file_rule_proto_goTypes, - DependencyIndexes: file_rule_proto_depIdxs, - MessageInfos: file_rule_proto_msgTypes, + GoTypes: file_pkg_rules_rpc_rules_rule_proto_goTypes, + DependencyIndexes: file_pkg_rules_rpc_rules_rule_proto_depIdxs, + MessageInfos: file_pkg_rules_rpc_rules_rule_proto_msgTypes, }.Build() - File_rule_proto = out.File - file_rule_proto_goTypes = nil - file_rule_proto_depIdxs = nil + File_pkg_rules_rpc_rules_rule_proto = out.File + file_pkg_rules_rpc_rules_rule_proto_goTypes = nil + file_pkg_rules_rpc_rules_rule_proto_depIdxs = nil } diff --git a/pkg/rules/rpc_rules/rule.proto b/pkg/rules/rpc_rules/rule.proto index 9a8a3f8..c7f22d9 100644 --- a/pkg/rules/rpc_rules/rule.proto +++ b/pkg/rules/rpc_rules/rule.proto @@ -11,7 +11,21 @@ message EvaluateRequest { Event event = 1; } message EvaluateResponse { bool matched = 1; } message EvaluateBatchRequest { repeated Event events = 1; } -message EvaluateBatchResponse { repeated bool matched = 1; } + +// EventResult carries the match outcome and any optional per-event overrides. +// Empty string / nil fields mean "use YAML default". 
+message EventResult { + bool matched = 1; + string title = 2; + string description = 3; + string severity = 4; // "info|low|medium|high|critical"; "" = YAML default + bytes context_json = 5; // JSON-encoded map[string]any; empty = nil + repeated string merge_by_keys = 6; // empty = use YAML merge_by_keys +} + +message EvaluateBatchResponse { + repeated EventResult results = 2; +} service Rule { rpc Init(Empty) returns (Empty); diff --git a/pkg/rules/rpc_rules/rule_grpc.pb.go b/pkg/rules/rpc_rules/rule_grpc.pb.go index f8aa347..f13087e 100644 --- a/pkg/rules/rpc_rules/rule_grpc.pb.go +++ b/pkg/rules/rpc_rules/rule_grpc.pb.go @@ -2,7 +2,7 @@ // versions: // - protoc-gen-go-grpc v1.5.1 // - protoc v7.34.0 -// source: rule.proto +// source: pkg/rules/rpc_rules/rule.proto package rpc_rules @@ -19,7 +19,6 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - Rule_GetMetadata_FullMethodName = "/rules.Rule/GetMetadata" Rule_Init_FullMethodName = "/rules.Rule/Init" Rule_Evaluate_FullMethodName = "/rules.Rule/Evaluate" Rule_EvaluateBatch_FullMethodName = "/rules.Rule/EvaluateBatch" @@ -31,7 +30,6 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type RuleClient interface { - GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Metadata, error) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Evaluate(ctx context.Context, in *EvaluateRequest, opts ...grpc.CallOption) (*EvaluateResponse, error) EvaluateBatch(ctx context.Context, in *EvaluateBatchRequest, opts ...grpc.CallOption) (*EvaluateBatchResponse, error) @@ -47,16 +45,6 @@ func NewRuleClient(cc grpc.ClientConnInterface) RuleClient { return &ruleClient{cc} } -func (c *ruleClient) GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Metadata, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
- out := new(Metadata) - err := c.cc.Invoke(ctx, Rule_GetMetadata_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *ruleClient) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) @@ -111,7 +99,6 @@ func (c *ruleClient) Ping(ctx context.Context, in *Empty, opts ...grpc.CallOptio // All implementations must embed UnimplementedRuleServer // for forward compatibility. type RuleServer interface { - GetMetadata(context.Context, *Empty) (*Metadata, error) Init(context.Context, *Empty) (*Empty, error) Evaluate(context.Context, *EvaluateRequest) (*EvaluateResponse, error) EvaluateBatch(context.Context, *EvaluateBatchRequest) (*EvaluateBatchResponse, error) @@ -127,9 +114,6 @@ type RuleServer interface { // pointer dereference when methods are called. type UnimplementedRuleServer struct{} -func (UnimplementedRuleServer) GetMetadata(context.Context, *Empty) (*Metadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMetadata not implemented") -} func (UnimplementedRuleServer) Init(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Init not implemented") } @@ -166,24 +150,6 @@ func RegisterRuleServer(s grpc.ServiceRegistrar, srv RuleServer) { s.RegisterService(&Rule_ServiceDesc, srv) } -func _Rule_GetMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RuleServer).GetMetadata(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Rule_GetMetadata_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RuleServer).GetMetadata(ctx, req.(*Empty)) - } - return 
interceptor(ctx, in, info, handler) -} - func _Rule_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { @@ -281,10 +247,6 @@ var Rule_ServiceDesc = grpc.ServiceDesc{ ServiceName: "rules.Rule", HandlerType: (*RuleServer)(nil), Methods: []grpc.MethodDesc{ - { - MethodName: "GetMetadata", - Handler: _Rule_GetMetadata_Handler, - }, { MethodName: "Init", Handler: _Rule_Init_Handler, @@ -307,5 +269,5 @@ var Rule_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "rule.proto", + Metadata: "pkg/rules/rpc_rules/rule.proto", } diff --git a/pkg/rules/rule.go b/pkg/rules/rule.go index 7693a76..3b4c4d7 100644 --- a/pkg/rules/rule.go +++ b/pkg/rules/rule.go @@ -2,80 +2,34 @@ package rules import ( "context" - "time" "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/events" "github.com/harishhary/blink/pkg/rules/config" - "github.com/harishhary/blink/pkg/scoring" ) type Observables = config.Observable -// Metadata carries all static rule configuration. Alert.Rule is typed as Metadata so downstream pipeline services (tuner, enricher, formatter, dispatcher) can read rule properties without needing an Evaluate capability. 
-type Metadata interface { - Id() string - Name() string - Description() string - Enabled() bool - FileName() string - DisplayName() string - References() []string - Severity() scoring.Severity - Confidence() scoring.Confidence - RiskScore() scoring.RiskScore - MergeByKeys() []string - MergeWindowMins() time.Duration - ReqSubkeys() []string - Signal() bool - SignalThreshold() scoring.Confidence - Tags() []string - Dispatchers() []string - LogTypes() []string - Observables() []Observables - Matchers() []string - Formatters() []string - Enrichments() []string - TuningRules() []string - Checksum() string - Version() string +// EvalResult is the per-event outcome returned by Rule.Evaluate. +// Fields beyond Matched are populated only when the plugin implements the +// corresponding optional capability interface (Titler, Describer, etc.). +// An empty/zero field means "use the YAML-configured default". +type EvalResult struct { + Matched bool + Title string + Description string + Severity string // "" = no override; "info"/"low"/"medium"/"high"/"critical" = override + Context map[string]any // extra key-value pairs merged into alert.Event + MergeByKeys []string // overrides YAML merge_by_keys when non-nil } -// Rule is the full interface for live rule plugins: metadata + batch evaluation. -// All rules receive a slice of events and return a matched bool per event. -// The SDK server handles looping over individual events on the subprocess side. +// Rule is the full interface for live rule plugins: config accessor + batch evaluation. +// All rules receive a slice of events and return one EvalResult per event. +// PluginMetadata + Checksum together satisfy plugin.Syncable. type Rule interface { - Metadata - Evaluate(ctx context.Context, evts []events.Event) ([]bool, errors.Error) -} - -// --- Optional capability interfaces --- -// Discovered via type assertion; not required by all implementations. - -// Generates a dynamic alert title from the triggering event. 
-type Titler interface { - AlertTitle(event events.Event) string -} - -// Generates a dynamic alert description from the triggering event. -type Describer interface { - AlertDescription(event events.Event) string -} - -// Returns keys used to deduplicate/merge related alerts. -type Deduper interface { - Dedup(event events.Event) []string -} - -// Computes a per-event severity (e.g. based on asset value). -type DynamicSeverity interface { - DynamicSeverity(event events.Event) scoring.Severity -} - -// Appends extra key-value context to the generated alert. -type ContextProvider interface { - AlertContext(event events.Event) map[string]any + RuleMetadata() *config.RuleMetadata + PluginMetadata() plugin.PluginMetadata + Checksum() string + Evaluate(ctx context.Context, evts []events.Event) ([]EvalResult, errors.Error) } - -// Guards rule evaluation until required event fields are present. -type SubKeyFilter interface{ SubKeysInEvent(event events.Event) bool } diff --git a/pkg/tuning_rules/manager.go b/pkg/tuning_rules/manager.go index 404b1c1..87e2b93 100644 --- a/pkg/tuning_rules/manager.go +++ b/pkg/tuning_rules/manager.go @@ -2,11 +2,12 @@ package tuning_rules import ( "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" + "github.com/harishhary/blink/pkg/tuning_rules/config" ) -var tuningManagerMetrics = pluginmgr.NewPluginManagerMetrics("tuning_rules") +var tuningManagerMetrics = plugin.NewPluginManagerMetrics("tuning_rules") -func NewManager(log *logger.Logger, notify pluginmgr.Notify, dir string) pluginmgr.Plugin { - return pluginmgr.NewPluginManager[TuningRule](log, notify, dir, &TuningRuleAdapter{}, tuningManagerMetrics) +func NewManager(log *logger.Logger, notify plugin.Notify, dir string, watcher *config.Watcher) *plugin.PluginManager[TuningRule] { + return plugin.NewPluginManager[TuningRule](log, notify, dir, &TuningRuleAdapter{Watcher: watcher}, 
tuningManagerMetrics) } diff --git a/pkg/tuning_rules/pool/pool.go b/pkg/tuning_rules/pool/pool.go index a34942c..62769b2 100644 --- a/pkg/tuning_rules/pool/pool.go +++ b/pkg/tuning_rules/pool/pool.go @@ -6,7 +6,7 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/messaging" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" "github.com/harishhary/blink/pkg/alerts" "github.com/harishhary/blink/pkg/scoring" @@ -30,7 +30,7 @@ func (p *Pool) Tune(ctx context.Context, tuningRuleID string, alerts []alerts.Al ) { applies = make([]bool, len(alerts)) err := p.Call(ctx, tuningRuleID, canaryHashKey, func(callCtx context.Context, t tuning.TuningRule) error { - if !t.Enabled() { + if !t.TuningMetadata().Enabled() { return nil } ruleType = t.RuleType() @@ -47,11 +47,12 @@ func (p *Pool) Tune(ctx context.Context, tuningRuleID string, alerts []alerts.Al // Handles plugin lifecycle messages from the plugin manager bus, registering or deregistering tuning rules in the pool. 
func poolKey(t tuning.TuningRule) internal.PoolKey { - version := t.Version() + cfg := t.TuningMetadata() + version := cfg.Version() if cs := t.Checksum(); cs != "" { version = version + "@" + cs } - return internal.PoolKey{PluginID: t.Id(), Version: version} + return internal.PoolKey{PluginID: cfg.Id(), Version: version} } func (p *Pool) Sync(msg messaging.Message) { @@ -59,13 +60,13 @@ func (p *Pool) Sync(msg messaging.Message) { p.Register(poolKey(items[0]), items, maxProcs, onDrained) } switch m := msg.(type) { - case pluginmgr.RegisterMessage[tuning.TuningRule]: + case plugin.RegisterMessage[tuning.TuningRule]: register(nil, m.Items, m.MaxProcs) - case pluginmgr.UpdateMessage[tuning.TuningRule]: + case plugin.UpdateMessage[tuning.TuningRule]: register(m.OnDrained, m.Items, m.MaxProcs) - case pluginmgr.UnregisterMessage[tuning.TuningRule]: + case plugin.UnregisterMessage[tuning.TuningRule]: p.Unregister(m.ItemID) - case pluginmgr.RemoveMessage[tuning.TuningRule]: + case plugin.RemoveMessage[tuning.TuningRule]: p.Remove(m.ItemID) } } diff --git a/pkg/tuning_rules/rpc_tuning_rule.go b/pkg/tuning_rules/rpc_tuning_rule.go index eac6448..0620942 100644 --- a/pkg/tuning_rules/rpc_tuning_rule.go +++ b/pkg/tuning_rules/rpc_tuning_rule.go @@ -6,61 +6,80 @@ import ( "fmt" "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts" "github.com/harishhary/blink/pkg/scoring" + "github.com/harishhary/blink/pkg/tuning_rules/config" "github.com/harishhary/blink/pkg/tuning_rules/rpc_tuning_rules" ) type rpcTuningRule struct { - meta *rpc_tuning_rules.TuningMetadata - checksum string - client rpc_tuning_rules.TuningRuleClient + cfgWatcher *config.Watcher + fileName string + checksum string + client rpc_tuning_rules.TuningRuleClient } -func newRpcTuningRule(meta *rpc_tuning_rules.TuningMetadata, client rpc_tuning_rules.TuningRuleClient, checksum string) *rpcTuningRule { - return &rpcTuningRule{meta: meta, 
checksum: checksum, client: client} -} - -func (r *rpcTuningRule) Id() string { - if id := r.meta.GetId(); id != "" { - return id +func newRpcTuningRule(fileName string, client rpc_tuning_rules.TuningRuleClient, watcher *config.Watcher, checksum string) *rpcTuningRule { + return &rpcTuningRule{ + cfgWatcher: watcher, + fileName: fileName, + checksum: checksum, + client: client, } - return r.meta.GetName() -} -func (r *rpcTuningRule) Name() string { - return r.meta.GetName() -} - -func (r *rpcTuningRule) Description() string { - return r.meta.GetDescription() } -func (r *rpcTuningRule) Enabled() bool { - return r.meta.GetEnabled() +func (r *rpcTuningRule) cfg() *config.TuningMetadata { + if r.cfgWatcher == nil { + return nil + } + return r.cfgWatcher.Current().ByFileName(r.fileName) } -func (r *rpcTuningRule) Version() string { - return r.meta.GetVersion() +// TuningMetadata returns the live YAML-derived tuning rule configuration. +func (r *rpcTuningRule) TuningMetadata() *config.TuningMetadata { + if c := r.cfg(); c != nil { + return c + } + return &config.TuningMetadata{FileNameField: r.fileName} } -func (r *rpcTuningRule) Checksum() string { - return r.checksum +func (r *rpcTuningRule) PluginMetadata() plugin.PluginMetadata { + c := r.TuningMetadata() + return plugin.PluginMetadata{ + ID: c.Id(), + Name: c.Name(), + Description: c.Description(), + Enabled: c.Enabled(), + Version: c.Version(), + } } +func (r *rpcTuningRule) Checksum() string { return r.checksum } func (r *rpcTuningRule) String() string { - return fmt.Sprintf("TuningRule '%s' (id:%s, enabled:%t)", r.meta.GetName(), r.meta.GetId(), r.meta.GetEnabled()) + c := r.TuningMetadata() + return fmt.Sprintf("TuningRule '%s' (id:%s, enabled:%t)", c.Name(), c.Id(), c.Enabled()) } -func (r *rpcTuningRule) Global() bool { - return r.meta.GetGlobal() -} +func (r *rpcTuningRule) Global() bool { return r.TuningMetadata().Global() } +// RuleType parses the YAML rule_type string into a typed RuleType constant. 
func (r *rpcTuningRule) RuleType() RuleType { - return RuleType(r.meta.GetRuleType()) + switch r.TuningMetadata().RuleTypeStr() { + case "set_confidence": + return SetConfidence + case "increase_confidence": + return IncreaseConfidence + case "decrease_confidence": + return DecreaseConfidence + default: + return Ignore + } } +// Confidence parses the YAML confidence string into a scoring.Confidence value. func (r *rpcTuningRule) Confidence() scoring.Confidence { - conf, _ := scoring.ParseConfidence(r.meta.GetConfidence()) + conf, _ := scoring.ParseConfidence(r.TuningMetadata().ConfidenceStr()) return conf } diff --git a/pkg/tuning_rules/sdk/serve.go b/pkg/tuning_rules/sdk/serve.go index ba3fde0..e7f4029 100644 --- a/pkg/tuning_rules/sdk/serve.go +++ b/pkg/tuning_rules/sdk/serve.go @@ -18,64 +18,33 @@ const ( MagicValue = "tuning_rule_v1" ) -// TuningMetadata holds the static properties returned by TuningRulePlugin.Metadata(). -type TuningMetadata struct { - ID string - Name string - Description string - Enabled bool - Global bool - RuleType int32 // 0=Ignore, 1=SetConfidence, 2=IncreaseConfidence, 3=DecreaseConfidence - Confidence string -} - +// TuningRulePlugin is the interface that all tuning rule plugin binaries must implement. +// Embed sdk.BaseTuningRule to get no-op defaults for Init and Shutdown. +// +// All static metadata (name, id, enabled, global, rule_type, confidence, etc.) lives in +// the YAML sidecar file alongside the binary — the subprocess owns only tuning logic. type TuningRulePlugin interface { - Metadata() TuningMetadata Init() error Tune(ctx context.Context, alert map[string]any) (bool, errors.Error) Shutdown() error } +// BaseTuningRule provides no-op defaults for Init and Shutdown. Embed in your rule struct. type BaseTuningRule struct{} func (BaseTuningRule) Init() error { return nil } func (BaseTuningRule) Shutdown() error { return nil } -// server wraps a TuningRulePlugin and serve the gRPC TuningRuleServer interface. 
+// server wraps a TuningRulePlugin and serves the gRPC TuningRuleServer interface. type server struct { rpc_tuning_rules.UnimplementedTuningRuleServer rule TuningRulePlugin } -func (s *server) GetMetadata(_ context.Context, _ *rpc_tuning_rules.Empty) (*rpc_tuning_rules.TuningMetadata, error) { - m := s.rule.Metadata() - return &rpc_tuning_rules.TuningMetadata{ - Id: m.ID, - Name: m.Name, - Description: m.Description, - Enabled: m.Enabled, - Global: m.Global, - RuleType: m.RuleType, - Confidence: m.Confidence, - }, nil -} - func (s *server) Init(_ context.Context, _ *rpc_tuning_rules.Empty) (*rpc_tuning_rules.Empty, error) { return &rpc_tuning_rules.Empty{}, s.rule.Init() } -func (s *server) Tune(ctx context.Context, req *rpc_tuning_rules.TuneRequest) (*rpc_tuning_rules.TuneResponse, error) { - var alert map[string]any - if err := json.Unmarshal(req.GetAlertJson(), &alert); err != nil { - return nil, err - } - applies, err := s.rule.Tune(ctx, alert) - if err != nil { - return nil, err - } - return &rpc_tuning_rules.TuneResponse{Applies: applies}, nil -} - func (s *server) TuneBatch(ctx context.Context, req *rpc_tuning_rules.TuneBatchRequest) (*rpc_tuning_rules.TuneBatchResponse, error) { results := make([]bool, 0, len(req.GetAlertJson())) for _, raw := range req.GetAlertJson() { diff --git a/pkg/tuning_rules/tuning_rule.go b/pkg/tuning_rules/tuning_rule.go index 4392149..7ac56ac 100644 --- a/pkg/tuning_rules/tuning_rule.go +++ b/pkg/tuning_rules/tuning_rule.go @@ -4,10 +4,16 @@ import ( "context" "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts" "github.com/harishhary/blink/pkg/scoring" + "github.com/harishhary/blink/pkg/tuning_rules/config" ) +// PluginMetadata is re-exported from internal/plugin so plugin authors don't need to +// import an internal package. 
+type PluginMetadata = plugin.PluginMetadata + type RuleType int const ( @@ -17,23 +23,11 @@ const ( DecreaseConfidence ) -func IsValidRuleType(ruleType RuleType) bool { - switch ruleType { - case Ignore, SetConfidence, IncreaseConfidence, DecreaseConfidence: - return true - default: - return false - } -} - type TuningRule interface { Tune(ctx context.Context, alerts []alerts.Alert) ([]bool, errors.Error) - Id() string - Name() string - Description() string - Enabled() bool - Version() string + TuningMetadata() *config.TuningMetadata + PluginMetadata() plugin.PluginMetadata // satisfies plugin.Syncable Global() bool RuleType() RuleType Confidence() scoring.Confidence From 425060890351d0fdeaae2f6743a63baeaffea82a Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Tue, 24 Mar 2026 13:16:02 +0100 Subject: [PATCH 08/14] big refactors on metadata struct --- internal/backends/athena/athena.go | 4 +- internal/backends/dynamodb/dynamodb.go | 4 +- internal/backends/elastic/elastic.go | 4 +- internal/backends/sqllite/sqllite.go | 10 +- pkg/alerts/alert.go | 10 +- pkg/alerts/convert.go | 31 +++--- pkg/enrichments/config/config.go | 110 +++++++++++++++++++ pkg/enrichments/enrichment.go | 5 +- pkg/enrichments/launcher.go | 27 ++--- pkg/enrichments/pool/pool.go | 12 +-- pkg/enrichments/rpc_enrichment.go | 14 +-- pkg/formatters/config/config.go | 70 ++++++++++++ pkg/formatters/formatter.go | 7 +- pkg/formatters/launcher.go | 27 ++--- pkg/formatters/pool/pool.go | 12 +-- pkg/formatters/rpc_formatter.go | 19 ++-- pkg/matchers/config/config.go | 72 +++++++++++++ pkg/matchers/launcher.go | 75 +++++++++---- pkg/matchers/matcher.go | 7 +- pkg/matchers/pool/pool.go | 12 +-- pkg/matchers/rpc_matcher.go | 21 ++-- pkg/rules/config/config.go | 144 ++++++++++--------------- pkg/rules/helpers.go | 2 +- pkg/rules/launcher.go | 13 ++- pkg/rules/manager_test.go | 14 +-- pkg/rules/pool/pool.go | 12 +-- pkg/rules/rpc_rules.go | 6 +- pkg/rules/rule.go | 7 +- pkg/rules/sdk/serve.go | 86 
++++++++++----- pkg/tuning_rules/config/config.go | 76 +++++++++++++ pkg/tuning_rules/launcher.go | 75 +++++++++---- pkg/tuning_rules/pool/pool.go | 12 +-- pkg/tuning_rules/rpc_tuning_rule.go | 26 ++--- pkg/tuning_rules/tuning_rule.go | 7 +- 34 files changed, 699 insertions(+), 334 deletions(-) create mode 100644 pkg/enrichments/config/config.go create mode 100644 pkg/formatters/config/config.go create mode 100644 pkg/matchers/config/config.go create mode 100644 pkg/tuning_rules/config/config.go diff --git a/internal/backends/athena/athena.go b/internal/backends/athena/athena.go index d569696..b1e86ad 100644 --- a/internal/backends/athena/athena.go +++ b/internal/backends/athena/athena.go @@ -203,7 +203,7 @@ func (a *AthenaBackend) ToAlert(record backends.Record) (*alerts.Alert, error) { func (a *AthenaBackend) ToRecord(alert *alerts.Alert) (backends.Record, error) { record := backends.Record{ - "RuleName": alert.Rule.Name(), + "RuleName": alert.Rule.Name, "AlertID": alert.AlertID, "Attempts": alert.Attempts, "Cluster": alert.Cluster, @@ -217,7 +217,7 @@ func (a *AthenaBackend) ToRecord(alert *alerts.Alert) (backends.Record, error) { "OutputsSent": alert.OutputsSent, "Formatters": alert.Rule.Formatters(), "Event": helpers.JsonCompact(alert.Event), - "RuleDescription": alert.Rule.Description(), + "RuleDescription": alert.Rule.Description, "SourceEntity": alert.SourceEntity, "SourceService": alert.SourceService, "Staged": alert.Staged, diff --git a/internal/backends/dynamodb/dynamodb.go b/internal/backends/dynamodb/dynamodb.go index d3d6343..6dbe5a8 100644 --- a/internal/backends/dynamodb/dynamodb.go +++ b/internal/backends/dynamodb/dynamodb.go @@ -326,7 +326,7 @@ func (at *DynamoDBBackend) ToAlert(record backends.Record) (*alerts.Alert, error func (at *DynamoDBBackend) ToRecord(alert *alerts.Alert) (backends.Record, error) { item, err := attributevalue.MarshalMap(backends.Record{ - "RuleName": alert.Rule.Name(), // Partition Key + "RuleName": alert.Rule.Name, // 
Partition Key "AlertID": alert.AlertID, // Sort/Range Key "Attempts": alert.Attempts, "Cluster": alert.Cluster, @@ -340,7 +340,7 @@ func (at *DynamoDBBackend) ToRecord(alert *alerts.Alert) (backends.Record, error "OutputsSent": alert.OutputsSent, "Formatters": alert.Rule.Formatters(), "Event": helpers.JsonCompact(alert.Event), - "RuleDescription": alert.Rule.Description(), + "RuleDescription": alert.Rule.Description, "SourceEntity": alert.SourceEntity, "SourceService": alert.SourceService, "Staged": alert.Staged, diff --git a/internal/backends/elastic/elastic.go b/internal/backends/elastic/elastic.go index a9debb3..dc0b950 100644 --- a/internal/backends/elastic/elastic.go +++ b/internal/backends/elastic/elastic.go @@ -391,7 +391,7 @@ func (es *ElasticsearchBackend) ToAlert(record backends.Record) (*alerts.Alert, func (es *ElasticsearchBackend) ToRecord(alert *alerts.Alert) (backends.Record, error) { record := backends.Record{ - "RuleName": alert.Rule.Name(), // Partition Key + "RuleName": alert.Rule.Name, // Partition Key "AlertID": alert.AlertID, // Sort/Range Key "Attempts": alert.Attempts, "Cluster": alert.Cluster, @@ -405,7 +405,7 @@ func (es *ElasticsearchBackend) ToRecord(alert *alerts.Alert) (backends.Record, "OutputsSent": alert.OutputsSent, "Formatters": alert.Rule.Formatters(), "Event": helpers.JsonCompact(alert.Event), - "RuleDescription": alert.Rule.Description(), + "RuleDescription": alert.Rule.Description, "SourceEntity": alert.SourceEntity, "SourceService": alert.SourceService, "Staged": alert.Staged, diff --git a/internal/backends/sqllite/sqllite.go b/internal/backends/sqllite/sqllite.go index dfecc9c..0d058da 100644 --- a/internal/backends/sqllite/sqllite.go +++ b/internal/backends/sqllite/sqllite.go @@ -229,7 +229,7 @@ func (s *SQLiteBackend) DeleteAlerts(alerts []*alerts.Alert) error { defer stmt.Close() for _, alert := range alerts { - _, err := stmt.ExecContext(s.Ctx, alert.Rule.Name(), alert.AlertID) + _, err := stmt.ExecContext(s.Ctx, 
alert.Rule.Name, alert.AlertID) if err != nil { tx.Rollback() return fmt.Errorf("error executing delete: %w", err) @@ -245,7 +245,7 @@ func (s *SQLiteBackend) DeleteAlerts(alerts []*alerts.Alert) error { func (s *SQLiteBackend) UpdateSentOutputs(alert *alerts.Alert) error { query := `UPDATE alerts SET OutputsSent = ? WHERE RuleName = ? AND AlertID = ?` - _, err := s.Db.ExecContext(s.Ctx, query, alert.OutputsSent, alert.Rule.Name(), alert.AlertID) + _, err := s.Db.ExecContext(s.Ctx, query, alert.OutputsSent, alert.Rule.Name, alert.AlertID) if err != nil { return fmt.Errorf("error updating item: %w", err) } @@ -254,7 +254,7 @@ func (s *SQLiteBackend) UpdateSentOutputs(alert *alerts.Alert) error { func (s *SQLiteBackend) MarkAsDispatched(alert *alerts.Alert) error { query := `UPDATE alerts SET Attempts = ?, Dispatched = ? WHERE RuleName = ? AND AlertID = ?` - _, err := s.Db.ExecContext(s.Ctx, query, alert.Attempts, alert.Dispatched.Format(helpers.DATETIME_FORMAT), alert.Rule.Name(), alert.AlertID) + _, err := s.Db.ExecContext(s.Ctx, query, alert.Attempts, alert.Dispatched.Format(helpers.DATETIME_FORMAT), alert.Rule.Name, alert.AlertID) if err != nil { return fmt.Errorf("error updating item: %w", err) } @@ -298,7 +298,7 @@ func (s *SQLiteBackend) ToAlert(record backends.Record) (*alerts.Alert, error) { func (s *SQLiteBackend) ToRecord(alert *alerts.Alert) (backends.Record, error) { record := backends.Record{ - "RuleName": alert.Rule.Name(), + "RuleName": alert.Rule.Name, "AlertID": alert.AlertID, "Attempts": alert.Attempts, "Cluster": alert.Cluster, @@ -312,7 +312,7 @@ func (s *SQLiteBackend) ToRecord(alert *alerts.Alert) (backends.Record, error) { "OutputsSent": alert.OutputsSent, "Formatters": alert.Rule.Formatters(), "Event": helpers.JsonCompact(alert.Event), - "RuleDescription": alert.Rule.Description(), + "RuleDescription": alert.Rule.Description, "SourceEntity": alert.SourceEntity, "SourceService": alert.SourceService, "Staged": alert.Staged, diff --git 
a/pkg/alerts/alert.go b/pkg/alerts/alert.go index 19f38dc..b401d6d 100644 --- a/pkg/alerts/alert.go +++ b/pkg/alerts/alert.go @@ -162,8 +162,8 @@ func (a *Alert) OutputDict() map[string]any { "outputs": a.Rule.Dispatchers(), "formatters": a.Rule.Formatters(), "event": a.Event, - "rule_description": a.Rule.Description(), - "rule_name": a.Rule.Name(), + "rule_description": a.Rule.Description, + "rule_name": a.Rule.Name, "source_entity": a.SourceEntity, "source_service": a.SourceService, "staged": a.Staged, @@ -173,7 +173,7 @@ func (a *Alert) OutputDict() map[string]any { // Returns a simple representation of the alert func (a *Alert) String() string { - return fmt.Sprintf("", a.AlertID, a.Rule.Name()) + return fmt.Sprintf("", a.AlertID, a.Rule.Name) } // Returns a detailed representation of the alert @@ -230,7 +230,7 @@ func (a *Alert) MergePartitionKey() string { sort.Strings(keys) merged := a.Event.GetMergedKeys(keys) parts := make([]string, 0, len(keys)+1) - parts = append(parts, a.Rule.Name()) + parts = append(parts, a.Rule.Name) for _, k := range keys { parts = append(parts, fmt.Sprintf("%v", merged[k])) } @@ -249,7 +249,7 @@ func (a *Alert) RemainingOutputs(requiredOutputs []string) []string { func (a *Alert) RecordKey() map[string]string { key := map[string]string{ - "RuleName": a.Rule.Name(), + "RuleName": a.Rule.Name, "AlertID": a.AlertID, } return key diff --git a/pkg/alerts/convert.go b/pkg/alerts/convert.go index d574520..d9bbfae 100644 --- a/pkg/alerts/convert.go +++ b/pkg/alerts/convert.go @@ -3,6 +3,7 @@ package alerts import ( "time" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts/pb" "github.com/harishhary/blink/pkg/events" "github.com/harishhary/blink/pkg/rules/config" @@ -94,10 +95,10 @@ func ruleToProto(r *config.RuleMetadata) *pb.RuleMetadata { return nil } return &pb.RuleMetadata{ - Id: r.Id(), - Name: r.Name(), - Description: r.Description(), - Enabled: r.Enabled(), + Id: r.Id, + Name: r.Name, + 
Description: r.Description, + Enabled: r.Enabled, Severity: r.Severity().String(), Confidence: r.Confidence().String(), MergeByKeys: r.MergeByKeys(), @@ -112,9 +113,9 @@ func ruleToProto(r *config.RuleMetadata) *pb.RuleMetadata { Formatters: r.Formatters(), Enrichments: r.Enrichments(), TuningRules: r.TuningRules(), - Version: r.Version(), - FileName: r.FileName(), - DisplayName: r.DisplayName(), + Version: r.Version, + FileName: r.FileName, + DisplayName: r.DisplayName, References: r.References(), } } @@ -125,13 +126,15 @@ func protoToRuleMetadata(m *pb.RuleMetadata) *config.RuleMetadata { return &config.RuleMetadata{} } cfg, _ := config.New(config.RuleMetadata{ - IdField: m.GetId(), - NameField: m.GetName(), - DisplayNameField: m.GetDisplayName(), - DescriptionField: m.GetDescription(), - EnabledField: m.GetEnabled(), - VersionField: m.GetVersion(), - FileNameField: m.GetFileName(), + PluginMetadata: plugin.PluginMetadata{ + Id: m.GetId(), + Name: m.GetName(), + DisplayName: m.GetDisplayName(), + Description: m.GetDescription(), + Enabled: m.GetEnabled(), + Version: m.GetVersion(), + FileName: m.GetFileName(), + }, SeverityStr: m.GetSeverity(), ConfidenceStr: m.GetConfidence(), SignalThresholdStr: m.GetSignalThreshold(), diff --git a/pkg/enrichments/config/config.go b/pkg/enrichments/config/config.go new file mode 100644 index 0000000..5b02beb --- /dev/null +++ b/pkg/enrichments/config/config.go @@ -0,0 +1,110 @@ +// Each enrichment binary ships alongside a .yaml sidecar file. +// +// YAML schema example: +// +// id: "550e8400-e29b-41d4-a716-446655440000" +// name: "geoip" +// display_name: "GeoIP Enrichment" +// description: "Adds geographic location data to events." 
+// enabled: true +// version: "1.0.0" +// file_name: "geoip" +// depends_on: ["other-enrichment"] +// mode: "blue-green" +// min_procs: 1 +// max_procs: 2 + +package config + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/harishhary/blink/internal/plugin" + "go.yaml.in/yaml/v4" +) + +// EnrichmentMetadata is the in-memory representation of an enrichment YAML sidecar. +type EnrichmentMetadata struct { + plugin.PluginMetadata `yaml:",inline"` + DependsOn []string `yaml:"depends_on"` +} + +// loader implements plugin.Loader[*EnrichmentMetadata]. +type loader struct{} + +func (loader) Load(path string) (*EnrichmentMetadata, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("enrichment config: read %s: %w", path, err) + } + var cfg EnrichmentMetadata + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("enrichment config: parse %s: %w", path, err) + } + if cfg.Name == "" { + return nil, fmt.Errorf("enrichment config: %s: name is required", path) + } + if cfg.FileName == "" { + base := filepath.Base(path) + cfg.FileName = strings.TrimSuffix(base, filepath.Ext(base)) + } + if cfg.Id == "" { + cfg.Id = cfg.FileName + } + return &cfg, nil +} + +func (loader) Validate(all []*EnrichmentMetadata) error { + index := make(map[string]*EnrichmentMetadata, len(all)) + for _, e := range all { + index[e.Name] = e + } + const ( + unvisited = iota + inProgress + done + ) + state := make(map[string]int, len(all)) + var visit func(name string, path []string) error + visit = func(name string, path []string) error { + switch state[name] { + case done: + return nil + case inProgress: + return fmt.Errorf("enrichment config: dependency cycle detected: %v → %s", path, name) + } + state[name] = inProgress + e, ok := index[name] + if !ok { + return fmt.Errorf("enrichment config: %q depends on unknown enrichment %q", path[len(path)-1], name) + } + for _, dep := range e.DependsOn { + if err := visit(dep, 
append(path, name)); err != nil { + return err + } + } + state[name] = done + return nil + } + for _, e := range all { + if err := visit(e.Name, []string{}); err != nil { + return err + } + } + return nil +} + +// Registry and Watcher are the generic implementations parameterised for enrichments. +type Registry = plugin.Registry[*EnrichmentMetadata] +type Watcher = plugin.Watcher[*EnrichmentMetadata] + +func NewRegistry(dir string) (*Registry, error) { + return plugin.NewRegistry(dir, "enrichment", loader{}) +} + +func NewWatcher(dir string) (*Watcher, error) { + return plugin.NewWatcher("enrichment-config-watcher", dir, "enrichment", loader{}) +} diff --git a/pkg/enrichments/enrichment.go b/pkg/enrichments/enrichment.go index 5fe3913..c7198b0 100644 --- a/pkg/enrichments/enrichment.go +++ b/pkg/enrichments/enrichment.go @@ -12,6 +12,7 @@ import ( // PluginMetadata is re-exported from internal/plugin so plugin authors don't need to // import an internal package. type PluginMetadata = plugin.PluginMetadata +type EnrichmentMetadata = config.EnrichmentMetadata type Enrichment interface { Enrich(ctx context.Context, alerts []*alerts.Alert) errors.Error @@ -19,8 +20,8 @@ type Enrichment interface { // Populated from the YAML sidecar depends_on field. 
DependsOn() []string - EnrichmentMetadata() *config.EnrichmentMetadata - PluginMetadata() PluginMetadata // satisfies plugin.Syncable + EnrichmentMetadata() *EnrichmentMetadata + Metadata() PluginMetadata // satisfies plugin.Syncable Checksum() string String() string } diff --git a/pkg/enrichments/launcher.go b/pkg/enrichments/launcher.go index 6614e05..4e2af2a 100644 --- a/pkg/enrichments/launcher.go +++ b/pkg/enrichments/launcher.go @@ -41,42 +41,43 @@ func (l *EnrichmentAdapter) Handshake(ctx context.Context, raw interface{}, binP } e := newRpcEnrichment(fileName, rpc, l.Watcher, hash) - cfg := l.Watcher.Current().ByFileName(fileName) + cfg, ok := l.Watcher.Current().ByFileName(fileName) id, name := fileName, fileName - if cfg != nil { - id = cfg.Id() - name = cfg.Name() + if ok { + id = cfg.Id + name = cfg.Name } return e, &enrichmentLifecycle{rpc: rpc}, id, name, nil } // IsReady reports whether this binary's YAML sidecar exists in the current registry. func (l *EnrichmentAdapter) IsReady(binPath string) bool { - return l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) != nil + _, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + return ok } // IsShadow reports whether this binary's YAML declares it as a shadow or canary version. func (l *EnrichmentAdapter) IsShadow(binPath string) bool { - cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) - if cfg == nil { + cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if !ok { return false } - m := cfg.RolloutMode() + m := cfg.RolloutMode return m == internal.RolloutModeCanary || m == internal.RolloutModeShadow } // IsEnabled reports whether the enrichment's YAML sidecar still exists and is enabled. 
func (l *EnrichmentAdapter) IsEnabled(h *plugin.PluginHandle) bool { - cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) - return cfg != nil && cfg.Enabled() + cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) + return ok && cfg.Enabled } func (l *EnrichmentAdapter) Workers(binPath string) int { - cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) - if cfg == nil || cfg.MaxProcs() <= 0 { + cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if !ok || cfg.MaxProcs <= 0 { return 1 } - return cfg.MaxProcs() + return cfg.MaxProcs } type enrichmentLifecycle struct { diff --git a/pkg/enrichments/pool/pool.go b/pkg/enrichments/pool/pool.go index 2f25f70..c3822ce 100644 --- a/pkg/enrichments/pool/pool.go +++ b/pkg/enrichments/pool/pool.go @@ -28,7 +28,7 @@ func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { func (p *Pool) Enrich(ctx context.Context, enrichmentID string, alerts []*alerts.Alert, canaryHashKey string) (absent bool, removed bool, errs []errors.Error) { errs = make([]errors.Error, len(alerts)) err := p.Call(ctx, enrichmentID, canaryHashKey, func(callCtx context.Context, e enrichments.Enrichment) error { - if !e.EnrichmentMetadata().Enabled() { + if !e.EnrichmentMetadata().Enabled { return nil } if err := e.Enrich(callCtx, alerts); err != nil { @@ -52,11 +52,7 @@ func (p *Pool) Enrich(ctx context.Context, enrichmentID string, alerts []*alerts func poolKey(e enrichments.Enrichment) internal.PoolKey { cfg := e.EnrichmentMetadata() - version := cfg.Version() - if cs := e.Checksum(); cs != "" { - version = version + "@" + cs - } - return internal.PoolKey{PluginID: cfg.Id(), Version: version} + return internal.PoolKey{Id: cfg.Id, Version: cfg.Version, Hash: e.Checksum()} } func (p *Pool) Sync(msg messaging.Message) { @@ -69,8 +65,8 @@ func (p *Pool) Sync(msg messaging.Message) { case plugin.UpdateMessage[enrichments.Enrichment]: 
register(m.OnDrained, m.Items, m.MaxProcs) case plugin.UnregisterMessage[enrichments.Enrichment]: - p.Unregister(m.ItemID) + p.Unregister(m.ItemKey) case plugin.RemoveMessage[enrichments.Enrichment]: - p.Remove(m.ItemID) + p.Remove(m.ItemKey) } } diff --git a/pkg/enrichments/rpc_enrichment.go b/pkg/enrichments/rpc_enrichment.go index 6a30297..13d9003 100644 --- a/pkg/enrichments/rpc_enrichment.go +++ b/pkg/enrichments/rpc_enrichment.go @@ -31,7 +31,8 @@ func (r *rpcEnrichment) cfg() *config.EnrichmentMetadata { if r.cfgWatcher == nil { return nil } - return r.cfgWatcher.Current().ByFileName(r.fileName) + v, _ := r.cfgWatcher.Current().ByFileName(r.fileName) + return v } // EnrichmentMetadata returns the live YAML-derived enrichment configuration. @@ -39,17 +40,18 @@ func (r *rpcEnrichment) EnrichmentMetadata() *config.EnrichmentMetadata { if c := r.cfg(); c != nil { return c } - return &config.EnrichmentMetadata{FileNameField: r.fileName} + return &config.EnrichmentMetadata{PluginMetadata: plugin.PluginMetadata{Id: r.fileName, Name: r.fileName, FileName: r.fileName}} } -func (r *rpcEnrichment) PluginMetadata() plugin.PluginMetadata { - return r.EnrichmentMetadata().PluginMetadata() +func (r *rpcEnrichment) Metadata() plugin.PluginMetadata { + return r.EnrichmentMetadata().Metadata() } -func (r *rpcEnrichment) DependsOn() []string { return r.EnrichmentMetadata().DependsOn() } +func (r *rpcEnrichment) DependsOn() []string { return r.EnrichmentMetadata().DependsOn } func (r *rpcEnrichment) Checksum() string { return r.checksum } func (r *rpcEnrichment) String() string { - return "RpcEnrichment '" + r.EnrichmentMetadata().Name() + "' id:'" + r.EnrichmentMetadata().Id() + "'" + m := r.EnrichmentMetadata().Metadata() + return "RpcEnrichment '" + m.Name + "' id:'" + m.Id + "'" } func (r *rpcEnrichment) Enrich(ctx context.Context, alerts []*alerts.Alert) errors.Error { diff --git a/pkg/formatters/config/config.go b/pkg/formatters/config/config.go new file mode 100644 index 
0000000..f02624c --- /dev/null +++ b/pkg/formatters/config/config.go @@ -0,0 +1,70 @@ +// Each formatter binary ships alongside a .yaml sidecar file. +// +// YAML schema example: +// +// id: "550e8400-e29b-41d4-a716-446655440001" +// name: "json-summary" +// display_name: "JSON Summary Formatter" +// description: "Formats alert data as a structured JSON summary." +// enabled: true +// version: "1.0.0" +// file_name: "json-summary" +// mode: "blue-green" +// min_procs: 1 +// max_procs: 2 + +package config + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/harishhary/blink/internal/plugin" + "go.yaml.in/yaml/v4" +) + +// FormatterMetadata is the in-memory representation of a formatter YAML sidecar. +type FormatterMetadata struct { + plugin.PluginMetadata `yaml:",inline"` +} + +// loader implements plugin.Loader[*FormatterMetadata]. +type loader struct{} + +func (loader) Load(path string) (*FormatterMetadata, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("formatter config: read %s: %w", path, err) + } + var cfg FormatterMetadata + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("formatter config: parse %s: %w", path, err) + } + if cfg.Name == "" { + return nil, fmt.Errorf("formatter config: %s: name is required", path) + } + if cfg.FileName == "" { + base := filepath.Base(path) + cfg.FileName = strings.TrimSuffix(base, filepath.Ext(base)) + } + if cfg.Id == "" { + cfg.Id = cfg.FileName + } + return &cfg, nil +} + +func (loader) Validate(all []*FormatterMetadata) error { return nil } + +// Registry and Watcher are the generic implementations parameterised for formatters. 
+type Registry = plugin.Registry[*FormatterMetadata] +type Watcher = plugin.Watcher[*FormatterMetadata] + +func NewRegistry(dir string) (*Registry, error) { + return plugin.NewRegistry(dir, "formatter", loader{}) +} + +func NewWatcher(dir string) (*Watcher, error) { + return plugin.NewWatcher("formatter-config-watcher", dir, "formatter", loader{}) +} diff --git a/pkg/formatters/formatter.go b/pkg/formatters/formatter.go index 8a3ca18..cb7c0df 100644 --- a/pkg/formatters/formatter.go +++ b/pkg/formatters/formatter.go @@ -9,15 +9,14 @@ import ( "github.com/harishhary/blink/pkg/formatters/config" ) -// PluginMetadata is re-exported from internal/plugin so plugin authors don't need to -// import an internal package. type PluginMetadata = plugin.PluginMetadata +type FormatterMetadata = config.FormatterMetadata type Formatter interface { Format(ctx context.Context, alerts []*alerts.Alert) ([]map[string]any, errors.Error) - FormatterMetadata() *config.FormatterMetadata - PluginMetadata() plugin.PluginMetadata // satisfies plugin.Syncable + FormatterMetadata() *FormatterMetadata + Metadata() PluginMetadata Checksum() string String() string } diff --git a/pkg/formatters/launcher.go b/pkg/formatters/launcher.go index 4b9b7b1..fb086d9 100644 --- a/pkg/formatters/launcher.go +++ b/pkg/formatters/launcher.go @@ -42,42 +42,43 @@ func (l *FormatterAdapter) Handshake(ctx context.Context, raw interface{}, binPa } f := newRpcFormatter(fileName, rpc, l.Watcher, hash) - cfg := l.Watcher.Current().ByFileName(fileName) + cfg, ok := l.Watcher.Current().ByFileName(fileName) id, name := fileName, fileName - if cfg != nil { - id = cfg.Id() - name = cfg.Name() + if ok { + id = cfg.Id + name = cfg.Name } return f, &formatterLifecycle{rpc: rpc}, id, name, nil } // IsReady reports whether this binary's YAML sidecar exists in the current registry. 
func (l *FormatterAdapter) IsReady(binPath string) bool { - return l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) != nil + _, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + return ok } // IsShadow reports whether this binary's YAML declares it as a shadow or canary version. func (l *FormatterAdapter) IsShadow(binPath string) bool { - cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) - if cfg == nil { + cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if !ok { return false } - m := cfg.RolloutMode() + m := cfg.RolloutMode return m == internal.RolloutModeCanary || m == internal.RolloutModeShadow } // IsEnabled reports whether the formatter's YAML sidecar still exists and is enabled. func (l *FormatterAdapter) IsEnabled(h *plugin.PluginHandle) bool { - cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) - return cfg != nil && cfg.Enabled() + cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) + return ok && cfg.Enabled } func (l *FormatterAdapter) Workers(binPath string) int { - cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) - if cfg == nil || cfg.MaxProcs() <= 0 { + cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if !ok || cfg.MaxProcs <= 0 { return 1 } - return cfg.MaxProcs() + return cfg.MaxProcs } type formatterLifecycle struct { diff --git a/pkg/formatters/pool/pool.go b/pkg/formatters/pool/pool.go index 6e192d8..ef79d34 100644 --- a/pkg/formatters/pool/pool.go +++ b/pkg/formatters/pool/pool.go @@ -31,7 +31,7 @@ func (p *Pool) Format(ctx context.Context, formatterID string, alerts []*alerts. 
outs = make([]map[string]any, len(alerts)) errs = make([]errors.Error, len(alerts)) err := p.Call(ctx, formatterID, canaryHashKey, func(callCtx context.Context, f formatters.Formatter) error { - if !f.FormatterMetadata().Enabled() { + if !f.FormatterMetadata().Enabled { return nil } batchOuts, e := f.Format(callCtx, alerts) @@ -58,11 +58,7 @@ func (p *Pool) Format(ctx context.Context, formatterID string, alerts []*alerts. func poolKey(f formatters.Formatter) internal.PoolKey { cfg := f.FormatterMetadata() - version := cfg.Version() - if cs := f.Checksum(); cs != "" { - version = version + "@" + cs - } - return internal.PoolKey{PluginID: cfg.Id(), Version: version} + return internal.PoolKey{Id: cfg.Id, Version: cfg.Version, Hash: f.Checksum()} } func (p *Pool) Sync(msg messaging.Message) { @@ -75,8 +71,8 @@ func (p *Pool) Sync(msg messaging.Message) { case plugin.UpdateMessage[formatters.Formatter]: register(m.OnDrained, m.Items, m.MaxProcs) case plugin.UnregisterMessage[formatters.Formatter]: - p.Unregister(m.ItemID) + p.Unregister(m.ItemKey) case plugin.RemoveMessage[formatters.Formatter]: - p.Remove(m.ItemID) + p.Remove(m.ItemKey) } } diff --git a/pkg/formatters/rpc_formatter.go b/pkg/formatters/rpc_formatter.go index 2157e74..5c089fd 100644 --- a/pkg/formatters/rpc_formatter.go +++ b/pkg/formatters/rpc_formatter.go @@ -32,7 +32,8 @@ func (f *rpcFormatter) cfg() *config.FormatterMetadata { if f.cfgWatcher == nil { return nil } - return f.cfgWatcher.Current().ByFileName(f.fileName) + v, _ := f.cfgWatcher.Current().ByFileName(f.fileName) + return v } // FormatterMetadata returns the live YAML-derived formatter configuration. 
@@ -40,23 +41,17 @@ func (f *rpcFormatter) FormatterMetadata() *config.FormatterMetadata { if c := f.cfg(); c != nil { return c } - return &config.FormatterMetadata{FileNameField: f.fileName} + return &config.FormatterMetadata{PluginMetadata: plugin.PluginMetadata{Id: f.fileName, Name: f.fileName, FileName: f.fileName}} } -func (f *rpcFormatter) PluginMetadata() plugin.PluginMetadata { - c := f.FormatterMetadata() - return plugin.PluginMetadata{ - ID: c.Id(), - Name: c.Name(), - Description: c.Description(), - Enabled: c.Enabled(), - Version: c.Version(), - } +func (f *rpcFormatter) Metadata() plugin.PluginMetadata { + return f.FormatterMetadata().Metadata() } func (f *rpcFormatter) Checksum() string { return f.checksum } func (f *rpcFormatter) String() string { - return fmt.Sprintf("Formatter '%s' (id:%s)", f.FormatterMetadata().Name(), f.FormatterMetadata().Id()) + m := f.FormatterMetadata().Metadata() + return fmt.Sprintf("Formatter '%s' (id:%s)", m.Name, m.Id) } func (f *rpcFormatter) Format(ctx context.Context, alerts []*alerts.Alert) ([]map[string]any, errors.Error) { diff --git a/pkg/matchers/config/config.go b/pkg/matchers/config/config.go new file mode 100644 index 0000000..47a9477 --- /dev/null +++ b/pkg/matchers/config/config.go @@ -0,0 +1,72 @@ +// Each matcher binary ships alongside a .yaml sidecar file. +// +// YAML schema example: +// +// id: "550e8400-e29b-41d4-a716-446655440002" +// name: "prod-accounts" +// display_name: "Production Accounts Matcher" +// description: "Matches events from production AWS accounts." +// enabled: true +// version: "1.0.0" +// file_name: "prod-accounts" +// global: false +// mode: "blue-green" +// min_procs: 1 +// max_procs: 2 + +package config + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/harishhary/blink/internal/plugin" + "go.yaml.in/yaml/v4" +) + +// MatcherMetadata is the in-memory representation of a matcher YAML sidecar. 
+type MatcherMetadata struct { + plugin.PluginMetadata `yaml:",inline"` + Global bool `yaml:"global"` +} + +// loader implements plugin.Loader[*MatcherMetadata]. +type loader struct{} + +func (loader) Load(path string) (*MatcherMetadata, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("matcher config: read %s: %w", path, err) + } + var cfg MatcherMetadata + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("matcher config: parse %s: %w", path, err) + } + if cfg.Name == "" { + return nil, fmt.Errorf("matcher config: %s: name is required", path) + } + if cfg.FileName == "" { + base := filepath.Base(path) + cfg.FileName = strings.TrimSuffix(base, filepath.Ext(base)) + } + if cfg.Id == "" { + cfg.Id = cfg.FileName + } + return &cfg, nil +} + +func (loader) Validate(all []*MatcherMetadata) error { return nil } + +// Registry and Watcher are the generic implementations parameterised for matchers. +type Registry = plugin.Registry[*MatcherMetadata] +type Watcher = plugin.Watcher[*MatcherMetadata] + +func NewRegistry(dir string) (*Registry, error) { + return plugin.NewRegistry(dir, "matcher", loader{}) +} + +func NewWatcher(dir string) (*Watcher, error) { + return plugin.NewWatcher("matcher-config-watcher", dir, "matcher", loader{}) +} diff --git a/pkg/matchers/launcher.go b/pkg/matchers/launcher.go index ab6bca3..769ab39 100644 --- a/pkg/matchers/launcher.go +++ b/pkg/matchers/launcher.go @@ -5,49 +5,80 @@ import ( "fmt" "time" - plugin "github.com/hashicorp/go-plugin" + goplugin "github.com/hashicorp/go-plugin" "google.golang.org/grpc" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/helpers" + "github.com/harishhary/blink/internal/plugin" + internal "github.com/harishhary/blink/internal/pools" + "github.com/harishhary/blink/pkg/matchers/config" "github.com/harishhary/blink/pkg/matchers/rpc_matchers" ) -type MatcherAdapter struct{} +type MatcherAdapter struct { + 
Watcher *config.Watcher +} func (l *MatcherAdapter) PluginKey() string { return "matcher" } func (l *MatcherAdapter) MagicValue() string { return "matcher_v1" } -func (l *MatcherAdapter) GRPCPlugin() plugin.Plugin { return &matcherPlugin{} } +func (l *MatcherAdapter) GRPCPlugin() goplugin.Plugin { return &matcherPlugin{} } -func (l *MatcherAdapter) Handshake(ctx context.Context, raw interface{}, _ string, hash string) (Matcher, pluginmgr.PluginLifecycle, string, string, error) { +// Handshake connects to the matcher subprocess, calls Init, and returns a +// ready rpcMatcher. Identity comes from the YAML sidecar, not from a GetMetadata RPC. +func (l *MatcherAdapter) Handshake(ctx context.Context, raw interface{}, binPath string, hash string) (Matcher, plugin.PluginLifecycle, string, string, error) { rpc, ok := raw.(rpc_matchers.MatcherClient) if !ok { return nil, nil, "", "", fmt.Errorf("dispense: unexpected type %T", raw) } - metaCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - meta, err := rpc.GetMetadata(metaCtx, &rpc_matchers.Empty{}) - cancel() - if err != nil { - return nil, nil, "", "", fmt.Errorf("metadata: %w", err) - } + fileName := helpers.BinaryBaseName(binPath) initCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - _, err = rpc.Init(initCtx, &rpc_matchers.Empty{}) + _, err := rpc.Init(initCtx, &rpc_matchers.Empty{}) cancel() if err != nil { return nil, nil, "", "", fmt.Errorf("init: %w", err) } - m := newRpcMatcher(meta, rpc, 5*time.Second, hash) - return m, &matcherLifecycle{rpc: rpc}, meta.GetId(), meta.GetName(), nil + m := newRpcMatcher(fileName, rpc, l.Watcher, 5*time.Second, hash) + cfg, ok := l.Watcher.Current().ByFileName(fileName) + id, name := fileName, fileName + if ok { + id = cfg.Id + name = cfg.Name + } + return m, &matcherLifecycle{rpc: rpc}, id, name, nil +} + +// IsReady reports whether this binary's YAML sidecar exists in the current registry. 
+func (l *MatcherAdapter) IsReady(binPath string) bool { + _, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + return ok } -// IsReady always returns true - matchers have no YAML sidecar prerequisite. -func (l *MatcherAdapter) IsReady(_ string) bool { return true } -func (l *MatcherAdapter) IsShadow(_ string) bool { return false } -func (l *MatcherAdapter) IsEnabled(_ *pluginmgr.PluginHandle) bool { return true } +// IsShadow reports whether this binary's YAML declares it as a shadow or canary version. +func (l *MatcherAdapter) IsShadow(binPath string) bool { + cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if !ok { + return false + } + m := cfg.RolloutMode + return m == internal.RolloutModeCanary || m == internal.RolloutModeShadow +} -func (l *MatcherAdapter) Workers(_ string) int { return 1 } +// IsEnabled reports whether the matcher's YAML sidecar still exists and is enabled. +func (l *MatcherAdapter) IsEnabled(h *plugin.PluginHandle) bool { + cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) + return ok && cfg.Enabled +} + +func (l *MatcherAdapter) Workers(binPath string) int { + cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if !ok || cfg.MaxProcs <= 0 { + return 1 + } + return cfg.MaxProcs +} type matcherLifecycle struct{ rpc rpc_matchers.MatcherClient } @@ -61,9 +92,9 @@ func (l *matcherLifecycle) Shutdown(ctx context.Context) error { return err } -type matcherPlugin struct{ plugin.NetRPCUnsupportedPlugin } +type matcherPlugin struct{ goplugin.NetRPCUnsupportedPlugin } -func (p *matcherPlugin) GRPCServer(_ *plugin.GRPCBroker, _ *grpc.Server) error { return nil } -func (p *matcherPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { +func (p *matcherPlugin) GRPCServer(_ *goplugin.GRPCBroker, _ *grpc.Server) error { return nil } +func (p *matcherPlugin) GRPCClient(_ context.Context, _ 
*goplugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { return rpc_matchers.NewMatcherClient(c), nil } diff --git a/pkg/matchers/matcher.go b/pkg/matchers/matcher.go index c8e0d64..da840fd 100644 --- a/pkg/matchers/matcher.go +++ b/pkg/matchers/matcher.go @@ -9,13 +9,12 @@ import ( "github.com/harishhary/blink/pkg/matchers/config" ) -// PluginMetadata is re-exported from internal/plugin so plugin authors don't need to -// import an internal package. type PluginMetadata = plugin.PluginMetadata +type MatcherMetadata = config.MatcherMetadata type Matcher interface { - MatcherMetadata() *config.MatcherMetadata - PluginMetadata() plugin.PluginMetadata // satisfies plugin.Syncable + MatcherMetadata() *MatcherMetadata + Metadata() PluginMetadata Global() bool Checksum() string String() string diff --git a/pkg/matchers/pool/pool.go b/pkg/matchers/pool/pool.go index 685ec64..c69902b 100644 --- a/pkg/matchers/pool/pool.go +++ b/pkg/matchers/pool/pool.go @@ -27,7 +27,7 @@ func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { func (p *Pool) Match(ctx context.Context, matcherID string, evts []events.Event, canaryHashKey string) ([]bool, errors.Error) { var results []bool err := p.Call(ctx, matcherID, canaryHashKey, func(callCtx context.Context, m matchers.Matcher) error { - if !m.MatcherMetadata().Enabled() { + if !m.MatcherMetadata().Enabled { results = make([]bool, len(evts)) for i := range results { results[i] = true @@ -47,11 +47,7 @@ func (p *Pool) Match(ctx context.Context, matcherID string, evts []events.Event, // Handles plugin lifecycle messages from the plugin manager bus, registering or deregistering matchers in the pool. 
func poolKey(m matchers.Matcher) internal.PoolKey { cfg := m.MatcherMetadata() - version := cfg.Version() - if cs := m.Checksum(); cs != "" { - version = version + "@" + cs - } - return internal.PoolKey{PluginID: cfg.Id(), Version: version} + return internal.PoolKey{Id: cfg.Id, Version: cfg.Version, Hash: m.Checksum()} } func (p *Pool) Sync(msg messaging.Message) { @@ -64,8 +60,8 @@ func (p *Pool) Sync(msg messaging.Message) { case plugin.UpdateMessage[matchers.Matcher]: register(m.OnDrained, m.Items, m.MaxProcs) case plugin.UnregisterMessage[matchers.Matcher]: - p.Unregister(m.ItemID) + p.Unregister(m.ItemKey) case plugin.RemoveMessage[matchers.Matcher]: - p.Remove(m.ItemID) + p.Remove(m.ItemKey) } } diff --git a/pkg/matchers/rpc_matcher.go b/pkg/matchers/rpc_matcher.go index 8c149f0..8d8ad73 100644 --- a/pkg/matchers/rpc_matcher.go +++ b/pkg/matchers/rpc_matcher.go @@ -34,7 +34,8 @@ func (r *rpcMatcher) cfg() *config.MatcherMetadata { if r.cfgWatcher == nil { return nil } - return r.cfgWatcher.Current().ByFileName(r.fileName) + v, _ := r.cfgWatcher.Current().ByFileName(r.fileName) + return v } // MatcherMetadata returns the live YAML-derived matcher configuration. 
@@ -42,24 +43,18 @@ func (r *rpcMatcher) MatcherMetadata() *config.MatcherMetadata { if c := r.cfg(); c != nil { return c } - return &config.MatcherMetadata{FileNameField: r.fileName} + return &config.MatcherMetadata{PluginMetadata: plugin.PluginMetadata{Id: r.fileName, Name: r.fileName, FileName: r.fileName}} } -func (r *rpcMatcher) PluginMetadata() plugin.PluginMetadata { - c := r.MatcherMetadata() - return plugin.PluginMetadata{ - ID: c.Id(), - Name: c.Name(), - Description: c.Description(), - Enabled: c.Enabled(), - Version: c.Version(), - } +func (r *rpcMatcher) Metadata() plugin.PluginMetadata { + return r.MatcherMetadata().Metadata() } -func (r *rpcMatcher) Global() bool { return r.MatcherMetadata().Global() } +func (r *rpcMatcher) Global() bool { return r.MatcherMetadata().Global } func (r *rpcMatcher) Checksum() string { return r.checksum } func (r *rpcMatcher) String() string { - return "RpcMatcher '" + r.MatcherMetadata().Name() + "' id:'" + r.MatcherMetadata().Id() + "'" + m := r.MatcherMetadata().Metadata() + return "RpcMatcher '" + m.Name + "' id:'" + m.Id + "'" } func (r *rpcMatcher) Match(ctx context.Context, evts []events.Event) ([]bool, errors.Error) { diff --git a/pkg/rules/config/config.go b/pkg/rules/config/config.go index 5428f6d..78e2645 100644 --- a/pkg/rules/config/config.go +++ b/pkg/rules/config/config.go @@ -29,14 +29,13 @@ package config import ( - "crypto/sha256" - "encoding/hex" "fmt" "os" "path/filepath" "strings" "time" + "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" "github.com/harishhary/blink/pkg/scoring" "go.yaml.in/yaml/v4" @@ -55,19 +54,11 @@ func (o *Observable) Aggregation() bool { return o.AggregationVal } // RuleMetadata is the in-memory representation of a rule YAML sidecar file. 
type RuleMetadata struct { - // Identity - IDField string `yaml:"id"` - NameField string `yaml:"name"` - DisplayNameField string `yaml:"display_name"` - DescriptionField string `yaml:"description"` - EnabledField bool `yaml:"enabled"` - VersionField string `yaml:"version"` - FileNameField string `yaml:"file_name"` - ChecksumField string `yaml:"checksum"` + plugin.PluginMetadata `yaml:",inline"` // Scoring - SeverityStr string `yaml:"severity"` - ConfidenceStr string `yaml:"confidence"` + SeverityStr string `yaml:"severity"` + ConfidenceStr string `yaml:"confidence"` SignalThresholdStr string `yaml:"signal_threshold"` // Routing / matching @@ -95,21 +86,11 @@ type RuleMetadata struct { EnrichmentsField []string `yaml:"enrichments"` TuningRulesField []string `yaml:"tuning_rules"` - // Rollout control - KillSwitchField bool `yaml:"kill_switch"` - RolloutPctField float64 `yaml:"rollout_pct"` - ModeField string `yaml:"mode"` // "blue-green" (default), "canary", "shadow" - MinProcsField int `yaml:"min_procs"` - MaxProcsField int `yaml:"max_procs"` - // Parsed scoring values - populated by Load(); not read from YAML directly. severity scoring.Severity confidence scoring.Confidence signalThreshold scoring.Confidence riskScore scoring.RiskScore - - // Parsed rollout mode - populated by resolveRollout(). - rolloutMode internal.RolloutMode } // Load reads and validates a single YAML sidecar file, returning a *RuleMetadata @@ -136,21 +117,9 @@ func New(c RuleMetadata) (*RuleMetadata, error) { if err := c.resolveScoring(); err != nil { return nil, err } - if err := c.resolveRollout(); err != nil { - return nil, err - } return &c, nil } -// resolveRollout parses ModeField into the typed rolloutMode field. 
-func (c *RuleMetadata) resolveRollout() error { - if c.ModeField == "" { - c.rolloutMode = internal.RolloutModeBlueGreen - return nil - } - return c.rolloutMode.UnmarshalText([]byte(c.ModeField)) -} - // resolveScoring parses the string scoring fields to their typed equivalents // and computes the risk score. func (c *RuleMetadata) resolveScoring() error { @@ -179,8 +148,8 @@ func (c *RuleMetadata) resolveScoring() error { // resolve parses string-typed scoring fields, fills defaults, and computes // the checksum when one is not provided in the YAML. -func (c *RuleMetadata) resolve(path string, raw []byte) error { - if c.NameField == "" { +func (c *RuleMetadata) resolve(path string, _ []byte) error { + if c.Name == "" { return fmt.Errorf("name is required") } @@ -188,64 +157,49 @@ func (c *RuleMetadata) resolve(path string, raw []byte) error { return err } - if err := c.resolveRollout(); err != nil { - return err - } - // Default file_name to the YAML file's base name (without extension). - if c.FileNameField == "" { + if c.FileName == "" { base := filepath.Base(path) - c.FileNameField = strings.TrimSuffix(base, filepath.Ext(base)) - } - - // Compute checksum from raw YAML bytes when not provided. 
- if c.ChecksumField == "" { - h := sha256.Sum256(raw) - c.ChecksumField = hex.EncodeToString(h[:]) + c.FileName = strings.TrimSuffix(base, filepath.Ext(base)) } return nil } -func (c *RuleMetadata) Id() string { return c.IDField } -func (c *RuleMetadata) Name() string { return c.NameField } -func (c *RuleMetadata) Description() string { return c.DescriptionField } -func (c *RuleMetadata) Enabled() bool { return c.EnabledField } -func (c *RuleMetadata) FileName() string { return c.FileNameField } -func (c *RuleMetadata) DisplayName() string { return c.DisplayNameField } -func (c *RuleMetadata) References() []string { return c.ReferencesField } -func (c *RuleMetadata) Severity() scoring.Severity { return c.severity } -func (c *RuleMetadata) Confidence() scoring.Confidence { return c.confidence } -func (c *RuleMetadata) RiskScore() scoring.RiskScore { return c.riskScore } -func (c *RuleMetadata) MergeByKeys() []string { return c.MergeByKeysField } -func (c *RuleMetadata) MergeWindowMins() time.Duration { - return time.Duration(c.MergeWindowMinsField) * time.Minute -} -func (c *RuleMetadata) ReqSubkeys() []string { return c.ReqSubkeysField } -func (c *RuleMetadata) Signal() bool { return c.SignalField } +func (c *RuleMetadata) References() []string { return c.ReferencesField } +func (c *RuleMetadata) Severity() scoring.Severity { return c.severity } +func (c *RuleMetadata) Confidence() scoring.Confidence { return c.confidence } +func (c *RuleMetadata) RiskScore() scoring.RiskScore { return c.riskScore } +func (c *RuleMetadata) MergeByKeys() []string { return c.MergeByKeysField } +func (c *RuleMetadata) MergeWindowMins() time.Duration { return time.Duration(c.MergeWindowMinsField) * time.Minute } +func (c *RuleMetadata) ReqSubkeys() []string { return c.ReqSubkeysField } +func (c *RuleMetadata) Signal() bool { return c.SignalField } func (c *RuleMetadata) SignalThreshold() scoring.Confidence { return c.signalThreshold } -func (c *RuleMetadata) Tags() []string { return 
c.TagsField } -func (c *RuleMetadata) Dispatchers() []string { return c.DispatchersField } -func (c *RuleMetadata) LogTypes() []string { return c.LogTypesField } -func (c *RuleMetadata) Observables() []Observable { return c.ObservablesField } -func (c *RuleMetadata) Matchers() []string { return c.MatchersField } -func (c *RuleMetadata) Formatters() []string { return c.FormattersField } -func (c *RuleMetadata) Enrichments() []string { return c.EnrichmentsField } -func (c *RuleMetadata) TuningRules() []string { return c.TuningRulesField } -func (c *RuleMetadata) Checksum() string { return c.ChecksumField } -func (c *RuleMetadata) Version() string { return c.VersionField } - -// Rollout control accessors. -func (c *RuleMetadata) KillSwitch() bool { return c.KillSwitchField } -func (c *RuleMetadata) RolloutPct() float64 { return c.RolloutPctField } -func (c *RuleMetadata) RolloutMode() internal.RolloutMode { return c.rolloutMode } -func (c *RuleMetadata) MinProcs() int { return c.MinProcsField } -func (c *RuleMetadata) MaxProcs() int { return c.MaxProcsField } +func (c *RuleMetadata) Tags() []string { return c.TagsField } +func (c *RuleMetadata) Dispatchers() []string { return c.DispatchersField } +func (c *RuleMetadata) LogTypes() []string { return c.LogTypesField } +func (c *RuleMetadata) Observables() []Observable { return c.ObservablesField } +func (c *RuleMetadata) Matchers() []string { return c.MatchersField } +func (c *RuleMetadata) Formatters() []string { return c.FormattersField } +func (c *RuleMetadata) Enrichments() []string { return c.EnrichmentsField } +func (c *RuleMetadata) TuningRules() []string { return c.TuningRulesField } + +func mergeRouting(a, b internal.RoutingEntry) internal.RoutingEntry { + out := a + if b.Mode > a.Mode { + out.Mode = b.Mode + } + if b.RolloutPct > a.RolloutPct { + out.RolloutPct = b.RolloutPct + } + return out +} type Registry struct { byName map[string]*RuleMetadata byID map[string]*RuleMetadata byFileName 
map[string]*RuleMetadata + routing map[string]internal.RoutingEntry // merged routing config per plugin ID all []*RuleMetadata } @@ -259,6 +213,7 @@ func NewRegistry(dir string) (*Registry, error) { byName: make(map[string]*RuleMetadata), byID: make(map[string]*RuleMetadata), byFileName: make(map[string]*RuleMetadata), + routing: make(map[string]internal.RoutingEntry), } var errs []string @@ -275,10 +230,19 @@ func NewRegistry(dir string) (*Registry, error) { errs = append(errs, err.Error()) continue } - reg.byName[cfg.NameField] = cfg - reg.byFileName[cfg.FileNameField] = cfg - if cfg.IDField != "" { - reg.byID[cfg.IDField] = cfg + reg.byName[cfg.Name] = cfg + reg.byFileName[cfg.FileName] = cfg + if cfg.Id != "" { + reg.byID[cfg.Id] = cfg + re := internal.RoutingEntry{ + Mode: cfg.RolloutMode, + RolloutPct: cfg.RolloutPct, + } + if existing, ok := reg.routing[cfg.Id]; ok { + reg.routing[cfg.Id] = mergeRouting(existing, re) + } else { + reg.routing[cfg.Id] = re + } } reg.all = append(reg.all, cfg) } @@ -294,13 +258,19 @@ func (r *Registry) ByName(name string) *RuleMetadata { return r.byName[n func (r *Registry) ByID(id string) *RuleMetadata { return r.byID[id] } func (r *Registry) ByFileName(fileName string) *RuleMetadata { return r.byFileName[fileName] } +// RoutingByID returns the merged routing config for a plugin ID. +// When multiple YAML sidecars share the same ID, their routing fields are merged +// using max-restrictive semantics (see mergeRouting). The zero value (blue-green, +// no kill switch) is returned when no YAML declares a routing config for this ID. +func (r *Registry) RoutingByID(id string) internal.RoutingEntry { return r.routing[id] } + func (r *Registry) Len() int { return len(r.all) } // An empty log_types list means the rule applies to all log types. 
func (r *Registry) RulesForLogType(logType string) []*RuleMetadata { var result []*RuleMetadata for _, cfg := range r.all { - if !cfg.EnabledField { + if !cfg.Enabled { continue } if len(cfg.LogTypesField) == 0 { diff --git a/pkg/rules/helpers.go b/pkg/rules/helpers.go index 0ae3852..50af919 100644 --- a/pkg/rules/helpers.go +++ b/pkg/rules/helpers.go @@ -8,7 +8,7 @@ import ( // DefaultSubKeysInEvent checks that every required subkey is present in the event. func DefaultSubKeysInEvent(r *config.RuleMetadata, event events.Event) bool { - if !r.Enabled() { + if !r.Enabled { return false } for _, k := range r.ReqSubkeys() { diff --git a/pkg/rules/launcher.go b/pkg/rules/launcher.go index fc698e4..4aa065a 100644 --- a/pkg/rules/launcher.go +++ b/pkg/rules/launcher.go @@ -44,7 +44,7 @@ func (l *RuleAdapter) Handshake(ctx context.Context, raw interface{}, binPath st } rule := newRpcRule(fileName, rpc, l.Watcher, hash) - return rule, &ruleLifecycle{rpc: rpc}, cfg.Id(), cfg.Name(), nil + return rule, &ruleLifecycle{rpc: rpc}, cfg.Id, cfg.Name, nil } // Reports whether this binary is safe to start: @@ -58,7 +58,7 @@ func (l *RuleAdapter) IsReady(binPath string) bool { if cfg == nil { return false } - return !l.Watcher.HasBlockingErrorFor(cfg.Id(), cfg.FileName()+".yaml") + return !l.Watcher.HasBlockingErrorFor(cfg.Id, cfg.FileName+".yaml") } // IsShadow reports whether this binary's YAML declares it as a shadow or canary version. @@ -69,23 +69,22 @@ func (l *RuleAdapter) IsShadow(binPath string) bool { if cfg == nil { return false } - m := cfg.RolloutMode() - return m == internal.RolloutModeCanary || m == internal.RolloutModeShadow + return cfg.RolloutMode == internal.RolloutModeCanary || cfg.RolloutMode == internal.RolloutModeShadow } // IsEnabled reports whether the rule's YAML sidecar still exists and is enabled. // Called during every reconcile func so process-zombies (binary running but YAML removed/disabled) are stopped without waiting for a binary change. 
func (l *RuleAdapter) IsEnabled(h *plugin.PluginHandle) bool { cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) - return cfg != nil && cfg.Enabled() + return cfg != nil && cfg.Enabled } func (l *RuleAdapter) Workers(binPath string) int { cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) - if cfg == nil || cfg.MaxProcs() <= 0 { + if cfg == nil || cfg.MaxProcs <= 0 { return 1 } - return cfg.MaxProcs() + return cfg.MaxProcs } type ruleLifecycle struct { diff --git a/pkg/rules/manager_test.go b/pkg/rules/manager_test.go index f4668e9..a107ba4 100644 --- a/pkg/rules/manager_test.go +++ b/pkg/rules/manager_test.go @@ -10,7 +10,7 @@ import ( "github.com/harishhary/blink/internal/logger" "github.com/harishhary/blink/internal/messaging" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/rules" "github.com/harishhary/blink/pkg/rules/config" ) @@ -65,8 +65,8 @@ func waitForRegister(t *testing.T, ch <-chan messaging.Message, name string, tim for { select { case msg := <-ch: - if rm, ok := msg.(pluginmgr.RegisterMessage[rules.Rule]); ok { - if len(rm.Items) > 0 && rm.Items[0].Name() == name { + if rm, ok := msg.(plugin.RegisterMessage[rules.Rule]); ok { + if len(rm.Items) > 0 && rm.Items[0].RuleMetadata().Name == name { return true } } @@ -82,8 +82,8 @@ func waitForUnregister(t *testing.T, ch <-chan messaging.Message, name string, t for { select { case msg := <-ch: - if um, ok := msg.(pluginmgr.UnregisterMessage[rules.Rule]); ok { - if um.ItemID == name { + if um, ok := msg.(plugin.UnregisterMessage[rules.Rule]); ok { + if um.ItemKey.Id == name { return true } } @@ -99,8 +99,8 @@ func waitForRemove(t *testing.T, ch <-chan messaging.Message, id string, timeout for { select { case msg := <-ch: - if rm, ok := msg.(pluginmgr.RemoveMessage[rules.Rule]); ok { - if rm.ItemID == id { + if rm, ok := msg.(plugin.RemoveMessage[rules.Rule]); ok { + if rm.ItemKey.Id 
== id { return true } } diff --git a/pkg/rules/pool/pool.go b/pkg/rules/pool/pool.go index 61ce356..2d94a31 100644 --- a/pkg/rules/pool/pool.go +++ b/pkg/rules/pool/pool.go @@ -33,7 +33,7 @@ func NewPool(watcher *config.Watcher, drainTimeout time.Duration) *Pool { func (p *Pool) Evaluate(ctx context.Context, ruleID string, evts []events.Event, canaryHashKey string) ([]rules.EvalResult, errors.Error) { var results []rules.EvalResult err := p.Call(ctx, ruleID, canaryHashKey, func(ctx context.Context, r rules.Rule) error { - if !r.RuleMetadata().Enabled() { + if !r.RuleMetadata().Enabled { results = make([]rules.EvalResult, len(evts)) return nil } @@ -53,11 +53,7 @@ func (p *Pool) Evaluate(ctx context.Context, ruleID string, evts []events.Event, // string in the rule config - preventing silent same-key overwrites in the pool. func poolKey(r rules.Rule) internal.PoolKey { cfg := r.RuleMetadata() - version := cfg.Version() - if cs := r.Checksum(); cs != "" { - version = version + "@" + cs - } - return internal.PoolKey{PluginID: cfg.Id(), Version: version} + return internal.PoolKey{Id: cfg.Id, Version: cfg.Version, Hash: r.Checksum()} } // Handles plugin lifecycle messages from the plugin manager bus, registering or deregistering rules in the pool. @@ -68,8 +64,8 @@ func (p *Pool) Sync(msg messaging.Message) { case plugin.UpdateMessage[rules.Rule]: p.Register(poolKey(m.Items[0]), m.Items, m.MaxProcs, m.OnDrained) case plugin.UnregisterMessage[rules.Rule]: - p.Unregister(m.ItemID) + p.Unregister(m.ItemKey) case plugin.RemoveMessage[rules.Rule]: - p.Remove(m.ItemID) + p.Remove(m.ItemKey) } } diff --git a/pkg/rules/rpc_rules.go b/pkg/rules/rpc_rules.go index ac80154..f0e91ea 100644 --- a/pkg/rules/rpc_rules.go +++ b/pkg/rules/rpc_rules.go @@ -41,14 +41,14 @@ func (r *rpcRule) RuleMetadata() *config.RuleMetadata { return c } // Return a minimal stub so callers don't need to nil-check. 
- return &config.RuleMetadata{FileNameField: r.fileName} + return &config.RuleMetadata{PluginMetadata: plugin.PluginMetadata{FileName: r.fileName, Name: r.fileName, Id: r.fileName}} } func (r *rpcRule) Checksum() string { return r.checksum } -func (r *rpcRule) PluginMetadata() plugin.PluginMetadata { +func (r *rpcRule) Metadata() plugin.PluginMetadata { if c := r.cfg(); c != nil { - return c.PluginMetadata() + return c.Metadata() } return plugin.PluginMetadata{Name: r.fileName} } diff --git a/pkg/rules/rule.go b/pkg/rules/rule.go index 3b4c4d7..8034066 100644 --- a/pkg/rules/rule.go +++ b/pkg/rules/rule.go @@ -9,7 +9,8 @@ import ( "github.com/harishhary/blink/pkg/rules/config" ) -type Observables = config.Observable +type PluginMetadata = plugin.PluginMetadata +type RuleMetadata = config.RuleMetadata // EvalResult is the per-event outcome returned by Rule.Evaluate. // Fields beyond Matched are populated only when the plugin implements the @@ -28,8 +29,8 @@ type EvalResult struct { // All rules receive a slice of events and return one EvalResult per event. // PluginMetadata + Checksum together satisfy plugin.Syncable. type Rule interface { - RuleMetadata() *config.RuleMetadata - PluginMetadata() plugin.PluginMetadata + RuleMetadata() *RuleMetadata + Metadata() PluginMetadata Checksum() string Evaluate(ctx context.Context, evts []events.Event) ([]EvalResult, errors.Error) } diff --git a/pkg/rules/sdk/serve.go b/pkg/rules/sdk/serve.go index 9e2a38d..de299c0 100644 --- a/pkg/rules/sdk/serve.go +++ b/pkg/rules/sdk/serve.go @@ -22,23 +22,54 @@ const ( ) // RulePlugin is the interface that all rule plugin binaries must implement. -// Embed sdk.BaseRule to get no-op defaults for Init and Shutdown. -// All rule metadata (name, severity, log_types, etc.) lives in the YAML -// sidecar file alongside the binary - the subprocess only owns Evaluate. +// Embed sdk.BaseRule to get default no-op / pass-through implementations for +// every method. 
Override only the methods you need. +// +// All static rule metadata (name, severity, log_types, etc.) lives in the YAML +// sidecar file alongside the binary — the subprocess owns only evaluation logic. type RulePlugin interface { - // Init is called once after the plugin connects, before any Evaluate calls. - // Use it to compile regexes, load ML models, or open connections. Init() error Evaluate(ctx context.Context, event events.Event) (bool, errors.Error) Shutdown() error + + // AlertTitle returns a dynamic title for the alert. + // Return "" to use the YAML display_name (default). + AlertTitle(event events.Event) string + + // AlertDescription returns a dynamic description for the alert. + // Return "" to use the YAML description (default). + AlertDescription(event events.Event) string + + // AlertSeverity returns an event-level severity override. + // Return one of: "info", "low", "medium", "high", "critical", or "" to use the YAML value. + AlertSeverity(event events.Event) string + + // AlertContext returns extra key-value pairs merged into the alert event. + // Return nil to add nothing. + AlertContext(event events.Event) map[string]any + + // AlertMergeByKeys returns the merge keys for this event, overriding YAML merge_by_keys. + // Return nil to use the YAML value. + AlertMergeByKeys(event events.Event) []string + + // AlertReqSubkeys guards evaluation: return false to skip Evaluate for this event. + // Useful for dynamic field presence checks beyond the static req_subkeys in YAML. + // Return true to always evaluate (default). + AlertReqSubkeys(event events.Event) bool } -// BaseRule provides no-op defaults for Init and Shutdown. -// Embed in your rule struct to avoid implementing them when not needed. +// BaseRule provides pass-through / no-op defaults for all RulePlugin methods. +// Embed in your rule struct and override only what you need. 
type BaseRule struct{} -func (BaseRule) Init() error { return nil } -func (BaseRule) Shutdown() error { return nil } +func (BaseRule) Init() error { return nil } +func (BaseRule) Shutdown() error { return nil } +func (BaseRule) AlertTitle(_ events.Event) string { return "" } +func (BaseRule) AlertDescription(_ events.Event) string { return "" } +func (BaseRule) AlertSeverity(_ events.Event) string { return "" } +func (BaseRule) AlertContext(_ events.Event) map[string]any { return nil } +func (BaseRule) AlertMergeByKeys(_ events.Event) []string { return nil } +func (BaseRule) AlertReqSubkeys(_ events.Event) bool { return true } // server wraps a RulePlugin and serves the gRPC RuleServer interface. type server struct { @@ -50,32 +81,39 @@ func (s *server) Init(_ context.Context, _ *rpc_rules.Empty) (*rpc_rules.Empty, return &rpc_rules.Empty{}, s.rule.Init() } -func (s *server) Evaluate(ctx context.Context, req *rpc_rules.EvaluateRequest) (*rpc_rules.EvaluateResponse, error) { - var event events.Event - if err := json.Unmarshal(req.GetEvent().GetJson(), &event); err != nil { - return nil, err - } - matched, err := s.rule.Evaluate(ctx, event) - if err != nil { - return nil, err - } - return &rpc_rules.EvaluateResponse{Matched: matched}, nil -} - func (s *server) EvaluateBatch(ctx context.Context, req *rpc_rules.EvaluateBatchRequest) (*rpc_rules.EvaluateBatchResponse, error) { - results := make([]bool, 0, len(req.GetEvents())) + results := make([]*rpc_rules.EventResult, 0, len(req.GetEvents())) for _, ev := range req.GetEvents() { var event events.Event if err := json.Unmarshal(ev.GetJson(), &event); err != nil { return nil, err } + + if !s.rule.AlertReqSubkeys(event) { + results = append(results, &rpc_rules.EventResult{Matched: false}) + continue + } + matched, err := s.rule.Evaluate(ctx, event) if err != nil { return nil, err } - results = append(results, matched) + + result := &rpc_rules.EventResult{Matched: matched} + if matched { + result.Title = 
s.rule.AlertTitle(event) + result.Description = s.rule.AlertDescription(event) + result.Severity = s.rule.AlertSeverity(event) + result.MergeByKeys = s.rule.AlertMergeByKeys(event) + if c := s.rule.AlertContext(event); len(c) > 0 { + if b, err := json.Marshal(c); err == nil { + result.ContextJson = b + } + } + } + results = append(results, result) } - return &rpc_rules.EvaluateBatchResponse{Matched: results}, nil + return &rpc_rules.EvaluateBatchResponse{Results: results}, nil } func (s *server) Ping(_ context.Context, _ *rpc_rules.Empty) (*rpc_rules.Empty, error) { diff --git a/pkg/tuning_rules/config/config.go b/pkg/tuning_rules/config/config.go new file mode 100644 index 0000000..a2ee612 --- /dev/null +++ b/pkg/tuning_rules/config/config.go @@ -0,0 +1,76 @@ +// Each tuning rule binary ships alongside a .yaml sidecar file. +// +// YAML schema example: +// +// id: "550e8400-e29b-41d4-a716-446655440003" +// name: "noisy-hosts" +// display_name: "Noisy Hosts Suppressor" +// description: "Ignores alerts from known-noisy infrastructure hosts." +// enabled: true +// version: "1.0.0" +// file_name: "noisy-hosts" +// global: false +// rule_type: "ignore" # ignore | set_confidence | increase_confidence | decrease_confidence +// confidence: "" # only used when rule_type is *_confidence (e.g. "0.8" or "medium") +// mode: "blue-green" +// min_procs: 1 +// max_procs: 2 + +package config + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/harishhary/blink/internal/plugin" + "go.yaml.in/yaml/v4" +) + +// TuningMetadata is the in-memory representation of a tuning rule YAML sidecar. +type TuningMetadata struct { + plugin.PluginMetadata `yaml:",inline"` + Global bool `yaml:"global"` + RuleType string `yaml:"rule_type"` // "ignore", "set_confidence", "increase_confidence", "decrease_confidence" + Confidence string `yaml:"confidence"` // meaningful only for *_confidence rule types +} + +// loader implements plugin.Loader[*TuningMetadata]. 
+type loader struct{} + +func (loader) Load(path string) (*TuningMetadata, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("tuning config: read %s: %w", path, err) + } + var cfg TuningMetadata + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("tuning config: parse %s: %w", path, err) + } + if cfg.Name == "" { + return nil, fmt.Errorf("tuning config: %s: name is required", path) + } + if cfg.FileName == "" { + base := filepath.Base(path) + cfg.FileName = strings.TrimSuffix(base, filepath.Ext(base)) + } + if cfg.Id == "" { + cfg.Id = cfg.FileName + } + return &cfg, nil +} + +func (loader) Validate(all []*TuningMetadata) error { return nil } + +// Registry and Watcher are the generic implementations parameterised for tuning rules. +type Registry = plugin.Registry[*TuningMetadata] +type Watcher = plugin.Watcher[*TuningMetadata] + +func NewRegistry(dir string) (*Registry, error) { + return plugin.NewRegistry(dir, "tuning_rule", loader{}) +} + +func NewWatcher(dir string) (*Watcher, error) { + return plugin.NewWatcher("tuning-config-watcher", dir, "tuning_rule", loader{}) +} diff --git a/pkg/tuning_rules/launcher.go b/pkg/tuning_rules/launcher.go index aa6ab0b..a7d3cf7 100644 --- a/pkg/tuning_rules/launcher.go +++ b/pkg/tuning_rules/launcher.go @@ -5,51 +5,80 @@ import ( "fmt" "time" - plugin "github.com/hashicorp/go-plugin" + goplugin "github.com/hashicorp/go-plugin" "google.golang.org/grpc" - "github.com/harishhary/blink/internal/pluginmgr" + "github.com/harishhary/blink/internal/helpers" + "github.com/harishhary/blink/internal/plugin" + internal "github.com/harishhary/blink/internal/pools" + "github.com/harishhary/blink/pkg/tuning_rules/config" "github.com/harishhary/blink/pkg/tuning_rules/rpc_tuning_rules" ) -type TuningRuleAdapter struct{} +type TuningRuleAdapter struct { + Watcher *config.Watcher +} func (l *TuningRuleAdapter) PluginKey() string { return "tuning_rule" } func (l *TuningRuleAdapter) 
MagicValue() string { return "tuning_rule_v1" } -func (l *TuningRuleAdapter) GRPCPlugin() plugin.Plugin { return &tuningPlugin{} } +func (l *TuningRuleAdapter) GRPCPlugin() goplugin.Plugin { return &tuningPlugin{} } -func (l *TuningRuleAdapter) Handshake(ctx context.Context, raw interface{}, _ string, hash string) (TuningRule, pluginmgr.PluginLifecycle, string, string, error) { +// Handshake connects to the tuning rule subprocess, calls Init, and returns a +// ready rpcTuningRule. Identity comes from the YAML sidecar, not from a GetMetadata RPC. +func (l *TuningRuleAdapter) Handshake(ctx context.Context, raw interface{}, binPath string, hash string) (TuningRule, plugin.PluginLifecycle, string, string, error) { rpc, ok := raw.(rpc_tuning_rules.TuningRuleClient) if !ok { return nil, nil, "", "", fmt.Errorf("dispense: unexpected type %T", raw) } - metaCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - meta, err := rpc.GetMetadata(metaCtx, &rpc_tuning_rules.Empty{}) - cancel() - if err != nil { - return nil, nil, "", "", fmt.Errorf("metadata: %w", err) - } + fileName := helpers.BinaryBaseName(binPath) initCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - _, err = rpc.Init(initCtx, &rpc_tuning_rules.Empty{}) + _, err := rpc.Init(initCtx, &rpc_tuning_rules.Empty{}) cancel() if err != nil { return nil, nil, "", "", fmt.Errorf("init: %w", err) } - tr := newRpcTuningRule(meta, rpc, hash) - return tr, &tuningLifecycle{rpc: rpc}, meta.GetId(), meta.GetName(), nil + tr := newRpcTuningRule(fileName, rpc, l.Watcher, hash) + cfg, ok := l.Watcher.Current().ByFileName(fileName) + id, name := fileName, fileName + if ok { + id = cfg.Id + name = cfg.Name + } + return tr, &tuningLifecycle{rpc: rpc}, id, name, nil } -func (l *TuningRuleAdapter) IsReady(_ string) bool { return true } -func (l *TuningRuleAdapter) IsShadow(_ string) bool { return false } +// IsReady reports whether this binary's YAML sidecar exists in the current registry. 
+func (l *TuningRuleAdapter) IsReady(binPath string) bool { + _, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + return ok +} -// IsEnabled always returns true - tuning rules have no YAML sidecar. -func (l *TuningRuleAdapter) IsEnabled(_ *pluginmgr.PluginHandle) bool { return true } +// IsShadow reports whether this binary's YAML declares it as a shadow or canary version. +func (l *TuningRuleAdapter) IsShadow(binPath string) bool { + cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if !ok { + return false + } + m := cfg.RolloutMode + return m == internal.RolloutModeCanary || m == internal.RolloutModeShadow +} + +// IsEnabled reports whether the tuning rule's YAML sidecar still exists and is enabled. +func (l *TuningRuleAdapter) IsEnabled(h *plugin.PluginHandle) bool { + cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) + return ok && cfg.Enabled +} -// Workers always returns 1 - no YAML sidecar to configure parallelism. 
-func (l *TuningRuleAdapter) Workers(_ string) int { return 1 } +func (l *TuningRuleAdapter) Workers(binPath string) int { + cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if !ok || cfg.MaxProcs <= 0 { + return 1 + } + return cfg.MaxProcs +} type tuningLifecycle struct { rpc rpc_tuning_rules.TuningRuleClient @@ -65,9 +94,9 @@ func (l *tuningLifecycle) Shutdown(ctx context.Context) error { return err } -type tuningPlugin struct{ plugin.NetRPCUnsupportedPlugin } +type tuningPlugin struct{ goplugin.NetRPCUnsupportedPlugin } -func (p *tuningPlugin) GRPCServer(_ *plugin.GRPCBroker, _ *grpc.Server) error { return nil } -func (p *tuningPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { +func (p *tuningPlugin) GRPCServer(_ *goplugin.GRPCBroker, _ *grpc.Server) error { return nil } +func (p *tuningPlugin) GRPCClient(_ context.Context, _ *goplugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { return rpc_tuning_rules.NewTuningRuleClient(c), nil } diff --git a/pkg/tuning_rules/pool/pool.go b/pkg/tuning_rules/pool/pool.go index 62769b2..21cef41 100644 --- a/pkg/tuning_rules/pool/pool.go +++ b/pkg/tuning_rules/pool/pool.go @@ -30,7 +30,7 @@ func (p *Pool) Tune(ctx context.Context, tuningRuleID string, alerts []alerts.Al ) { applies = make([]bool, len(alerts)) err := p.Call(ctx, tuningRuleID, canaryHashKey, func(callCtx context.Context, t tuning.TuningRule) error { - if !t.TuningMetadata().Enabled() { + if !t.TuningMetadata().Enabled { return nil } ruleType = t.RuleType() @@ -48,11 +48,7 @@ func (p *Pool) Tune(ctx context.Context, tuningRuleID string, alerts []alerts.Al // Handles plugin lifecycle messages from the plugin manager bus, registering or deregistering tuning rules in the pool. 
func poolKey(t tuning.TuningRule) internal.PoolKey { cfg := t.TuningMetadata() - version := cfg.Version() - if cs := t.Checksum(); cs != "" { - version = version + "@" + cs - } - return internal.PoolKey{PluginID: cfg.Id(), Version: version} + return internal.PoolKey{Id: cfg.Id, Version: cfg.Version, Hash: t.Checksum()} } func (p *Pool) Sync(msg messaging.Message) { @@ -65,8 +61,8 @@ func (p *Pool) Sync(msg messaging.Message) { case plugin.UpdateMessage[tuning.TuningRule]: register(m.OnDrained, m.Items, m.MaxProcs) case plugin.UnregisterMessage[tuning.TuningRule]: - p.Unregister(m.ItemID) + p.Unregister(m.ItemKey) case plugin.RemoveMessage[tuning.TuningRule]: - p.Remove(m.ItemID) + p.Remove(m.ItemKey) } } diff --git a/pkg/tuning_rules/rpc_tuning_rule.go b/pkg/tuning_rules/rpc_tuning_rule.go index 0620942..b5c5df3 100644 --- a/pkg/tuning_rules/rpc_tuning_rule.go +++ b/pkg/tuning_rules/rpc_tuning_rule.go @@ -33,7 +33,8 @@ func (r *rpcTuningRule) cfg() *config.TuningMetadata { if r.cfgWatcher == nil { return nil } - return r.cfgWatcher.Current().ByFileName(r.fileName) + v, _ := r.cfgWatcher.Current().ByFileName(r.fileName) + return v } // TuningMetadata returns the live YAML-derived tuning rule configuration. 
@@ -41,31 +42,24 @@ func (r *rpcTuningRule) TuningMetadata() *config.TuningMetadata { if c := r.cfg(); c != nil { return c } - return &config.TuningMetadata{FileNameField: r.fileName} + return &config.TuningMetadata{PluginMetadata: plugin.PluginMetadata{Id: r.fileName, Name: r.fileName, FileName: r.fileName}} } -func (r *rpcTuningRule) PluginMetadata() plugin.PluginMetadata { - c := r.TuningMetadata() - return plugin.PluginMetadata{ - ID: c.Id(), - Name: c.Name(), - Description: c.Description(), - Enabled: c.Enabled(), - Version: c.Version(), - } +func (r *rpcTuningRule) Metadata() plugin.PluginMetadata { + return r.TuningMetadata().Metadata() } func (r *rpcTuningRule) Checksum() string { return r.checksum } func (r *rpcTuningRule) String() string { - c := r.TuningMetadata() - return fmt.Sprintf("TuningRule '%s' (id:%s, enabled:%t)", c.Name(), c.Id(), c.Enabled()) + m := r.TuningMetadata().Metadata() + return fmt.Sprintf("TuningRule '%s' (id:%s, enabled:%t)", m.Name, m.Id, m.Enabled) } -func (r *rpcTuningRule) Global() bool { return r.TuningMetadata().Global() } +func (r *rpcTuningRule) Global() bool { return r.TuningMetadata().Global } // RuleType parses the YAML rule_type string into a typed RuleType constant. func (r *rpcTuningRule) RuleType() RuleType { - switch r.TuningMetadata().RuleTypeStr() { + switch r.TuningMetadata().RuleType { case "set_confidence": return SetConfidence case "increase_confidence": @@ -79,7 +73,7 @@ func (r *rpcTuningRule) RuleType() RuleType { // Confidence parses the YAML confidence string into a scoring.Confidence value. 
func (r *rpcTuningRule) Confidence() scoring.Confidence { - conf, _ := scoring.ParseConfidence(r.TuningMetadata().ConfidenceStr()) + conf, _ := scoring.ParseConfidence(r.TuningMetadata().Confidence) return conf } diff --git a/pkg/tuning_rules/tuning_rule.go b/pkg/tuning_rules/tuning_rule.go index 7ac56ac..54cf9de 100644 --- a/pkg/tuning_rules/tuning_rule.go +++ b/pkg/tuning_rules/tuning_rule.go @@ -10,9 +10,8 @@ import ( "github.com/harishhary/blink/pkg/tuning_rules/config" ) -// PluginMetadata is re-exported from internal/plugin so plugin authors don't need to -// import an internal package. type PluginMetadata = plugin.PluginMetadata +type TuningMetadata = config.TuningMetadata type RuleType int @@ -26,8 +25,8 @@ const ( type TuningRule interface { Tune(ctx context.Context, alerts []alerts.Alert) ([]bool, errors.Error) - TuningMetadata() *config.TuningMetadata - PluginMetadata() plugin.PluginMetadata // satisfies plugin.Syncable + TuningMetadata() *TuningMetadata + Metadata() PluginMetadata Global() bool RuleType() RuleType Confidence() scoring.Confidence From 399eac7600864bee86fc931c762d403a7c6e813d Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Wed, 25 Mar 2026 16:01:32 +0100 Subject: [PATCH 09/14] refactoring a bit --- pkg/alerts/alert.go | 6 +- pkg/alerts/convert.go | 77 +++-- pkg/enrichments/{launcher.go => adapter.go} | 35 +-- pkg/enrichments/config.go | 77 +++++ pkg/enrichments/config/config.go | 110 ------- pkg/enrichments/enrichment.go | 8 +- pkg/enrichments/manager.go | 13 - pkg/enrichments/plugin.go | 14 + pkg/enrichments/{pool => }/pool.go | 21 +- pkg/enrichments/rpc_enrichment.go | 17 +- pkg/enrichments/{sdk => }/serve.go | 2 +- pkg/formatters/{launcher.go => adapter.go} | 23 +- pkg/formatters/config.go | 35 +++ pkg/formatters/config/config.go | 70 ----- pkg/formatters/formatter.go | 7 +- pkg/formatters/{manager.go => plugin.go} | 7 +- pkg/formatters/{pool => }/pool.go | 21 +- pkg/formatters/rpc_formatter.go | 17 +- pkg/formatters/{sdk => 
}/serve.go | 2 +- pkg/matchers/{launcher.go => adapter.go} | 23 +- pkg/matchers/config.go | 35 +++ pkg/matchers/config/config.go | 72 ----- pkg/matchers/manager.go | 7 +- pkg/matchers/matcher.go | 8 +- pkg/matchers/{pool => }/pool.go | 21 +- pkg/matchers/rpc_matcher.go | 17 +- pkg/matchers/{sdk => }/serve.go | 2 +- pkg/rules/{launcher.go => adapter.go} | 39 +-- pkg/rules/config.go | 231 ++++++++++++++ pkg/rules/config/config.go | 288 ------------------ pkg/rules/config/watcher.go | 149 --------- pkg/rules/helpers.go | 3 +- pkg/rules/manager.go | 7 +- pkg/rules/manager_test.go | 19 +- pkg/rules/{pool => }/pool.go | 34 +-- pkg/rules/rpc_rules.go | 21 +- pkg/rules/rule.go | 2 - pkg/rules/{sdk => }/serve.go | 18 +- pkg/tuning_rules/{launcher.go => adapter.go} | 23 +- pkg/tuning_rules/config.go | 45 +++ pkg/tuning_rules/config/config.go | 76 ----- pkg/tuning_rules/manager.go | 7 +- pkg/tuning_rules/{pool => }/pool.go | 27 +- pkg/tuning_rules/rpc_tuning_rule.go | 27 +- .../rpc_tuning_rules/tuning_rule.pb.go | 263 ++-------------- .../rpc_tuning_rules/tuning_rule.proto | 23 +- .../rpc_tuning_rules/tuning_rule_grpc.pb.go | 84 +---- pkg/tuning_rules/{sdk => }/serve.go | 2 +- pkg/tuning_rules/tuning_rule.go | 4 +- 49 files changed, 745 insertions(+), 1394 deletions(-) rename pkg/enrichments/{launcher.go => adapter.go} (62%) create mode 100644 pkg/enrichments/config.go delete mode 100644 pkg/enrichments/config/config.go delete mode 100644 pkg/enrichments/manager.go create mode 100644 pkg/enrichments/plugin.go rename pkg/enrichments/{pool => }/pool.go (72%) rename pkg/enrichments/{sdk => }/serve.go (99%) rename pkg/formatters/{launcher.go => adapter.go} (80%) create mode 100644 pkg/formatters/config.go delete mode 100644 pkg/formatters/config/config.go rename pkg/formatters/{manager.go => plugin.go} (50%) rename pkg/formatters/{pool => }/pool.go (75%) rename pkg/formatters/{sdk => }/serve.go (99%) rename pkg/matchers/{launcher.go => adapter.go} (80%) create mode 100644 
pkg/matchers/config.go delete mode 100644 pkg/matchers/config/config.go rename pkg/matchers/{pool => }/pool.go (72%) rename pkg/matchers/{sdk => }/serve.go (99%) rename pkg/rules/{launcher.go => adapter.go} (76%) create mode 100644 pkg/rules/config.go delete mode 100644 pkg/rules/config/config.go delete mode 100644 pkg/rules/config/watcher.go rename pkg/rules/{pool => }/pool.go (66%) rename pkg/rules/{sdk => }/serve.go (87%) rename pkg/tuning_rules/{launcher.go => adapter.go} (80%) create mode 100644 pkg/tuning_rules/config.go delete mode 100644 pkg/tuning_rules/config/config.go rename pkg/tuning_rules/{pool => }/pool.go (66%) rename pkg/tuning_rules/{sdk => }/serve.go (99%) diff --git a/pkg/alerts/alert.go b/pkg/alerts/alert.go index b401d6d..b977ea0 100644 --- a/pkg/alerts/alert.go +++ b/pkg/alerts/alert.go @@ -12,7 +12,7 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/helpers" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/rules/config" + "github.com/harishhary/blink/pkg/rules" "github.com/harishhary/blink/pkg/scoring" ) @@ -37,7 +37,7 @@ type Alert struct { Confidence scoring.Confidence // coming from base rule but changed by tuning rules Severity scoring.Severity // coming from base rule but changed by asset tagging and AlertSeverity - Rule *config.RuleMetadata + Rule *rules.RuleMetadata OverrideMergeByKeys []string // set by plugin's AlertMergeByKeys; overrides Rule.MergeByKeys() when non-nil } @@ -51,7 +51,7 @@ func (a *Alert) MergeByKeys() []string { } // Creates a new Alert -func NewAlert(rule *config.RuleMetadata, event events.Event, optFns ...AlertOptions) (*Alert, errors.Error) { +func NewAlert(rule *rules.RuleMetadata, event events.Event, optFns ...AlertOptions) (*Alert, errors.Error) { alert := &Alert{ AlertID: uuid.NewString(), Created: time.Now().UTC(), diff --git a/pkg/alerts/convert.go b/pkg/alerts/convert.go index d9bbfae..4cbdb58 100644 --- a/pkg/alerts/convert.go +++ 
b/pkg/alerts/convert.go @@ -6,7 +6,7 @@ import ( "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts/pb" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/rules/config" + "github.com/harishhary/blink/pkg/rules" "github.com/harishhary/blink/pkg/scoring" proto "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/structpb" @@ -37,23 +37,23 @@ func AlertToProto(a *Alert) (*pb.Alert, error) { return nil, err } p := &pb.Alert{ - AlertId: a.AlertID, - Attempts: int32(a.Attempts), - Cluster: a.Cluster, - CreatedNs: a.Created.UnixNano(), - DispatchedNs: a.Dispatched.UnixNano(), - Event: eventStruct, - Staged: a.Staged, - OutputsSent: a.OutputsSent, - EnrichmentsApplied: a.EnrichmentsApplied, - OverrideMergeByKeys: a.OverrideMergeByKeys, - LogSource: a.LogSource, - LogType: a.LogType, - SourceEntity: a.SourceEntity, - SourceService: a.SourceService, - Confidence: a.Confidence.String(), - Severity: a.Severity.String(), - Rule: ruleToProto(a.Rule), + AlertId: a.AlertID, + Attempts: int32(a.Attempts), + Cluster: a.Cluster, + CreatedNs: a.Created.UnixNano(), + DispatchedNs: a.Dispatched.UnixNano(), + Event: eventStruct, + Staged: a.Staged, + OutputsSent: a.OutputsSent, + EnrichmentsApplied: a.EnrichmentsApplied, + OverrideMergeByKeys: a.OverrideMergeByKeys, + LogSource: a.LogSource, + LogType: a.LogType, + SourceEntity: a.SourceEntity, + SourceService: a.SourceService, + Confidence: a.Confidence.String(), + Severity: a.Severity.String(), + Rule: ruleToProto(a.Rule), } return p, nil } @@ -68,29 +68,29 @@ func ProtoToAlert(p *pb.Alert) (*Alert, error) { sev, _ := scoring.ParseSeverity(p.GetSeverity()) a := &Alert{ - AlertID: p.GetAlertId(), - Attempts: int(p.GetAttempts()), - Cluster: p.GetCluster(), - Created: time.Unix(0, p.GetCreatedNs()).UTC(), - Dispatched: time.Unix(0, p.GetDispatchedNs()).UTC(), - Event: event, - Staged: p.GetStaged(), - OutputsSent: p.GetOutputsSent(), + AlertID: 
p.GetAlertId(), + Attempts: int(p.GetAttempts()), + Cluster: p.GetCluster(), + Created: time.Unix(0, p.GetCreatedNs()).UTC(), + Dispatched: time.Unix(0, p.GetDispatchedNs()).UTC(), + Event: event, + Staged: p.GetStaged(), + OutputsSent: p.GetOutputsSent(), EnrichmentsApplied: p.GetEnrichmentsApplied(), OverrideMergeByKeys: p.GetOverrideMergeByKeys(), - LogSource: p.GetLogSource(), - LogType: p.GetLogType(), - SourceEntity: p.GetSourceEntity(), - SourceService: p.GetSourceService(), - Confidence: conf, - Severity: sev, - Rule: protoToRuleMetadata(p.GetRule()), + LogSource: p.GetLogSource(), + LogType: p.GetLogType(), + SourceEntity: p.GetSourceEntity(), + SourceService: p.GetSourceService(), + Confidence: conf, + Severity: sev, + Rule: protoToRuleMetadata(p.GetRule()), } return a, nil } -// Converts a *config.RuleMetadata to its protobuf representation for embedding in an alert payload. +// Converts a *rules.RuleMetadata to its protobuf representation for embedding in an alert payload. -func ruleToProto(r *config.RuleMetadata) *pb.RuleMetadata { +func ruleToProto(r *rules.RuleMetadata) *pb.RuleMetadata { if r == nil { return nil } @@ -114,18 +114,18 @@ Enrichments: r.Enrichments(), TuningRules: r.TuningRules(), Version: r.Version, - FileName: r.FileName, + FileName: r.Name, DisplayName: r.DisplayName, References: r.References(), } } -// Reconstructs a *config.RuleMetadata from the alert's embedded rule metadata. +// Reconstructs a *rules.RuleMetadata from the alert's embedded rule metadata.
-func protoToRuleMetadata(m *pb.RuleMetadata) *config.RuleMetadata { +func protoToRuleMetadata(m *pb.RuleMetadata) *rules.RuleMetadata { if m == nil { - return &config.RuleMetadata{} + return &rules.RuleMetadata{} } - cfg, _ := config.New(config.RuleMetadata{ + cfg, _ := rules.New(rules.RuleMetadata{ PluginMetadata: plugin.PluginMetadata{ Id: m.GetId(), Name: m.GetName(), @@ -133,7 +133,6 @@ func protoToRuleMetadata(m *pb.RuleMetadata) *config.RuleMetadata { Description: m.GetDescription(), Enabled: m.GetEnabled(), Version: m.GetVersion(), - FileName: m.GetFileName(), }, SeverityStr: m.GetSeverity(), ConfidenceStr: m.GetConfidence(), diff --git a/pkg/enrichments/launcher.go b/pkg/enrichments/adapter.go similarity index 62% rename from pkg/enrichments/launcher.go rename to pkg/enrichments/adapter.go index 4e2af2a..aa97031 100644 --- a/pkg/enrichments/launcher.go +++ b/pkg/enrichments/adapter.go @@ -11,21 +11,18 @@ import ( "github.com/harishhary/blink/internal/helpers" "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" - "github.com/harishhary/blink/pkg/enrichments/config" "github.com/harishhary/blink/pkg/enrichments/rpc_enrichments" ) -type EnrichmentAdapter struct { - Watcher *config.Watcher +type EnrichmentConfigAdapter struct { + Manager *EnrichmentConfigManager } -func (l *EnrichmentAdapter) PluginKey() string { return "enrichment" } -func (l *EnrichmentAdapter) MagicValue() string { return "enrichment_v1" } -func (l *EnrichmentAdapter) GRPCPlugin() goplugin.Plugin { return &enrichmentPlugin{} } +func (l *EnrichmentConfigAdapter) PluginKey() string { return "enrichment" } +func (l *EnrichmentConfigAdapter) MagicValue() string { return "enrichment_v1" } +func (l *EnrichmentConfigAdapter) GRPCPlugin() goplugin.Plugin { return &enrichmentPlugin{} } -// Handshake connects to the enrichment subprocess, calls Init, and returns a -// ready rpcEnrichment. 
-func (l *EnrichmentAdapter) Handshake(ctx context.Context, raw interface{}, binPath string, hash string) (Enrichment, plugin.PluginLifecycle, string, string, error) { +func (l *EnrichmentConfigAdapter) Handshake(ctx context.Context, raw interface{}, binPath string, hash string) (Enrichment, plugin.PluginLifecycle, string, string, error) { rpc, ok := raw.(rpc_enrichments.EnrichmentClient) if !ok { return nil, nil, "", "", fmt.Errorf("dispense: unexpected type %T", raw) @@ -40,8 +37,8 @@ func (l *EnrichmentAdapter) Handshake(ctx context.Context, raw interface{}, binP return nil, nil, "", "", fmt.Errorf("init: %w", err) } - e := newRpcEnrichment(fileName, rpc, l.Watcher, hash) - cfg, ok := l.Watcher.Current().ByFileName(fileName) + e := newRpcEnrichment(fileName, rpc, l.Manager, hash) + cfg, ok := l.Manager.Current().ByFileName(fileName) id, name := fileName, fileName if ok { id = cfg.Id @@ -51,14 +48,14 @@ func (l *EnrichmentAdapter) Handshake(ctx context.Context, raw interface{}, binP } // IsReady reports whether this binary's YAML sidecar exists in the current registry. -func (l *EnrichmentAdapter) IsReady(binPath string) bool { - _, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) +func (l *EnrichmentConfigAdapter) IsReady(binPath string) bool { + _, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) return ok } // IsShadow reports whether this binary's YAML declares it as a shadow or canary version. -func (l *EnrichmentAdapter) IsShadow(binPath string) bool { - cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) +func (l *EnrichmentConfigAdapter) IsShadow(binPath string) bool { + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) if !ok { return false } @@ -67,13 +64,13 @@ func (l *EnrichmentAdapter) IsShadow(binPath string) bool { } // IsEnabled reports whether the enrichment's YAML sidecar still exists and is enabled. 
-func (l *EnrichmentAdapter) IsEnabled(h *plugin.PluginHandle) bool { - cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) +func (l *EnrichmentConfigAdapter) IsEnabled(h *plugin.PluginHandle) bool { + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) return ok && cfg.Enabled } -func (l *EnrichmentAdapter) Workers(binPath string) int { - cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) +func (l *EnrichmentConfigAdapter) Workers(binPath string) int { + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) if !ok || cfg.MaxProcs <= 0 { return 1 } diff --git a/pkg/enrichments/config.go b/pkg/enrichments/config.go new file mode 100644 index 0000000..b482f0d --- /dev/null +++ b/pkg/enrichments/config.go @@ -0,0 +1,77 @@ +// Each enrichment binary ships alongside a .yaml sidecar file. +// +// YAML schema example: +// +// id: "550e8400-e29b-41d4-a716-446655440000" +// name: "geoip" +// display_name: "GeoIP Enrichment" +// description: "Adds geographic location data to events." +// enabled: true +// version: "1.0.0" +// file_name: "geoip" +// depends_on: ["other-enrichment"] +// mode: "blue-green" +// min_procs: 1 +// max_procs: 2 + +package enrichments + +import ( + "fmt" + + cfg "github.com/harishhary/blink/internal/config" + "github.com/harishhary/blink/internal/logger" +) + +type EnrichmentConfigManager = cfg.ConfigManager[*EnrichmentMetadata] + +// Loader implements cfg.Loader[*EnrichmentMetadata] for enrichments. +// Embed cfg.BaseLoader to inherit default Parse and Validate; override CrossValidate. +type Loader struct { + cfg.BaseLoader[EnrichmentMetadata, *EnrichmentMetadata] +} + +// CrossValidate detects dependency cycles across all enrichment sidecars. 
+func (Loader) CrossValidate(all []*EnrichmentMetadata) error { + index := make(map[string]*EnrichmentMetadata, len(all)) + for _, e := range all { + index[e.Name] = e + } + const ( + unvisited = iota + inProgress + done + ) + state := make(map[string]int, len(all)) + var visit func(name string, path []string) error + visit = func(name string, path []string) error { + switch state[name] { + case done: + return nil + case inProgress: + return fmt.Errorf("enrichment config: dependency cycle detected: %v → %s", path, name) + } + state[name] = inProgress + e, ok := index[name] + if !ok { + return fmt.Errorf("enrichment config: %q depends on unknown enrichment %q", path[len(path)-1], name) + } + for _, dep := range e.DependsOn { + if err := visit(dep, append(path, name)); err != nil { + return err + } + } + state[name] = done + return nil + } + for _, e := range all { + if err := visit(e.Name, []string{}); err != nil { + return err + } + } + return nil +} + +func NewEnrichmentConfigManager(log *logger.Logger, dir string) *EnrichmentConfigManager { + return cfg.NewConfigManager[*EnrichmentMetadata](log, "enrichment", dir, Loader{}) +} diff --git a/pkg/enrichments/config/config.go b/pkg/enrichments/config/config.go deleted file mode 100644 index 5b02beb..0000000 --- a/pkg/enrichments/config/config.go +++ /dev/null @@ -1,110 +0,0 @@ -// Each enrichment binary ships alongside a .yaml sidecar file. -// -// YAML schema example: -// -// id: "550e8400-e29b-41d4-a716-446655440000" -// name: "geoip" -// display_name: "GeoIP Enrichment" -// description: "Adds geographic location data to events." 
-// enabled: true -// version: "1.0.0" -// file_name: "geoip" -// depends_on: ["other-enrichment"] -// mode: "blue-green" -// min_procs: 1 -// max_procs: 2 - -package config - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/harishhary/blink/internal/plugin" - "go.yaml.in/yaml/v4" -) - -// EnrichmentMetadata is the in-memory representation of an enrichment YAML sidecar. -type EnrichmentMetadata struct { - plugin.PluginMetadata `yaml:",inline"` - DependsOn []string `yaml:"depends_on"` -} - -// loader implements plugin.Loader[*EnrichmentMetadata]. -type loader struct{} - -func (loader) Load(path string) (*EnrichmentMetadata, error) { - data, err := os.ReadFile(path) - if err != nil { - return nil, fmt.Errorf("enrichment config: read %s: %w", path, err) - } - var cfg EnrichmentMetadata - if err := yaml.Unmarshal(data, &cfg); err != nil { - return nil, fmt.Errorf("enrichment config: parse %s: %w", path, err) - } - if cfg.Name == "" { - return nil, fmt.Errorf("enrichment config: %s: name is required", path) - } - if cfg.FileName == "" { - base := filepath.Base(path) - cfg.FileName = strings.TrimSuffix(base, filepath.Ext(base)) - } - if cfg.Id == "" { - cfg.Id = cfg.FileName - } - return &cfg, nil -} - -func (loader) Validate(all []*EnrichmentMetadata) error { - index := make(map[string]*EnrichmentMetadata, len(all)) - for _, e := range all { - index[e.Name] = e - } - const ( - unvisited = iota - inProgress - done - ) - state := make(map[string]int, len(all)) - var visit func(name string, path []string) error - visit = func(name string, path []string) error { - switch state[name] { - case done: - return nil - case inProgress: - return fmt.Errorf("enrichment config: dependency cycle detected: %v → %s", path, name) - } - state[name] = inProgress - e, ok := index[name] - if !ok { - return fmt.Errorf("enrichment config: %q depends on unknown enrichment %q", path[len(path)-1], name) - } - for _, dep := range e.DependsOn { - if err := visit(dep, 
append(path, name)); err != nil { - return err - } - } - state[name] = done - return nil - } - for _, e := range all { - if err := visit(e.Name, []string{}); err != nil { - return err - } - } - return nil -} - -// Registry and Watcher are the generic implementations parameterised for enrichments. -type Registry = plugin.Registry[*EnrichmentMetadata] -type Watcher = plugin.Watcher[*EnrichmentMetadata] - -func NewRegistry(dir string) (*Registry, error) { - return plugin.NewRegistry(dir, "enrichment", loader{}) -} - -func NewWatcher(dir string) (*Watcher, error) { - return plugin.NewWatcher("enrichment-config-watcher", dir, "enrichment", loader{}) -} diff --git a/pkg/enrichments/enrichment.go b/pkg/enrichments/enrichment.go index c7198b0..3548277 100644 --- a/pkg/enrichments/enrichment.go +++ b/pkg/enrichments/enrichment.go @@ -6,13 +6,17 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts" - "github.com/harishhary/blink/pkg/enrichments/config" ) // PluginMetadata is re-exported from internal/plugin so plugin authors don't need to // import an internal package. type PluginMetadata = plugin.PluginMetadata -type EnrichmentMetadata = config.EnrichmentMetadata + +// EnrichmentMetadata is the in-memory representation of an enrichment YAML sidecar. 
+type EnrichmentMetadata struct { + plugin.PluginMetadata `yaml:",inline"` + DependsOn []string `yaml:"depends_on"` +} type Enrichment interface { Enrich(ctx context.Context, alerts []*alerts.Alert) errors.Error diff --git a/pkg/enrichments/manager.go b/pkg/enrichments/manager.go deleted file mode 100644 index 2dbcd55..0000000 --- a/pkg/enrichments/manager.go +++ /dev/null @@ -1,13 +0,0 @@ -package enrichments - -import ( - "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/plugin" - "github.com/harishhary/blink/pkg/enrichments/config" -) - -var enrichmentManagerMetrics = plugin.NewPluginManagerMetrics("enrichmentsvc") - -func NewManager(log *logger.Logger, notify plugin.Notify, dir string, watcher *config.Watcher) *plugin.PluginManager[Enrichment] { - return plugin.NewPluginManager[Enrichment](log, notify, dir, &EnrichmentAdapter{Watcher: watcher}, enrichmentManagerMetrics) -} diff --git a/pkg/enrichments/plugin.go b/pkg/enrichments/plugin.go new file mode 100644 index 0000000..d3eb6be --- /dev/null +++ b/pkg/enrichments/plugin.go @@ -0,0 +1,14 @@ +package enrichments + +import ( + "github.com/harishhary/blink/internal/logger" + "github.com/harishhary/blink/internal/plugin" +) + +var enrichmentManagerMetrics = plugin.NewPluginManagerMetrics("enrichmentsvc") + +type EnrichmentPluginManager = plugin.PluginManager[Enrichment] + +func NewEnrichmentPluginManager(log *logger.Logger, notify plugin.Notify, dir string, manager *EnrichmentConfigManager) *EnrichmentPluginManager { + return plugin.NewPluginManager[Enrichment](log, notify, dir, &EnrichmentConfigAdapter{Manager: manager}, enrichmentManagerMetrics) +} diff --git a/pkg/enrichments/pool/pool.go b/pkg/enrichments/pool.go similarity index 72% rename from pkg/enrichments/pool/pool.go rename to pkg/enrichments/pool.go index c3822ce..456f12a 100644 --- a/pkg/enrichments/pool/pool.go +++ b/pkg/enrichments/pool.go @@ -1,4 +1,4 @@ -package pool +package enrichments import ( "context" @@ 
-10,16 +10,15 @@ import ( "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" "github.com/harishhary/blink/pkg/alerts" - "github.com/harishhary/blink/pkg/enrichments" ) type Pool struct { - *internal.ProcessPool[enrichments.Enrichment] + *internal.ProcessPool[Enrichment] } func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { return &Pool{ - ProcessPool: internal.NewProcessPool[enrichments.Enrichment](routing.Config(), internal.NewPoolMetrics("enrichments"), drainTimeout), + ProcessPool: internal.NewProcessPool[Enrichment](routing.Config(), internal.NewPoolMetrics("enrichments"), drainTimeout), } } @@ -27,7 +26,7 @@ func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { // absent/removed refer to the plugin state. errs contains per-alert errors (nil on success). func (p *Pool) Enrich(ctx context.Context, enrichmentID string, alerts []*alerts.Alert, canaryHashKey string) (absent bool, removed bool, errs []errors.Error) { errs = make([]errors.Error, len(alerts)) - err := p.Call(ctx, enrichmentID, canaryHashKey, func(callCtx context.Context, e enrichments.Enrichment) error { + err := p.Call(ctx, enrichmentID, canaryHashKey, func(callCtx context.Context, e Enrichment) error { if !e.EnrichmentMetadata().Enabled { return nil } @@ -50,23 +49,23 @@ func (p *Pool) Enrich(ctx context.Context, enrichmentID string, alerts []*alerts return false, false, errs } -func poolKey(e enrichments.Enrichment) internal.PoolKey { +func poolKey(e Enrichment) internal.PoolKey { cfg := e.EnrichmentMetadata() return internal.PoolKey{Id: cfg.Id, Version: cfg.Version, Hash: e.Checksum()} } func (p *Pool) Sync(msg messaging.Message) { - register := func(onDrained func(), items []enrichments.Enrichment, maxProcs int) { + register := func(onDrained func(), items []Enrichment, maxProcs int) { p.Register(poolKey(items[0]), items, maxProcs, onDrained) } switch m := msg.(type) { - case 
plugin.RegisterMessage[enrichments.Enrichment]: + case plugin.RegisterMessage[Enrichment]: register(nil, m.Items, m.MaxProcs) - case plugin.UpdateMessage[enrichments.Enrichment]: + case plugin.UpdateMessage[Enrichment]: register(m.OnDrained, m.Items, m.MaxProcs) - case plugin.UnregisterMessage[enrichments.Enrichment]: + case plugin.UnregisterMessage[Enrichment]: p.Unregister(m.ItemKey) - case plugin.RemoveMessage[enrichments.Enrichment]: + case plugin.RemoveMessage[Enrichment]: p.Remove(m.ItemKey) } } diff --git a/pkg/enrichments/rpc_enrichment.go b/pkg/enrichments/rpc_enrichment.go index 13d9003..99b57b3 100644 --- a/pkg/enrichments/rpc_enrichment.go +++ b/pkg/enrichments/rpc_enrichment.go @@ -7,40 +7,39 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts" - "github.com/harishhary/blink/pkg/enrichments/config" "github.com/harishhary/blink/pkg/enrichments/rpc_enrichments" ) type rpcEnrichment struct { - cfgWatcher *config.Watcher + cfgManager *EnrichmentConfigManager fileName string checksum string client rpc_enrichments.EnrichmentClient } -func newRpcEnrichment(fileName string, client rpc_enrichments.EnrichmentClient, watcher *config.Watcher, checksum string) *rpcEnrichment { +func newRpcEnrichment(fileName string, client rpc_enrichments.EnrichmentClient, manager *EnrichmentConfigManager, checksum string) *rpcEnrichment { return &rpcEnrichment{ - cfgWatcher: watcher, + cfgManager: manager, fileName: fileName, checksum: checksum, client: client, } } -func (r *rpcEnrichment) cfg() *config.EnrichmentMetadata { - if r.cfgWatcher == nil { +func (r *rpcEnrichment) cfg() *EnrichmentMetadata { + if r.cfgManager == nil { return nil } - v, _ := r.cfgWatcher.Current().ByFileName(r.fileName) + v, _ := r.cfgManager.Current().ByFileName(r.fileName) return v } // EnrichmentMetadata returns the live YAML-derived enrichment configuration. 
-func (r *rpcEnrichment) EnrichmentMetadata() *config.EnrichmentMetadata { +func (r *rpcEnrichment) EnrichmentMetadata() *EnrichmentMetadata { if c := r.cfg(); c != nil { return c } - return &config.EnrichmentMetadata{PluginMetadata: plugin.PluginMetadata{Id: r.fileName, Name: r.fileName, FileName: r.fileName}} + return &EnrichmentMetadata{PluginMetadata: plugin.PluginMetadata{Id: r.fileName, Name: r.fileName}} } func (r *rpcEnrichment) Metadata() plugin.PluginMetadata { diff --git a/pkg/enrichments/sdk/serve.go b/pkg/enrichments/serve.go similarity index 99% rename from pkg/enrichments/sdk/serve.go rename to pkg/enrichments/serve.go index e03ed34..f1102e0 100644 --- a/pkg/enrichments/sdk/serve.go +++ b/pkg/enrichments/serve.go @@ -1,4 +1,4 @@ -package sdk +package enrichments import ( "context" diff --git a/pkg/formatters/launcher.go b/pkg/formatters/adapter.go similarity index 80% rename from pkg/formatters/launcher.go rename to pkg/formatters/adapter.go index fb086d9..dde027a 100644 --- a/pkg/formatters/launcher.go +++ b/pkg/formatters/adapter.go @@ -11,17 +11,16 @@ import ( "github.com/harishhary/blink/internal/helpers" "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" - "github.com/harishhary/blink/pkg/formatters/config" "github.com/harishhary/blink/pkg/formatters/rpc_formatters" ) // FormatterAdapter implements goplugin.PluginAdapter[Formatter]. 
type FormatterAdapter struct { - Watcher *config.Watcher + Manager *FormatterConfigManager } -func (l *FormatterAdapter) PluginKey() string { return "formatter" } -func (l *FormatterAdapter) MagicValue() string { return "formatter_v1" } +func (l *FormatterAdapter) PluginKey() string { return "formatter" } +func (l *FormatterAdapter) MagicValue() string { return "formatter_v1" } func (l *FormatterAdapter) GRPCPlugin() goplugin.Plugin { return &formatterPlugin{} } // Handshake connects to the formatter subprocess, calls Init, and returns a @@ -41,8 +40,8 @@ func (l *FormatterAdapter) Handshake(ctx context.Context, raw interface{}, binPa return nil, nil, "", "", fmt.Errorf("init: %w", err) } - f := newRpcFormatter(fileName, rpc, l.Watcher, hash) - cfg, ok := l.Watcher.Current().ByFileName(fileName) + f := newRpcFormatter(fileName, rpc, l.Manager, hash) + cfg, ok := l.Manager.Current().ByFileName(fileName) id, name := fileName, fileName if ok { id = cfg.Id @@ -53,13 +52,13 @@ func (l *FormatterAdapter) Handshake(ctx context.Context, raw interface{}, binPa // IsReady reports whether this binary's YAML sidecar exists in the current registry. func (l *FormatterAdapter) IsReady(binPath string) bool { - _, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + _, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) return ok } // IsShadow reports whether this binary's YAML declares it as a shadow or canary version. func (l *FormatterAdapter) IsShadow(binPath string) bool { - cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) if !ok { return false } @@ -69,12 +68,12 @@ func (l *FormatterAdapter) IsShadow(binPath string) bool { // IsEnabled reports whether the formatter's YAML sidecar still exists and is enabled. 
func (l *FormatterAdapter) IsEnabled(h *plugin.PluginHandle) bool { - cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) return ok && cfg.Enabled } func (l *FormatterAdapter) Workers(binPath string) int { - cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) if !ok || cfg.MaxProcs <= 0 { return 1 } @@ -95,7 +94,9 @@ func (l *formatterLifecycle) Shutdown(ctx context.Context) error { return err } -type formatterPlugin struct{ goplugin.NetRPCUnsupportedPlugin } +type formatterPlugin struct { + goplugin.NetRPCUnsupportedPlugin +} func (p *formatterPlugin) GRPCServer(_ *goplugin.GRPCBroker, _ *grpc.Server) error { return nil } func (p *formatterPlugin) GRPCClient(_ context.Context, _ *goplugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { diff --git a/pkg/formatters/config.go b/pkg/formatters/config.go new file mode 100644 index 0000000..5935ae0 --- /dev/null +++ b/pkg/formatters/config.go @@ -0,0 +1,35 @@ +// Each formatter binary ships alongside a .yaml sidecar file. +// +// YAML schema example: +// +// id: "550e8400-e29b-41d4-a716-446655440001" +// name: "json-summary" +// display_name: "JSON Summary Formatter" +// description: "Formats alert data as a structured JSON summary." +// enabled: true +// version: "1.0.0" +// file_name: "json-summary" +// mode: "blue-green" +// min_procs: 1 +// max_procs: 2 + +package formatters + +import ( + cfg "github.com/harishhary/blink/internal/config" + "github.com/harishhary/blink/internal/logger" +) + +// Registry and Manager are the generic implementations parameterised for formatters. +type Registry = cfg.Registry[*FormatterMetadata] +type FormatterConfigManager = cfg.ConfigManager[*FormatterMetadata] + +// Loader implements cfg.Loader[*FormatterMetadata] for formatters. 
+// Embed cfg.BaseLoader to inherit default Parse, Validate, and CrossValidate. +type Loader struct { + cfg.BaseLoader[FormatterMetadata, *FormatterMetadata] +} + +func NewFormatterConfigManager(log *logger.Logger, dir string) *FormatterConfigManager { + return cfg.NewConfigManager[*FormatterMetadata](log, "formatter", dir, Loader{}) +} diff --git a/pkg/formatters/config/config.go b/pkg/formatters/config/config.go deleted file mode 100644 index f02624c..0000000 --- a/pkg/formatters/config/config.go +++ /dev/null @@ -1,70 +0,0 @@ -// Each formatter binary ships alongside a .yaml sidecar file. -// -// YAML schema example: -// -// id: "550e8400-e29b-41d4-a716-446655440001" -// name: "json-summary" -// display_name: "JSON Summary Formatter" -// description: "Formats alert data as a structured JSON summary." -// enabled: true -// version: "1.0.0" -// file_name: "json-summary" -// mode: "blue-green" -// min_procs: 1 -// max_procs: 2 - -package config - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/harishhary/blink/internal/plugin" - "go.yaml.in/yaml/v4" -) - -// FormatterMetadata is the in-memory representation of a formatter YAML sidecar. -type FormatterMetadata struct { - plugin.PluginMetadata `yaml:",inline"` -} - -// loader implements plugin.Loader[*FormatterMetadata]. 
-type loader struct{} - -func (loader) Load(path string) (*FormatterMetadata, error) { - data, err := os.ReadFile(path) - if err != nil { - return nil, fmt.Errorf("formatter config: read %s: %w", path, err) - } - var cfg FormatterMetadata - if err := yaml.Unmarshal(data, &cfg); err != nil { - return nil, fmt.Errorf("formatter config: parse %s: %w", path, err) - } - if cfg.Name == "" { - return nil, fmt.Errorf("formatter config: %s: name is required", path) - } - if cfg.FileName == "" { - base := filepath.Base(path) - cfg.FileName = strings.TrimSuffix(base, filepath.Ext(base)) - } - if cfg.Id == "" { - cfg.Id = cfg.FileName - } - return &cfg, nil -} - -func (loader) Validate(all []*FormatterMetadata) error { return nil } - -// Registry and Watcher are the generic implementations parameterised for formatters. -type Registry = plugin.Registry[*FormatterMetadata] -type Watcher = plugin.Watcher[*FormatterMetadata] - -func NewRegistry(dir string) (*Registry, error) { - return plugin.NewRegistry(dir, "formatter", loader{}) -} - -func NewWatcher(dir string) (*Watcher, error) { - return plugin.NewWatcher("formatter-config-watcher", dir, "formatter", loader{}) -} diff --git a/pkg/formatters/formatter.go b/pkg/formatters/formatter.go index cb7c0df..d641401 100644 --- a/pkg/formatters/formatter.go +++ b/pkg/formatters/formatter.go @@ -6,11 +6,14 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts" - "github.com/harishhary/blink/pkg/formatters/config" ) type PluginMetadata = plugin.PluginMetadata -type FormatterMetadata = config.FormatterMetadata + +// FormatterMetadata is the in-memory representation of a formatter YAML sidecar. 
+type FormatterMetadata struct {
+	plugin.PluginMetadata `yaml:",inline"`
+}
 
 type Formatter interface {
 	Format(ctx context.Context, alerts []*alerts.Alert) ([]map[string]any, errors.Error)
diff --git a/pkg/formatters/manager.go b/pkg/formatters/plugin.go
similarity index 50%
rename from pkg/formatters/manager.go
rename to pkg/formatters/plugin.go
index 99e18c1..2841d51 100644
--- a/pkg/formatters/manager.go
+++ b/pkg/formatters/plugin.go
@@ -3,11 +3,12 @@ package formatters
 
 import (
 	"github.com/harishhary/blink/internal/logger"
 	"github.com/harishhary/blink/internal/plugin"
-	"github.com/harishhary/blink/pkg/formatters/config"
 )
 
 var formatterManagerMetrics = plugin.NewPluginManagerMetrics("formatters")
 
-func NewManager(log *logger.Logger, notify plugin.Notify, dir string, watcher *config.Watcher) *plugin.PluginManager[Formatter] {
-	return plugin.NewPluginManager[Formatter](log, notify, dir, &FormatterAdapter{Watcher: watcher}, formatterManagerMetrics)
+type FormatterPluginManager = plugin.PluginManager[Formatter]
+
+func NewFormatterPluginManager(log *logger.Logger, notify plugin.Notify, dir string, manager *FormatterConfigManager) *FormatterPluginManager {
+	return plugin.NewPluginManager[Formatter](log, notify, dir, &FormatterAdapter{Manager: manager}, formatterManagerMetrics)
 }
diff --git a/pkg/formatters/pool/pool.go b/pkg/formatters/pool.go
similarity index 75%
rename from pkg/formatters/pool/pool.go
rename to pkg/formatters/pool.go
index ef79d34..9877b41 100644
--- a/pkg/formatters/pool/pool.go
+++ b/pkg/formatters/pool.go
@@ -1,4 +1,4 @@
-package pool
+package formatters
 
 import (
 	"context"
@@ -10,16 +10,15 @@ import (
 	"github.com/harishhary/blink/internal/plugin"
 	internal "github.com/harishhary/blink/internal/pools"
 	"github.com/harishhary/blink/pkg/alerts"
-	"github.com/harishhary/blink/pkg/formatters"
 )
 
 type Pool struct {
-	*internal.ProcessPool[formatters.Formatter]
+	*internal.ProcessPool[Formatter]
 }
 
 func NewPool(routing *internal.RoutingTable,
drainTimeout time.Duration) *Pool { return &Pool{ - ProcessPool: internal.NewProcessPool[formatters.Formatter](routing.Config(), internal.NewPoolMetrics("formatters"), drainTimeout), + ProcessPool: internal.NewProcessPool[Formatter](routing.Config(), internal.NewPoolMetrics("formatters"), drainTimeout), } } @@ -30,7 +29,7 @@ func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { func (p *Pool) Format(ctx context.Context, formatterID string, alerts []*alerts.Alert, canaryHashKey string) (outs []map[string]any, absent bool, removed bool, errs []errors.Error) { outs = make([]map[string]any, len(alerts)) errs = make([]errors.Error, len(alerts)) - err := p.Call(ctx, formatterID, canaryHashKey, func(callCtx context.Context, f formatters.Formatter) error { + err := p.Call(ctx, formatterID, canaryHashKey, func(callCtx context.Context, f Formatter) error { if !f.FormatterMetadata().Enabled { return nil } @@ -56,23 +55,23 @@ func (p *Pool) Format(ctx context.Context, formatterID string, alerts []*alerts. 
return outs, false, false, errs } -func poolKey(f formatters.Formatter) internal.PoolKey { +func poolKey(f Formatter) internal.PoolKey { cfg := f.FormatterMetadata() return internal.PoolKey{Id: cfg.Id, Version: cfg.Version, Hash: f.Checksum()} } func (p *Pool) Sync(msg messaging.Message) { - register := func(onDrained func(), items []formatters.Formatter, maxProcs int) { + register := func(onDrained func(), items []Formatter, maxProcs int) { p.Register(poolKey(items[0]), items, maxProcs, onDrained) } switch m := msg.(type) { - case plugin.RegisterMessage[formatters.Formatter]: + case plugin.RegisterMessage[Formatter]: register(nil, m.Items, m.MaxProcs) - case plugin.UpdateMessage[formatters.Formatter]: + case plugin.UpdateMessage[Formatter]: register(m.OnDrained, m.Items, m.MaxProcs) - case plugin.UnregisterMessage[formatters.Formatter]: + case plugin.UnregisterMessage[Formatter]: p.Unregister(m.ItemKey) - case plugin.RemoveMessage[formatters.Formatter]: + case plugin.RemoveMessage[Formatter]: p.Remove(m.ItemKey) } } diff --git a/pkg/formatters/rpc_formatter.go b/pkg/formatters/rpc_formatter.go index 5c089fd..24f5bc0 100644 --- a/pkg/formatters/rpc_formatter.go +++ b/pkg/formatters/rpc_formatter.go @@ -8,40 +8,39 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts" - "github.com/harishhary/blink/pkg/formatters/config" "github.com/harishhary/blink/pkg/formatters/rpc_formatters" ) type rpcFormatter struct { - cfgWatcher *config.Watcher + cfgManager *FormatterConfigManager fileName string checksum string client rpc_formatters.FormatterClient } -func newRpcFormatter(fileName string, client rpc_formatters.FormatterClient, watcher *config.Watcher, checksum string) *rpcFormatter { +func newRpcFormatter(fileName string, client rpc_formatters.FormatterClient, manager *FormatterConfigManager, checksum string) *rpcFormatter { return &rpcFormatter{ - cfgWatcher: watcher, + cfgManager: 
manager, fileName: fileName, checksum: checksum, client: client, } } -func (f *rpcFormatter) cfg() *config.FormatterMetadata { - if f.cfgWatcher == nil { +func (f *rpcFormatter) cfg() *FormatterMetadata { + if f.cfgManager == nil { return nil } - v, _ := f.cfgWatcher.Current().ByFileName(f.fileName) + v, _ := f.cfgManager.Current().ByFileName(f.fileName) return v } // FormatterMetadata returns the live YAML-derived formatter configuration. -func (f *rpcFormatter) FormatterMetadata() *config.FormatterMetadata { +func (f *rpcFormatter) FormatterMetadata() *FormatterMetadata { if c := f.cfg(); c != nil { return c } - return &config.FormatterMetadata{PluginMetadata: plugin.PluginMetadata{Id: f.fileName, Name: f.fileName, FileName: f.fileName}} + return &FormatterMetadata{PluginMetadata: plugin.PluginMetadata{Id: f.fileName, Name: f.fileName}} } func (f *rpcFormatter) Metadata() plugin.PluginMetadata { diff --git a/pkg/formatters/sdk/serve.go b/pkg/formatters/serve.go similarity index 99% rename from pkg/formatters/sdk/serve.go rename to pkg/formatters/serve.go index ecac91f..80a5d68 100644 --- a/pkg/formatters/sdk/serve.go +++ b/pkg/formatters/serve.go @@ -1,4 +1,4 @@ -package sdk +package formatters import ( "context" diff --git a/pkg/matchers/launcher.go b/pkg/matchers/adapter.go similarity index 80% rename from pkg/matchers/launcher.go rename to pkg/matchers/adapter.go index 769ab39..b1492da 100644 --- a/pkg/matchers/launcher.go +++ b/pkg/matchers/adapter.go @@ -11,16 +11,15 @@ import ( "github.com/harishhary/blink/internal/helpers" "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" - "github.com/harishhary/blink/pkg/matchers/config" "github.com/harishhary/blink/pkg/matchers/rpc_matchers" ) type MatcherAdapter struct { - Watcher *config.Watcher + Manager *MatcherConfigManager } -func (l *MatcherAdapter) PluginKey() string { return "matcher" } -func (l *MatcherAdapter) MagicValue() string { return "matcher_v1" } +func 
(l *MatcherAdapter) PluginKey() string { return "matcher" } +func (l *MatcherAdapter) MagicValue() string { return "matcher_v1" } func (l *MatcherAdapter) GRPCPlugin() goplugin.Plugin { return &matcherPlugin{} } // Handshake connects to the matcher subprocess, calls Init, and returns a @@ -40,8 +39,8 @@ func (l *MatcherAdapter) Handshake(ctx context.Context, raw interface{}, binPath return nil, nil, "", "", fmt.Errorf("init: %w", err) } - m := newRpcMatcher(fileName, rpc, l.Watcher, 5*time.Second, hash) - cfg, ok := l.Watcher.Current().ByFileName(fileName) + m := newRpcMatcher(fileName, rpc, l.Manager, 5*time.Second, hash) + cfg, ok := l.Manager.Current().ByFileName(fileName) id, name := fileName, fileName if ok { id = cfg.Id @@ -52,13 +51,13 @@ func (l *MatcherAdapter) Handshake(ctx context.Context, raw interface{}, binPath // IsReady reports whether this binary's YAML sidecar exists in the current registry. func (l *MatcherAdapter) IsReady(binPath string) bool { - _, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + _, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) return ok } // IsShadow reports whether this binary's YAML declares it as a shadow or canary version. func (l *MatcherAdapter) IsShadow(binPath string) bool { - cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) if !ok { return false } @@ -68,12 +67,12 @@ func (l *MatcherAdapter) IsShadow(binPath string) bool { // IsEnabled reports whether the matcher's YAML sidecar still exists and is enabled. 
func (l *MatcherAdapter) IsEnabled(h *plugin.PluginHandle) bool {
-	cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath))
+	cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(h.BinPath))
 	return ok && cfg.Enabled
 }
 
 func (l *MatcherAdapter) Workers(binPath string) int {
-	cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath))
+	cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath))
 	if !ok || cfg.MaxProcs <= 0 {
 		return 1
 	}
@@ -92,7 +91,9 @@ func (l *matcherLifecycle) Shutdown(ctx context.Context) error {
 	return err
 }
 
-type matcherPlugin struct{ goplugin.NetRPCUnsupportedPlugin }
+type matcherPlugin struct {
+	goplugin.NetRPCUnsupportedPlugin
+}
 
 func (p *matcherPlugin) GRPCServer(_ *goplugin.GRPCBroker, _ *grpc.Server) error { return nil }
 func (p *matcherPlugin) GRPCClient(_ context.Context, _ *goplugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
diff --git a/pkg/matchers/config.go b/pkg/matchers/config.go
new file mode 100644
index 0000000..50296ab
--- /dev/null
+++ b/pkg/matchers/config.go
@@ -0,0 +1,35 @@
+// Each matcher binary ships alongside a .yaml sidecar file.
+//
+// YAML schema example:
+//
+//	id: "550e8400-e29b-41d4-a716-446655440002"
+//	name: "prod-accounts"
+//	display_name: "Production Accounts Matcher"
+//	description: "Matches events from production AWS accounts."
+//	enabled: true
+//	version: "1.0.0"
+//	file_name: "prod-accounts"
+//	global: false
+//	mode: "blue-green"
+//	min_procs: 1
+//	max_procs: 2
+
+package matchers
+
+import (
+	cfg "github.com/harishhary/blink/internal/config"
+	"github.com/harishhary/blink/internal/logger"
+)
+
+// MatcherConfigManager is the generic ConfigManager implementation parameterised for matchers.
+type MatcherConfigManager = cfg.ConfigManager[*MatcherMetadata]
+
+// Loader implements cfg.Loader[*MatcherMetadata] for matchers.
+// Embed cfg.BaseLoader to inherit default Parse, Validate, and CrossValidate.
+type Loader struct { + cfg.BaseLoader[MatcherMetadata, *MatcherMetadata] +} + +func NewMatcherConfigManager(log *logger.Logger, dir string) *MatcherConfigManager { + return cfg.NewConfigManager[*MatcherMetadata](log, "matcher", dir, Loader{}) +} diff --git a/pkg/matchers/config/config.go b/pkg/matchers/config/config.go deleted file mode 100644 index 47a9477..0000000 --- a/pkg/matchers/config/config.go +++ /dev/null @@ -1,72 +0,0 @@ -// Each matcher binary ships alongside a .yaml sidecar file. -// -// YAML schema example: -// -// id: "550e8400-e29b-41d4-a716-446655440002" -// name: "prod-accounts" -// display_name: "Production Accounts Matcher" -// description: "Matches events from production AWS accounts." -// enabled: true -// version: "1.0.0" -// file_name: "prod-accounts" -// global: false -// mode: "blue-green" -// min_procs: 1 -// max_procs: 2 - -package config - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/harishhary/blink/internal/plugin" - "go.yaml.in/yaml/v4" -) - -// MatcherMetadata is the in-memory representation of a matcher YAML sidecar. -type MatcherMetadata struct { - plugin.PluginMetadata `yaml:",inline"` - Global bool `yaml:"global"` -} - -// loader implements plugin.Loader[*MatcherMetadata]. 
-type loader struct{} - -func (loader) Load(path string) (*MatcherMetadata, error) { - data, err := os.ReadFile(path) - if err != nil { - return nil, fmt.Errorf("matcher config: read %s: %w", path, err) - } - var cfg MatcherMetadata - if err := yaml.Unmarshal(data, &cfg); err != nil { - return nil, fmt.Errorf("matcher config: parse %s: %w", path, err) - } - if cfg.Name == "" { - return nil, fmt.Errorf("matcher config: %s: name is required", path) - } - if cfg.FileName == "" { - base := filepath.Base(path) - cfg.FileName = strings.TrimSuffix(base, filepath.Ext(base)) - } - if cfg.Id == "" { - cfg.Id = cfg.FileName - } - return &cfg, nil -} - -func (loader) Validate(all []*MatcherMetadata) error { return nil } - -// Registry and Watcher are the generic implementations parameterised for matchers. -type Registry = plugin.Registry[*MatcherMetadata] -type Watcher = plugin.Watcher[*MatcherMetadata] - -func NewRegistry(dir string) (*Registry, error) { - return plugin.NewRegistry(dir, "matcher", loader{}) -} - -func NewWatcher(dir string) (*Watcher, error) { - return plugin.NewWatcher("matcher-config-watcher", dir, "matcher", loader{}) -} diff --git a/pkg/matchers/manager.go b/pkg/matchers/manager.go index a3c27cb..bb4c480 100644 --- a/pkg/matchers/manager.go +++ b/pkg/matchers/manager.go @@ -3,11 +3,12 @@ package matchers import ( "github.com/harishhary/blink/internal/logger" "github.com/harishhary/blink/internal/plugin" - "github.com/harishhary/blink/pkg/matchers/config" ) var matcherManagerMetrics = plugin.NewPluginManagerMetrics("matchersvc") -func NewManager(log *logger.Logger, notify plugin.Notify, dir string, watcher *config.Watcher) *plugin.PluginManager[Matcher] { - return plugin.NewPluginManager[Matcher](log, notify, dir, &MatcherAdapter{Watcher: watcher}, matcherManagerMetrics) +type MatcherPluginManager = plugin.PluginManager[Matcher] + +func NewMatcherPluginManager(log *logger.Logger, notify plugin.Notify, dir string, manager *MatcherConfigManager) 
*plugin.PluginManager[Matcher] { + return plugin.NewPluginManager[Matcher](log, notify, dir, &MatcherAdapter{Manager: manager}, matcherManagerMetrics) } diff --git a/pkg/matchers/matcher.go b/pkg/matchers/matcher.go index da840fd..de60cce 100644 --- a/pkg/matchers/matcher.go +++ b/pkg/matchers/matcher.go @@ -6,11 +6,15 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/matchers/config" ) type PluginMetadata = plugin.PluginMetadata -type MatcherMetadata = config.MatcherMetadata + +// MatcherMetadata is the in-memory representation of a matcher YAML sidecar. +type MatcherMetadata struct { + plugin.PluginMetadata `yaml:",inline"` + Global bool `yaml:"global"` +} type Matcher interface { MatcherMetadata() *MatcherMetadata diff --git a/pkg/matchers/pool/pool.go b/pkg/matchers/pool.go similarity index 72% rename from pkg/matchers/pool/pool.go rename to pkg/matchers/pool.go index c69902b..064da27 100644 --- a/pkg/matchers/pool/pool.go +++ b/pkg/matchers/pool.go @@ -1,4 +1,4 @@ -package pool +package matchers import ( "context" @@ -9,16 +9,15 @@ import ( "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/matchers" ) type Pool struct { - *internal.ProcessPool[matchers.Matcher] + *internal.ProcessPool[Matcher] } func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { return &Pool{ - ProcessPool: internal.NewProcessPool[matchers.Matcher](routing.Config(), internal.NewPoolMetrics("matchers"), drainTimeout), + ProcessPool: internal.NewProcessPool[Matcher](routing.Config(), internal.NewPoolMetrics("matchers"), drainTimeout), } } @@ -26,7 +25,7 @@ func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { // Disabled matchers are treated as pass-through (all results true). 
func (p *Pool) Match(ctx context.Context, matcherID string, evts []events.Event, canaryHashKey string) ([]bool, errors.Error) { var results []bool - err := p.Call(ctx, matcherID, canaryHashKey, func(callCtx context.Context, m matchers.Matcher) error { + err := p.Call(ctx, matcherID, canaryHashKey, func(callCtx context.Context, m Matcher) error { if !m.MatcherMetadata().Enabled { results = make([]bool, len(evts)) for i := range results { @@ -45,23 +44,23 @@ func (p *Pool) Match(ctx context.Context, matcherID string, evts []events.Event, } // Handles plugin lifecycle messages from the plugin manager bus, registering or deregistering matchers in the pool. -func poolKey(m matchers.Matcher) internal.PoolKey { +func poolKey(m Matcher) internal.PoolKey { cfg := m.MatcherMetadata() return internal.PoolKey{Id: cfg.Id, Version: cfg.Version, Hash: m.Checksum()} } func (p *Pool) Sync(msg messaging.Message) { - register := func(onDrained func(), items []matchers.Matcher, maxProcs int) { + register := func(onDrained func(), items []Matcher, maxProcs int) { p.Register(poolKey(items[0]), items, maxProcs, onDrained) } switch m := msg.(type) { - case plugin.RegisterMessage[matchers.Matcher]: + case plugin.RegisterMessage[Matcher]: register(nil, m.Items, m.MaxProcs) - case plugin.UpdateMessage[matchers.Matcher]: + case plugin.UpdateMessage[Matcher]: register(m.OnDrained, m.Items, m.MaxProcs) - case plugin.UnregisterMessage[matchers.Matcher]: + case plugin.UnregisterMessage[Matcher]: p.Unregister(m.ItemKey) - case plugin.RemoveMessage[matchers.Matcher]: + case plugin.RemoveMessage[Matcher]: p.Remove(m.ItemKey) } } diff --git a/pkg/matchers/rpc_matcher.go b/pkg/matchers/rpc_matcher.go index 8d8ad73..a91407c 100644 --- a/pkg/matchers/rpc_matcher.go +++ b/pkg/matchers/rpc_matcher.go @@ -8,21 +8,20 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/events" - 
"github.com/harishhary/blink/pkg/matchers/config" "github.com/harishhary/blink/pkg/matchers/rpc_matchers" ) type rpcMatcher struct { - cfgWatcher *config.Watcher + cfgManager *MatcherConfigManager fileName string checksum string client rpc_matchers.MatcherClient timeout time.Duration } -func newRpcMatcher(fileName string, client rpc_matchers.MatcherClient, watcher *config.Watcher, timeout time.Duration, checksum string) *rpcMatcher { +func newRpcMatcher(fileName string, client rpc_matchers.MatcherClient, manager *MatcherConfigManager, timeout time.Duration, checksum string) *rpcMatcher { return &rpcMatcher{ - cfgWatcher: watcher, + cfgManager: manager, fileName: fileName, checksum: checksum, client: client, @@ -30,20 +29,20 @@ func newRpcMatcher(fileName string, client rpc_matchers.MatcherClient, watcher * } } -func (r *rpcMatcher) cfg() *config.MatcherMetadata { - if r.cfgWatcher == nil { +func (r *rpcMatcher) cfg() *MatcherMetadata { + if r.cfgManager == nil { return nil } - v, _ := r.cfgWatcher.Current().ByFileName(r.fileName) + v, _ := r.cfgManager.Current().ByFileName(r.fileName) return v } // MatcherMetadata returns the live YAML-derived matcher configuration. 
-func (r *rpcMatcher) MatcherMetadata() *config.MatcherMetadata { +func (r *rpcMatcher) MatcherMetadata() *MatcherMetadata { if c := r.cfg(); c != nil { return c } - return &config.MatcherMetadata{PluginMetadata: plugin.PluginMetadata{Id: r.fileName, Name: r.fileName, FileName: r.fileName}} + return &MatcherMetadata{PluginMetadata: plugin.PluginMetadata{Id: r.fileName, Name: r.fileName}} } func (r *rpcMatcher) Metadata() plugin.PluginMetadata { diff --git a/pkg/matchers/sdk/serve.go b/pkg/matchers/serve.go similarity index 99% rename from pkg/matchers/sdk/serve.go rename to pkg/matchers/serve.go index 2d8629b..b1f933a 100644 --- a/pkg/matchers/sdk/serve.go +++ b/pkg/matchers/serve.go @@ -1,4 +1,4 @@ -package sdk +package matchers import ( "context" diff --git a/pkg/rules/launcher.go b/pkg/rules/adapter.go similarity index 76% rename from pkg/rules/launcher.go rename to pkg/rules/adapter.go index 4aa065a..ea66476 100644 --- a/pkg/rules/launcher.go +++ b/pkg/rules/adapter.go @@ -11,19 +11,18 @@ import ( "github.com/harishhary/blink/internal/helpers" "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" - "github.com/harishhary/blink/pkg/rules/config" "github.com/harishhary/blink/pkg/rules/rpc_rules" ) type RuleAdapter struct { - Watcher *config.Watcher + Manager *RuleConfigManager } -func (l *RuleAdapter) PluginKey() string { return "rule" } -func (l *RuleAdapter) MagicValue() string { return "rule_v1" } +func (l *RuleAdapter) PluginKey() string { return "rule" } +func (l *RuleAdapter) MagicValue() string { return "rule_v1" } func (l *RuleAdapter) GRPCPlugin() goplugin.Plugin { return &rulePlugin{} } -// Connects to the rule subprocess, reads the YAML sidecar for its metadata, calls Init, and returns a ready rpcRule. The rule binary's basename must match the YAML file_name field. +// Connects to the rule subprocess, reads the YAML sidecar for its metadata, calls Init, and returns a ready rpcRule. 
The rule binary's basename must match the YAML name field. func (l *RuleAdapter) Handshake(ctx context.Context, raw interface{}, binPath string, hash string) (Rule, plugin.PluginLifecycle, string, string, error) { rpc, ok := raw.(rpc_rules.RuleClient) if !ok { @@ -31,9 +30,9 @@ func (l *RuleAdapter) Handshake(ctx context.Context, raw interface{}, binPath st } fileName := helpers.BinaryBaseName(binPath) - cfg := l.Watcher.Current().ByFileName(fileName) - if cfg == nil { - return nil, nil, "", "", fmt.Errorf("rule launcher: no YAML sidecar found for binary %q (looked up file_name=%q)", binPath, fileName) + cfg, ok := l.Manager.Current().ByFileName(fileName) + if !ok { + return nil, nil, "", "", fmt.Errorf("rule launcher: no YAML sidecar found for binary %q (looked up name=%q)", binPath, fileName) } initCtx, cancel := context.WithTimeout(ctx, 10*time.Second) @@ -43,7 +42,7 @@ func (l *RuleAdapter) Handshake(ctx context.Context, raw interface{}, binPath st return nil, nil, "", "", fmt.Errorf("init: %w", err) } - rule := newRpcRule(fileName, rpc, l.Watcher, hash) + rule := newRpcRule(fileName, rpc, l.Manager, hash) return rule, &ruleLifecycle{rpc: rpc}, cfg.Id, cfg.Name, nil } @@ -54,19 +53,19 @@ func (l *RuleAdapter) Handshake(ctx context.Context, raw interface{}, binPath st // a cached set - so there is no race between the config watcher's reload debounce and // the manager's reconcile reacting to the same fsnotify event. func (l *RuleAdapter) IsReady(binPath string) bool { - cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) - if cfg == nil { + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if !ok { return false } - return !l.Watcher.HasBlockingErrorFor(cfg.Id, cfg.FileName+".yaml") + return !l.Manager.HasBlockingErrorFor(cfg.Id, cfg.Name+".yaml") } // IsShadow reports whether this binary's YAML declares it as a shadow or canary version. 
// reconcile() starts non-shadow binaries first so the stable version always wins the active // pool slot on a fresh start, regardless of filename alphabetical order. func (l *RuleAdapter) IsShadow(binPath string) bool { - cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) - if cfg == nil { + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if !ok { return false } return cfg.RolloutMode == internal.RolloutModeCanary || cfg.RolloutMode == internal.RolloutModeShadow @@ -75,13 +74,13 @@ func (l *RuleAdapter) IsShadow(binPath string) bool { // IsEnabled reports whether the rule's YAML sidecar still exists and is enabled. // Called during every reconcile func so process-zombies (binary running but YAML removed/disabled) are stopped without waiting for a binary change. func (l *RuleAdapter) IsEnabled(h *plugin.PluginHandle) bool { - cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) - return cfg != nil && cfg.Enabled + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) + return ok && cfg.Enabled } func (l *RuleAdapter) Workers(binPath string) int { - cfg := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) - if cfg == nil || cfg.MaxProcs <= 0 { + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) + if !ok || cfg.MaxProcs <= 0 { return 1 } return cfg.MaxProcs @@ -102,7 +101,9 @@ func (l *ruleLifecycle) Shutdown(ctx context.Context) error { } // rulePlugin is the go-plugin client-side stub. 
-type rulePlugin struct{ goplugin.NetRPCUnsupportedPlugin } +type rulePlugin struct { + goplugin.NetRPCUnsupportedPlugin +} func (p *rulePlugin) GRPCServer(_ *goplugin.GRPCBroker, _ *grpc.Server) error { return nil diff --git a/pkg/rules/config.go b/pkg/rules/config.go new file mode 100644 index 0000000..4195d11 --- /dev/null +++ b/pkg/rules/config.go @@ -0,0 +1,231 @@ +// Each rule binary ships alongside a .yaml file that contains the +// full rule configuration. +// +// YAML schema example: +// +// id: "550e8400-e29b-41d4-a716-446655440000" +// name: "brute_force_login" +// display_name: "Brute Force Login Attempt" +// description: "Detects repeated failed login attempts from a single source." +// enabled: true +// version: "1.2.0" +// severity: "high" +// confidence: "medium" +// signal: true +// signal_threshold: "medium" +// log_types: ["auth", "cloudtrail"] +// matchers: ["prod-accounts"] +// merge_by_keys: ["source_ip", "username"] +// merge_window_mins: 60 +// req_subkeys: ["source_ip"] +// tags: ["t1078", "initial-access"] +// dispatchers: ["pagerduty", "slack"] +// formatters: ["json-summary"] +// enrichments: ["geoip"] +// tuning_rules: ["noisy-hosts"] +// references: ["https://attack.mitre.org/techniques/T1110/"] + +package rules + +import ( + "fmt" + "os" + "regexp" + "time" + + cfg "github.com/harishhary/blink/internal/config" + "github.com/harishhary/blink/internal/logger" + "github.com/harishhary/blink/pkg/scoring" + "go.yaml.in/yaml/v4" +) + +// ValidationError is an alias so callers in this package use the short name. +type ValidationError = cfg.ValidationError + +var semverRE = regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+`) + +// Observable describes one observable field that a rule can surface in an alert. 
+type Observable struct { + NameVal string `yaml:"name"` + DescriptionVal string `yaml:"description"` + AggregationVal bool `yaml:"aggregation"` +} + +func (o *Observable) Name() string { return o.NameVal } +func (o *Observable) Description() string { return o.DescriptionVal } +func (o *Observable) Aggregation() bool { return o.AggregationVal } + +// RuleMetadata is the in-memory representation of a rule YAML sidecar file. +type RuleMetadata struct { + PluginMetadata `yaml:",inline"` + + // Scoring + SeverityStr string `yaml:"severity"` + ConfidenceStr string `yaml:"confidence"` + SignalThresholdStr string `yaml:"signal_threshold"` + + // Routing / matching + LogTypesField []string `yaml:"log_types"` + MatchersField []string `yaml:"matchers"` + ReqSubkeysField []string `yaml:"req_subkeys"` + + // Merging + MergeByKeysField []string `yaml:"merge_by_keys"` + MergeWindowMinsField uint32 `yaml:"merge_window_mins"` + + // Signal + SignalField bool `yaml:"signal"` + + // Labelling + TagsField []string `yaml:"tags"` + ReferencesField []string `yaml:"references"` + + // Observables - static fields the rule surfaces in generated alerts. + ObservablesField []Observable `yaml:"observables"` + + // Pipeline stages + DispatchersField []string `yaml:"dispatchers"` + FormattersField []string `yaml:"formatters"` + EnrichmentsField []string `yaml:"enrichments"` + TuningRulesField []string `yaml:"tuning_rules"` + + // Parsed scoring values - populated by Load(); not read from YAML directly. + severity scoring.Severity + confidence scoring.Confidence + signalThreshold scoring.Confidence + riskScore scoring.RiskScore +} + +// Load reads and validates a single YAML sidecar file, returning a *RuleMetadata + +// New constructs a RuleMetadata from already-parsed field values (e.g. from a proto payload). 
+func New(c RuleMetadata) (*RuleMetadata, error) { + if err := c.resolveScoring(); err != nil { + return nil, err + } + return &c, nil +} + +// resolveScoring parses the string scoring fields to their typed equivalents +// and computes the risk score. +func (c *RuleMetadata) resolveScoring() error { + var err error + if c.SeverityStr != "" { + c.severity, err = scoring.ParseSeverity(c.SeverityStr) + if err != nil { + return err + } + } + if c.ConfidenceStr != "" { + c.confidence, err = scoring.ParseConfidence(c.ConfidenceStr) + if err != nil { + return err + } + } + if c.SignalThresholdStr != "" { + c.signalThreshold, err = scoring.ParseConfidence(c.SignalThresholdStr) + if err != nil { + return err + } + } + c.riskScore = scoring.ComputeRiskScore(c.confidence, c.severity) + return nil +} + +func (c *RuleMetadata) References() []string { return c.ReferencesField } +func (c *RuleMetadata) Severity() scoring.Severity { return c.severity } +func (c *RuleMetadata) Confidence() scoring.Confidence { return c.confidence } +func (c *RuleMetadata) RiskScore() scoring.RiskScore { return c.riskScore } +func (c *RuleMetadata) MergeByKeys() []string { return c.MergeByKeysField } +func (c *RuleMetadata) MergeWindowMins() time.Duration { + return time.Duration(c.MergeWindowMinsField) * time.Minute +} +func (c *RuleMetadata) ReqSubkeys() []string { return c.ReqSubkeysField } +func (c *RuleMetadata) Signal() bool { return c.SignalField } +func (c *RuleMetadata) SignalThreshold() scoring.Confidence { return c.signalThreshold } +func (c *RuleMetadata) Tags() []string { return c.TagsField } +func (c *RuleMetadata) Dispatchers() []string { return c.DispatchersField } +func (c *RuleMetadata) LogTypes() []string { return c.LogTypesField } +func (c *RuleMetadata) Observables() []Observable { return c.ObservablesField } +func (c *RuleMetadata) Matchers() []string { return c.MatchersField } +func (c *RuleMetadata) Formatters() []string { return c.FormattersField } +func (c *RuleMetadata) 
Enrichments() []string { return c.EnrichmentsField } +func (c *RuleMetadata) TuningRules() []string { return c.TuningRulesField } + +// Registry is the generic registry parameterised for rules. +type RuleRegistry = cfg.Registry[*RuleMetadata] + +// Manager is the generic config manager parameterised for rules. +type RuleConfigManager = cfg.ConfigManager[*RuleMetadata] + +// Loader implements cfg.Loader[*RuleMetadata] for rules. +// Embed cfg.BaseLoader to inherit default CrossValidate (no-op); override Parse and Validate. +type Loader struct { + cfg.BaseLoader[RuleMetadata, *RuleMetadata] +} + +func (Loader) Parse(path string) (*RuleMetadata, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("read %s: %w", path, err) + } + var cfg RuleMetadata + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("parse %s: %w", path, err) + } + if err := cfg.resolveScoring(); err != nil { + return nil, fmt.Errorf("%s: %w", path, err) + } + return &cfg, nil +} + +// Validate extends the common structural checks with rule-specific field validation +// (required id, required version, semver format). +func (l Loader) Validate(items []*RuleMetadata, binaries []string) []ValidationError { + var errs []ValidationError + for _, cfg := range items { + name := cfg.Name + ".yaml" + if cfg.Id == "" { + errs = append(errs, ValidationError{File: name, Field: "id", Blocking: true, Message: "required field missing"}) + } + if cfg.Version == "" { + errs = append(errs, ValidationError{File: name, Field: "version", PluginID: cfg.Id, Blocking: true, Message: "required field missing"}) + } else if !semverRE.MatchString(cfg.Version) { + errs = append(errs, ValidationError{ + File: name, + Field: "version", + PluginID: cfg.Id, + Blocking: true, + Message: fmt.Sprintf("%q is not valid semver (expected MAJOR.MINOR.PATCH)", cfg.Version), + }) + } + } + errs = append(errs, l.BaseLoader.Validate(items, binaries)...) 
+ return errs +} + +func NewRuleConfigManager(log *logger.Logger, dir string) *RuleConfigManager { + return cfg.NewConfigManager[*RuleMetadata](log, "rule", dir, Loader{}) +} + +// RulesForLogType returns all enabled rules from reg that apply to logType. +// An empty log_types list means the rule applies to all log types. +func RulesForLogType(reg *RuleRegistry, logType string) []*RuleMetadata { + var result []*RuleMetadata + for _, cfg := range reg.All() { + if !cfg.Enabled { + continue + } + if len(cfg.LogTypesField) == 0 { + result = append(result, cfg) + continue + } + for _, lt := range cfg.LogTypesField { + if lt == logType { + result = append(result, cfg) + break + } + } + } + return result +} diff --git a/pkg/rules/config/config.go b/pkg/rules/config/config.go deleted file mode 100644 index 78e2645..0000000 --- a/pkg/rules/config/config.go +++ /dev/null @@ -1,288 +0,0 @@ -// Each rule binary ships alongside a .yaml file that contains the -// full rule configuration. -// -// YAML schema example: -// -// id: "550e8400-e29b-41d4-a716-446655440000" -// name: "brute_force_login" -// display_name: "Brute Force Login Attempt" -// description: "Detects repeated failed login attempts from a single source." 
-// enabled: true -// version: "1.2.0" -// file_name: "brute_force_login" -// severity: "high" -// confidence: "medium" -// signal: true -// signal_threshold: "medium" -// log_types: ["auth", "cloudtrail"] -// matchers: ["prod-accounts"] -// merge_by_keys: ["source_ip", "username"] -// merge_window_mins: 60 -// req_subkeys: ["source_ip"] -// tags: ["t1078", "initial-access"] -// dispatchers: ["pagerduty", "slack"] -// formatters: ["json-summary"] -// enrichments: ["geoip"] -// tuning_rules: ["noisy-hosts"] -// references: ["https://attack.mitre.org/techniques/T1110/"] - -package config - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "github.com/harishhary/blink/internal/plugin" - internal "github.com/harishhary/blink/internal/pools" - "github.com/harishhary/blink/pkg/scoring" - "go.yaml.in/yaml/v4" -) - -// Observable describes one observable field that a rule can surface in an alert. -type Observable struct { - NameVal string `yaml:"name"` - DescriptionVal string `yaml:"description"` - AggregationVal bool `yaml:"aggregation"` -} - -func (o *Observable) Name() string { return o.NameVal } -func (o *Observable) Description() string { return o.DescriptionVal } -func (o *Observable) Aggregation() bool { return o.AggregationVal } - -// RuleMetadata is the in-memory representation of a rule YAML sidecar file. 
-type RuleMetadata struct { - plugin.PluginMetadata `yaml:",inline"` - - // Scoring - SeverityStr string `yaml:"severity"` - ConfidenceStr string `yaml:"confidence"` - SignalThresholdStr string `yaml:"signal_threshold"` - - // Routing / matching - LogTypesField []string `yaml:"log_types"` - MatchersField []string `yaml:"matchers"` - ReqSubkeysField []string `yaml:"req_subkeys"` - - // Merging - MergeByKeysField []string `yaml:"merge_by_keys"` - MergeWindowMinsField uint32 `yaml:"merge_window_mins"` - - // Signal - SignalField bool `yaml:"signal"` - - // Labelling - TagsField []string `yaml:"tags"` - ReferencesField []string `yaml:"references"` - - // Observables - static fields the rule surfaces in generated alerts. - ObservablesField []Observable `yaml:"observables"` - - // Pipeline stages - DispatchersField []string `yaml:"dispatchers"` - FormattersField []string `yaml:"formatters"` - EnrichmentsField []string `yaml:"enrichments"` - TuningRulesField []string `yaml:"tuning_rules"` - - // Parsed scoring values - populated by Load(); not read from YAML directly. - severity scoring.Severity - confidence scoring.Confidence - signalThreshold scoring.Confidence - riskScore scoring.RiskScore -} - -// Load reads and validates a single YAML sidecar file, returning a *RuleMetadata -func Load(path string) (*RuleMetadata, error) { - data, err := os.ReadFile(path) - if err != nil { - return nil, fmt.Errorf("config: read %s: %w", path, err) - } - - var cfg RuleMetadata - if err := yaml.Unmarshal(data, &cfg); err != nil { - return nil, fmt.Errorf("config: parse %s: %w", path, err) - } - - if err := cfg.resolve(path, data); err != nil { - return nil, fmt.Errorf("config: validate %s: %w", path, err) - } - - return &cfg, nil -} - -// New constructs a RuleMetadata from already-parsed field values (e.g. from a proto payload). 
-func New(c RuleMetadata) (*RuleMetadata, error) { - if err := c.resolveScoring(); err != nil { - return nil, err - } - return &c, nil -} - -// resolveScoring parses the string scoring fields to their typed equivalents -// and computes the risk score. -func (c *RuleMetadata) resolveScoring() error { - var err error - if c.SeverityStr != "" { - c.severity, err = scoring.ParseSeverity(c.SeverityStr) - if err != nil { - return err - } - } - if c.ConfidenceStr != "" { - c.confidence, err = scoring.ParseConfidence(c.ConfidenceStr) - if err != nil { - return err - } - } - if c.SignalThresholdStr != "" { - c.signalThreshold, err = scoring.ParseConfidence(c.SignalThresholdStr) - if err != nil { - return err - } - } - c.riskScore = scoring.ComputeRiskScore(c.confidence, c.severity) - return nil -} - -// resolve parses string-typed scoring fields, fills defaults, and computes -// the checksum when one is not provided in the YAML. -func (c *RuleMetadata) resolve(path string, _ []byte) error { - if c.Name == "" { - return fmt.Errorf("name is required") - } - - if err := c.resolveScoring(); err != nil { - return err - } - - // Default file_name to the YAML file's base name (without extension). 
- if c.FileName == "" { - base := filepath.Base(path) - c.FileName = strings.TrimSuffix(base, filepath.Ext(base)) - } - - return nil -} - -func (c *RuleMetadata) References() []string { return c.ReferencesField } -func (c *RuleMetadata) Severity() scoring.Severity { return c.severity } -func (c *RuleMetadata) Confidence() scoring.Confidence { return c.confidence } -func (c *RuleMetadata) RiskScore() scoring.RiskScore { return c.riskScore } -func (c *RuleMetadata) MergeByKeys() []string { return c.MergeByKeysField } -func (c *RuleMetadata) MergeWindowMins() time.Duration { return time.Duration(c.MergeWindowMinsField) * time.Minute } -func (c *RuleMetadata) ReqSubkeys() []string { return c.ReqSubkeysField } -func (c *RuleMetadata) Signal() bool { return c.SignalField } -func (c *RuleMetadata) SignalThreshold() scoring.Confidence { return c.signalThreshold } -func (c *RuleMetadata) Tags() []string { return c.TagsField } -func (c *RuleMetadata) Dispatchers() []string { return c.DispatchersField } -func (c *RuleMetadata) LogTypes() []string { return c.LogTypesField } -func (c *RuleMetadata) Observables() []Observable { return c.ObservablesField } -func (c *RuleMetadata) Matchers() []string { return c.MatchersField } -func (c *RuleMetadata) Formatters() []string { return c.FormattersField } -func (c *RuleMetadata) Enrichments() []string { return c.EnrichmentsField } -func (c *RuleMetadata) TuningRules() []string { return c.TuningRulesField } - -func mergeRouting(a, b internal.RoutingEntry) internal.RoutingEntry { - out := a - if b.Mode > a.Mode { - out.Mode = b.Mode - } - if b.RolloutPct > a.RolloutPct { - out.RolloutPct = b.RolloutPct - } - return out -} - -type Registry struct { - byName map[string]*RuleMetadata - byID map[string]*RuleMetadata - byFileName map[string]*RuleMetadata - routing map[string]internal.RoutingEntry // merged routing config per plugin ID - all []*RuleMetadata -} - -func NewRegistry(dir string) (*Registry, error) { - entries, err := 
os.ReadDir(dir) - if err != nil { - return nil, fmt.Errorf("config: read dir %s: %w", dir, err) - } - - reg := &Registry{ - byName: make(map[string]*RuleMetadata), - byID: make(map[string]*RuleMetadata), - byFileName: make(map[string]*RuleMetadata), - routing: make(map[string]internal.RoutingEntry), - } - - var errs []string - for _, e := range entries { - if e.IsDir() { - continue - } - name := e.Name() - if !strings.HasSuffix(name, ".yaml") && !strings.HasSuffix(name, ".yml") { - continue - } - cfg, err := Load(filepath.Join(dir, name)) - if err != nil { - errs = append(errs, err.Error()) - continue - } - reg.byName[cfg.Name] = cfg - reg.byFileName[cfg.FileName] = cfg - if cfg.Id != "" { - reg.byID[cfg.Id] = cfg - re := internal.RoutingEntry{ - Mode: cfg.RolloutMode, - RolloutPct: cfg.RolloutPct, - } - if existing, ok := reg.routing[cfg.Id]; ok { - reg.routing[cfg.Id] = mergeRouting(existing, re) - } else { - reg.routing[cfg.Id] = re - } - } - reg.all = append(reg.all, cfg) - } - - if len(errs) > 0 { - return reg, fmt.Errorf("config: %d file(s) failed to load:\n %s", len(errs), strings.Join(errs, "\n ")) - } - return reg, nil -} - -func (r *Registry) All() []*RuleMetadata { return r.all } -func (r *Registry) ByName(name string) *RuleMetadata { return r.byName[name] } -func (r *Registry) ByID(id string) *RuleMetadata { return r.byID[id] } -func (r *Registry) ByFileName(fileName string) *RuleMetadata { return r.byFileName[fileName] } - -// RoutingByID returns the merged routing config for a plugin ID. -// When multiple YAML sidecars share the same ID, their routing fields are merged -// using max-restrictive semantics (see mergeRouting). The zero value (blue-green, -// no kill switch) is returned when no YAML declares a routing config for this ID. -func (r *Registry) RoutingByID(id string) internal.RoutingEntry { return r.routing[id] } - -func (r *Registry) Len() int { return len(r.all) } - -// An empty log_types list means the rule applies to all log types. 
-func (r *Registry) RulesForLogType(logType string) []*RuleMetadata { - var result []*RuleMetadata - for _, cfg := range r.all { - if !cfg.Enabled { - continue - } - if len(cfg.LogTypesField) == 0 { - result = append(result, cfg) - continue - } - for _, lt := range cfg.LogTypesField { - if lt == logType { - result = append(result, cfg) - break - } - } - } - return result -} diff --git a/pkg/rules/config/watcher.go b/pkg/rules/config/watcher.go deleted file mode 100644 index df1a2ce..0000000 --- a/pkg/rules/config/watcher.go +++ /dev/null @@ -1,149 +0,0 @@ -package config - -import ( - "context" - "sync/atomic" - "time" - - "github.com/fsnotify/fsnotify" - svcctx "github.com/harishhary/blink/internal/context" - "github.com/harishhary/blink/internal/errors" - "github.com/harishhary/blink/internal/logger" -) - -const debounce = 400 * time.Millisecond - -// Watcher watches a directory of YAML sidecar files and rebuilds the Registry -// when any file changes. -type Watcher struct { - svcctx.ServiceContext - dir string - current atomic.Pointer[Registry] -} - -// Creates a Watcher for dir and does an initial load. -func NewWatcher(dir string) (*Watcher, error) { - sc := svcctx.New("config-watcher") - sc.Logger = logger.New(sc.Name(), "dev") - - w := &Watcher{ServiceContext: sc, dir: dir} - - errs := Validate(dir) - for _, err := range errs { - w.ErrorF("config validation: %v", err) - } - - reg, err := NewRegistry(dir) - if err != nil && reg == nil { - return nil, err - } - if err != nil { - w.ErrorF("initial load errors: %v", err) - } - w.current.Store(reg) - return w, nil -} - -// HasBlockingError reports whether pluginID currently has any blocking validation error. -// It runs Validate() fresh on every call so that IsReady() in the rule adapter always -// sees the current disk state - avoiding the race between the config watcher's reload -// debounce and the manager's reconcile firing from the same fsnotify event. 
-func (w *Watcher) HasBlockingError(pluginID string) bool { - if pluginID == "" { - return false - } - for _, err := range Validate(w.dir) { - if err.Blocking() && err.PluginID == pluginID { - return true - } - } - return false -} - -// HasBlockingErrorFor is like HasBlockingError but also matches by YAML file name -// (e.g. "brute_force.yaml"). This catches rules whose id: field is missing - those -// errors carry no PluginID, but do carry File set to the YAML filename. -func (w *Watcher) HasBlockingErrorFor(pluginID, yamlFile string) bool { - for _, e := range Validate(w.dir) { - if !e.Blocking() { - continue - } - if pluginID != "" && e.PluginID == pluginID { - return true - } - if yamlFile != "" && e.File == yamlFile { - return true - } - } - return false -} - -// Returns the most recently loaded Registry. -func (w *Watcher) Current() *Registry { - return w.current.Load() -} - -// Starts the fsnotify watch loop. Blocks until ctx is cancelled. -func (w *Watcher) Run(ctx context.Context) errors.Error { - fsw, err := fsnotify.NewWatcher() - if err != nil { - return errors.NewE(err) - } - defer fsw.Close() - - if err := fsw.Add(w.dir); err != nil { - return errors.NewE(err) - } - - var timer *time.Timer - resetTimer := func() { - if timer != nil { - timer.Stop() - } - timer = time.AfterFunc(debounce, w.reload) - } - - for { - select { - case event, ok := <-fsw.Events: - if !ok { - return nil - } - if isYAML(event.Name) { - resetTimer() - } - case err, ok := <-fsw.Errors: - if !ok { - return nil - } - w.ErrorF("fsnotify error: %v", err) - case <-ctx.Done(): - if timer != nil { - timer.Stop() - } - return nil - } - } -} - -func (w *Watcher) reload() { - errs := Validate(w.dir) - for _, err := range errs { - w.ErrorF("config validation: %v", err) - } - - reg, err := NewRegistry(w.dir) - if err != nil { - w.ErrorF("reload error: %v", err) - if reg == nil { - return - } - } - w.current.Store(reg) - w.Info("loaded %d rule configs from %s", reg.Len(), w.dir) -} - -func 
isYAML(name string) bool { - n := len(name) - return (n > 5 && name[n-5:] == ".yaml") || (n > 4 && name[n-4:] == ".yml") -} diff --git a/pkg/rules/helpers.go b/pkg/rules/helpers.go index 50af919..f78b03d 100644 --- a/pkg/rules/helpers.go +++ b/pkg/rules/helpers.go @@ -3,11 +3,10 @@ package rules import ( "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/rules/config" ) // DefaultSubKeysInEvent checks that every required subkey is present in the event. -func DefaultSubKeysInEvent(r *config.RuleMetadata, event events.Event) bool { +func DefaultSubKeysInEvent(r *RuleMetadata, event events.Event) bool { if !r.Enabled { return false } diff --git a/pkg/rules/manager.go b/pkg/rules/manager.go index 45475b0..85469d1 100644 --- a/pkg/rules/manager.go +++ b/pkg/rules/manager.go @@ -3,11 +3,12 @@ package rules import ( "github.com/harishhary/blink/internal/logger" "github.com/harishhary/blink/internal/plugin" - "github.com/harishhary/blink/pkg/rules/config" ) var ruleManagerMetrics = plugin.NewPluginManagerMetrics("rulesvc") -func NewManager(log *logger.Logger, notify plugin.Notify, dir string, watcher *config.Watcher) *plugin.PluginManager[Rule] { - return plugin.NewPluginManager[Rule](log, notify, dir, &RuleAdapter{Watcher: watcher}, ruleManagerMetrics) +type RulePluginManager = plugin.PluginManager[Rule] + +func NewRulePluginManager(log *logger.Logger, notify plugin.Notify, dir string, manager *RuleConfigManager) *RulePluginManager { + return plugin.NewPluginManager[Rule](log, notify, dir, &RuleAdapter{Manager: manager}, ruleManagerMetrics) } diff --git a/pkg/rules/manager_test.go b/pkg/rules/manager_test.go index a107ba4..b346e2c 100644 --- a/pkg/rules/manager_test.go +++ b/pkg/rules/manager_test.go @@ -11,6 +11,7 @@ import ( "github.com/harishhary/blink/internal/logger" "github.com/harishhary/blink/internal/messaging" "github.com/harishhary/blink/internal/plugin" + "github.com/harishhary/blink/internal/services" 
"github.com/harishhary/blink/pkg/rules" "github.com/harishhary/blink/pkg/rules/config" ) @@ -22,11 +23,11 @@ const ( ) // testSidecarYAML is the YAML sidecar for the simple_rule test plugin binary. -// The file_name must match the binary base name ("simple_rule"). +// The name field must match the binary base name ("simple_rule"). const testSidecarYAML = ` id: "test-simple-rule-id" -name: "simple-rule" -file_name: "simple_rule" +name: "simple_rule" +display_name: "simple-rule" description: "always matches - used for integration tests" enabled: true version: "1.0.0" @@ -121,20 +122,18 @@ func TestManagerHotReload(t *testing.T) { // available when the binary appears. writeSidecar(t, dir) - cfgWatcher, err := config.NewWatcher(dir) - if err != nil { - t.Fatalf("config watcher: %v", err) - } + cfgMgr := config.NewRuleConfigManager(logger.New("test-config", "dev"), dir) + cfgSvc := services.NewConfigSyncService("test-config", "test-config", cfgMgr) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - go cfgWatcher.Run(ctx) //nolint:errcheck + go cfgSvc.Run(ctx) //nolint:errcheck // Use a buffered channel as the notify sink - replaces the old message bus. events := make(chan messaging.Message, 64) notify := func(msg messaging.Message) { events <- msg } log := logger.New("rules-manager-test", "dev") - mgr := rules.NewManager(log, notify, dir, cfgWatcher) + mgr := rules.NewRulePluginManager(log, notify, dir, cfgMgr) if err := mgr.Start(context.Background()); err != nil { t.Fatalf("Start: %v", err) } @@ -142,7 +141,7 @@ func TestManagerHotReload(t *testing.T) { // Build and drop the plugin binary - manager should pick it up. 
binPath := buildPlugin(t, dir) - if !waitForRegister(t, events, "simple-rule", registerTimeout) { + if !waitForRegister(t, events, "simple_rule", registerTimeout) { t.Fatal("timed out waiting for RegisterMessage after binary appears") } diff --git a/pkg/rules/pool/pool.go b/pkg/rules/pool.go similarity index 66% rename from pkg/rules/pool/pool.go rename to pkg/rules/pool.go index 2d94a31..cb7c672 100644 --- a/pkg/rules/pool/pool.go +++ b/pkg/rules/pool.go @@ -1,4 +1,4 @@ -package pool +package rules import ( "context" @@ -9,32 +9,30 @@ import ( "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/rules" - "github.com/harishhary/blink/pkg/rules/config" ) type Pool struct { - *internal.ProcessPool[rules.Rule] - watcher *config.Watcher + *internal.ProcessPool[Rule] + manager *RuleConfigManager } -func NewPool(watcher *config.Watcher, drainTimeout time.Duration) *Pool { +func NewPool(manager *RuleConfigManager, drainTimeout time.Duration) *Pool { routing := func(id string) (internal.RolloutMode, float64) { - re := watcher.Current().RoutingByID(id) + re := manager.Current().RoutingByID(id) return re.Mode, re.RolloutPct } return &Pool{ - ProcessPool: internal.NewProcessPool[rules.Rule](routing, internal.NewPoolMetrics("rules"), drainTimeout), - watcher: watcher, + ProcessPool: internal.NewProcessPool[Rule](routing, internal.NewPoolMetrics("rules"), drainTimeout), + manager: manager, } } // Evaluate runs all evts against the rule identified by ruleID in a single pool call. 
-func (p *Pool) Evaluate(ctx context.Context, ruleID string, evts []events.Event, canaryHashKey string) ([]rules.EvalResult, errors.Error) { - var results []rules.EvalResult - err := p.Call(ctx, ruleID, canaryHashKey, func(ctx context.Context, r rules.Rule) error { +func (p *Pool) Evaluate(ctx context.Context, ruleID string, evts []events.Event, canaryHashKey string) ([]EvalResult, errors.Error) { + var results []EvalResult + err := p.Call(ctx, ruleID, canaryHashKey, func(ctx context.Context, r Rule) error { if !r.RuleMetadata().Enabled { - results = make([]rules.EvalResult, len(evts)) + results = make([]EvalResult, len(evts)) return nil } var e errors.Error @@ -51,7 +49,7 @@ func (p *Pool) Evaluate(ctx context.Context, ruleID string, evts []events.Event, // Combining the YAML version with the binary checksum means a binary change // always produces a distinct key even if the operator forgot to bump the version // string in the rule config - preventing silent same-key overwrites in the pool. -func poolKey(r rules.Rule) internal.PoolKey { +func poolKey(r Rule) internal.PoolKey { cfg := r.RuleMetadata() return internal.PoolKey{Id: cfg.Id, Version: cfg.Version, Hash: r.Checksum()} } @@ -59,13 +57,13 @@ func poolKey(r rules.Rule) internal.PoolKey { // Handles plugin lifecycle messages from the plugin manager bus, registering or deregistering rules in the pool. 
func (p *Pool) Sync(msg messaging.Message) { switch m := msg.(type) { - case plugin.RegisterMessage[rules.Rule]: + case plugin.RegisterMessage[Rule]: p.Register(poolKey(m.Items[0]), m.Items, m.MaxProcs, nil) - case plugin.UpdateMessage[rules.Rule]: + case plugin.UpdateMessage[Rule]: p.Register(poolKey(m.Items[0]), m.Items, m.MaxProcs, m.OnDrained) - case plugin.UnregisterMessage[rules.Rule]: + case plugin.UnregisterMessage[Rule]: p.Unregister(m.ItemKey) - case plugin.RemoveMessage[rules.Rule]: + case plugin.RemoveMessage[Rule]: p.Remove(m.ItemKey) } } diff --git a/pkg/rules/rpc_rules.go b/pkg/rules/rpc_rules.go index f0e91ea..62de236 100644 --- a/pkg/rules/rpc_rules.go +++ b/pkg/rules/rpc_rules.go @@ -7,41 +7,44 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/rules/config" "github.com/harishhary/blink/pkg/rules/rpc_rules" ) // This is the executor-side wrapper for a live rule subprocess. type rpcRule struct { client rpc_rules.RuleClient - cfgWatcher *config.Watcher + cfgManager *RuleConfigManager fileName string checksum string // SHA-256 of the binary } -func newRpcRule(fileName string, client rpc_rules.RuleClient, watcher *config.Watcher, checksum string) *rpcRule { +func newRpcRule(fileName string, client rpc_rules.RuleClient, manager *RuleConfigManager, checksum string) *rpcRule { return &rpcRule{ client: client, - cfgWatcher: watcher, + cfgManager: manager, fileName: fileName, checksum: checksum, } } -func (r *rpcRule) cfg() *config.RuleMetadata { - if r.cfgWatcher == nil { +func (r *rpcRule) cfg() *RuleMetadata { + if r.cfgManager == nil { return nil } - return r.cfgWatcher.Current().ByFileName(r.fileName) + v, ok := r.cfgManager.Current().ByFileName(r.fileName) + if !ok { + return nil + } + return v } // RuleMetadata returns the live YAML-derived rule configuration for this plugin. 
-func (r *rpcRule) RuleMetadata() *config.RuleMetadata { +func (r *rpcRule) RuleMetadata() *RuleMetadata { if c := r.cfg(); c != nil { return c } // Return a minimal stub so callers don't need to nil-check. - return &config.RuleMetadata{PluginMetadata: plugin.PluginMetadata{FileName: r.fileName, Name: r.fileName, Id: r.fileName}} + return &RuleMetadata{PluginMetadata: plugin.PluginMetadata{Name: r.fileName, Id: r.fileName}} } func (r *rpcRule) Checksum() string { return r.checksum } diff --git a/pkg/rules/rule.go b/pkg/rules/rule.go index 8034066..80fbb3c 100644 --- a/pkg/rules/rule.go +++ b/pkg/rules/rule.go @@ -6,11 +6,9 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/rules/config" ) type PluginMetadata = plugin.PluginMetadata -type RuleMetadata = config.RuleMetadata // EvalResult is the per-event outcome returned by Rule.Evaluate. // Fields beyond Matched are populated only when the plugin implements the diff --git a/pkg/rules/sdk/serve.go b/pkg/rules/serve.go similarity index 87% rename from pkg/rules/sdk/serve.go rename to pkg/rules/serve.go index de299c0..3d1e95d 100644 --- a/pkg/rules/sdk/serve.go +++ b/pkg/rules/serve.go @@ -1,4 +1,4 @@ -package sdk +package rules import ( "context" @@ -62,14 +62,14 @@ type RulePlugin interface { // Embed in your rule struct and override only what you need. 
type BaseRule struct{} -func (BaseRule) Init() error { return nil } -func (BaseRule) Shutdown() error { return nil } -func (BaseRule) AlertTitle(_ events.Event) string { return "" } -func (BaseRule) AlertDescription(_ events.Event) string { return "" } -func (BaseRule) AlertSeverity(_ events.Event) string { return "" } -func (BaseRule) AlertContext(_ events.Event) map[string]any { return nil } -func (BaseRule) AlertMergeByKeys(_ events.Event) []string { return nil } -func (BaseRule) AlertReqSubkeys(_ events.Event) bool { return true } +func (BaseRule) Init() error { return nil } +func (BaseRule) Shutdown() error { return nil } +func (BaseRule) AlertTitle(_ events.Event) string { return "" } +func (BaseRule) AlertDescription(_ events.Event) string { return "" } +func (BaseRule) AlertSeverity(_ events.Event) string { return "" } +func (BaseRule) AlertContext(_ events.Event) map[string]any { return nil } +func (BaseRule) AlertMergeByKeys(_ events.Event) []string { return nil } +func (BaseRule) AlertReqSubkeys(_ events.Event) bool { return true } // server wraps a RulePlugin and serves the gRPC RuleServer interface. 
type server struct { diff --git a/pkg/tuning_rules/launcher.go b/pkg/tuning_rules/adapter.go similarity index 80% rename from pkg/tuning_rules/launcher.go rename to pkg/tuning_rules/adapter.go index a7d3cf7..b4bcbb7 100644 --- a/pkg/tuning_rules/launcher.go +++ b/pkg/tuning_rules/adapter.go @@ -11,16 +11,15 @@ import ( "github.com/harishhary/blink/internal/helpers" "github.com/harishhary/blink/internal/plugin" internal "github.com/harishhary/blink/internal/pools" - "github.com/harishhary/blink/pkg/tuning_rules/config" "github.com/harishhary/blink/pkg/tuning_rules/rpc_tuning_rules" ) type TuningRuleAdapter struct { - Watcher *config.Watcher + Manager *TuningRuleConfigManager } -func (l *TuningRuleAdapter) PluginKey() string { return "tuning_rule" } -func (l *TuningRuleAdapter) MagicValue() string { return "tuning_rule_v1" } +func (l *TuningRuleAdapter) PluginKey() string { return "tuning_rule" } +func (l *TuningRuleAdapter) MagicValue() string { return "tuning_rule_v1" } func (l *TuningRuleAdapter) GRPCPlugin() goplugin.Plugin { return &tuningPlugin{} } // Handshake connects to the tuning rule subprocess, calls Init, and returns a @@ -40,8 +39,8 @@ func (l *TuningRuleAdapter) Handshake(ctx context.Context, raw interface{}, binP return nil, nil, "", "", fmt.Errorf("init: %w", err) } - tr := newRpcTuningRule(fileName, rpc, l.Watcher, hash) - cfg, ok := l.Watcher.Current().ByFileName(fileName) + tr := newRpcTuningRule(fileName, rpc, l.Manager, hash) + cfg, ok := l.Manager.Current().ByFileName(fileName) id, name := fileName, fileName if ok { id = cfg.Id @@ -52,13 +51,13 @@ func (l *TuningRuleAdapter) Handshake(ctx context.Context, raw interface{}, binP // IsReady reports whether this binary's YAML sidecar exists in the current registry. 
func (l *TuningRuleAdapter) IsReady(binPath string) bool { - _, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + _, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) return ok } // IsShadow reports whether this binary's YAML declares it as a shadow or canary version. func (l *TuningRuleAdapter) IsShadow(binPath string) bool { - cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) if !ok { return false } @@ -68,12 +67,12 @@ func (l *TuningRuleAdapter) IsShadow(binPath string) bool { // IsEnabled reports whether the tuning rule's YAML sidecar still exists and is enabled. func (l *TuningRuleAdapter) IsEnabled(h *plugin.PluginHandle) bool { - cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(h.BinPath)) return ok && cfg.Enabled } func (l *TuningRuleAdapter) Workers(binPath string) int { - cfg, ok := l.Watcher.Current().ByFileName(helpers.BinaryBaseName(binPath)) + cfg, ok := l.Manager.Current().ByFileName(helpers.BinaryBaseName(binPath)) if !ok || cfg.MaxProcs <= 0 { return 1 } @@ -94,7 +93,9 @@ func (l *tuningLifecycle) Shutdown(ctx context.Context) error { return err } -type tuningPlugin struct{ goplugin.NetRPCUnsupportedPlugin } +type tuningPlugin struct { + goplugin.NetRPCUnsupportedPlugin +} func (p *tuningPlugin) GRPCServer(_ *goplugin.GRPCBroker, _ *grpc.Server) error { return nil } func (p *tuningPlugin) GRPCClient(_ context.Context, _ *goplugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { diff --git a/pkg/tuning_rules/config.go b/pkg/tuning_rules/config.go new file mode 100644 index 0000000..81a8c77 --- /dev/null +++ b/pkg/tuning_rules/config.go @@ -0,0 +1,45 @@ +// Each tuning rule binary ships alongside a .yaml sidecar file. 
+// +// YAML schema example: +// +// id: "550e8400-e29b-41d4-a716-446655440003" +// name: "noisy-hosts" +// display_name: "Noisy Hosts Suppressor" +// description: "Ignores alerts from known-noisy infrastructure hosts." +// enabled: true +// version: "1.0.0" +// file_name: "noisy-hosts" +// global: false +// rule_type: "ignore" # ignore | set_confidence | increase_confidence | decrease_confidence +// confidence: "" # only used when rule_type is *_confidence (e.g. "0.8" or "medium") +// mode: "blue-green" +// min_procs: 1 +// max_procs: 2 + +package tuning_rules + +import ( + cfg "github.com/harishhary/blink/internal/config" + "github.com/harishhary/blink/internal/logger" + "github.com/harishhary/blink/internal/plugin" +) + +// TuningMetadata is the in-memory representation of a tuning rule YAML sidecar. +type TuningRuleMetadata struct { + plugin.PluginMetadata `yaml:",inline"` + Global bool `yaml:"global"` + RuleType string `yaml:"rule_type"` // "ignore", "set_confidence", "increase_confidence", "decrease_confidence" + Confidence string `yaml:"confidence"` // meaningful only for *_confidence rule types +} + +type TuningRuleConfigManager = cfg.ConfigManager[*TuningRuleMetadata] + +// Loader implements cfg.Loader[*TuningMetadata] for tuning rules. +// Embed cfg.BaseLoader to inherit default Parse, Validate, and CrossValidate. +type Loader struct { + cfg.BaseLoader[TuningRuleMetadata, *TuningRuleMetadata] +} + +func NewTuningRuleConfigManager(log *logger.Logger, dir string) *TuningRuleConfigManager { + return cfg.NewConfigManager[*TuningRuleMetadata](log, "tuning_rule", dir, Loader{}) +} diff --git a/pkg/tuning_rules/config/config.go b/pkg/tuning_rules/config/config.go deleted file mode 100644 index a2ee612..0000000 --- a/pkg/tuning_rules/config/config.go +++ /dev/null @@ -1,76 +0,0 @@ -// Each tuning rule binary ships alongside a .yaml sidecar file. 
-// -// YAML schema example: -// -// id: "550e8400-e29b-41d4-a716-446655440003" -// name: "noisy-hosts" -// display_name: "Noisy Hosts Suppressor" -// description: "Ignores alerts from known-noisy infrastructure hosts." -// enabled: true -// version: "1.0.0" -// file_name: "noisy-hosts" -// global: false -// rule_type: "ignore" # ignore | set_confidence | increase_confidence | decrease_confidence -// confidence: "" # only used when rule_type is *_confidence (e.g. "0.8" or "medium") -// mode: "blue-green" -// min_procs: 1 -// max_procs: 2 - -package config - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/harishhary/blink/internal/plugin" - "go.yaml.in/yaml/v4" -) - -// TuningMetadata is the in-memory representation of a tuning rule YAML sidecar. -type TuningMetadata struct { - plugin.PluginMetadata `yaml:",inline"` - Global bool `yaml:"global"` - RuleType string `yaml:"rule_type"` // "ignore", "set_confidence", "increase_confidence", "decrease_confidence" - Confidence string `yaml:"confidence"` // meaningful only for *_confidence rule types -} - -// loader implements plugin.Loader[*TuningMetadata]. -type loader struct{} - -func (loader) Load(path string) (*TuningMetadata, error) { - data, err := os.ReadFile(path) - if err != nil { - return nil, fmt.Errorf("tuning config: read %s: %w", path, err) - } - var cfg TuningMetadata - if err := yaml.Unmarshal(data, &cfg); err != nil { - return nil, fmt.Errorf("tuning config: parse %s: %w", path, err) - } - if cfg.Name == "" { - return nil, fmt.Errorf("tuning config: %s: name is required", path) - } - if cfg.FileName == "" { - base := filepath.Base(path) - cfg.FileName = strings.TrimSuffix(base, filepath.Ext(base)) - } - if cfg.Id == "" { - cfg.Id = cfg.FileName - } - return &cfg, nil -} - -func (loader) Validate(all []*TuningMetadata) error { return nil } - -// Registry and Watcher are the generic implementations parameterised for tuning rules. 
-type Registry = plugin.Registry[*TuningMetadata] -type Watcher = plugin.Watcher[*TuningMetadata] - -func NewRegistry(dir string) (*Registry, error) { - return plugin.NewRegistry(dir, "tuning_rule", loader{}) -} - -func NewWatcher(dir string) (*Watcher, error) { - return plugin.NewWatcher("tuning-config-watcher", dir, "tuning_rule", loader{}) -} diff --git a/pkg/tuning_rules/manager.go b/pkg/tuning_rules/manager.go index 87e2b93..92c3356 100644 --- a/pkg/tuning_rules/manager.go +++ b/pkg/tuning_rules/manager.go @@ -3,11 +3,12 @@ package tuning_rules import ( "github.com/harishhary/blink/internal/logger" "github.com/harishhary/blink/internal/plugin" - "github.com/harishhary/blink/pkg/tuning_rules/config" ) var tuningManagerMetrics = plugin.NewPluginManagerMetrics("tuning_rules") -func NewManager(log *logger.Logger, notify plugin.Notify, dir string, watcher *config.Watcher) *plugin.PluginManager[TuningRule] { - return plugin.NewPluginManager[TuningRule](log, notify, dir, &TuningRuleAdapter{Watcher: watcher}, tuningManagerMetrics) +type TuningRulePluginManager = plugin.PluginManager[TuningRule] + +func NewTuningRulePluginManager(log *logger.Logger, notify plugin.Notify, dir string, manager *TuningRuleConfigManager) *TuningRulePluginManager { + return plugin.NewPluginManager[TuningRule](log, notify, dir, &TuningRuleAdapter{Manager: manager}, tuningManagerMetrics) } diff --git a/pkg/tuning_rules/pool/pool.go b/pkg/tuning_rules/pool.go similarity index 66% rename from pkg/tuning_rules/pool/pool.go rename to pkg/tuning_rules/pool.go index 21cef41..212507d 100644 --- a/pkg/tuning_rules/pool/pool.go +++ b/pkg/tuning_rules/pool.go @@ -1,4 +1,4 @@ -package pool +package tuning_rules import ( "context" @@ -10,27 +10,26 @@ import ( internal "github.com/harishhary/blink/internal/pools" "github.com/harishhary/blink/pkg/alerts" "github.com/harishhary/blink/pkg/scoring" - tuning "github.com/harishhary/blink/pkg/tuning_rules" ) type Pool struct { - 
*internal.ProcessPool[tuning.TuningRule] + *internal.ProcessPool[TuningRule] } func NewPool(routing *internal.RoutingTable, drainTimeout time.Duration) *Pool { return &Pool{ - ProcessPool: internal.NewProcessPool[tuning.TuningRule](routing.Config(), internal.NewPoolMetrics("tuning_rules"), drainTimeout), + ProcessPool: internal.NewProcessPool[TuningRule](routing.Config(), internal.NewPoolMetrics("tuning_rules"), drainTimeout), } } // Tune calls tuningRuleID once with all alerts, returning per-alert apply results. // ruleType and confidence are rule metadata - the same for every alert in the batch. func (p *Pool) Tune(ctx context.Context, tuningRuleID string, alerts []alerts.Alert, canaryHashKey string) ( - ruleType tuning.RuleType, confidence scoring.Confidence, applies []bool, _ errors.Error, + ruleType RuleType, confidence scoring.Confidence, applies []bool, _ errors.Error, ) { applies = make([]bool, len(alerts)) - err := p.Call(ctx, tuningRuleID, canaryHashKey, func(callCtx context.Context, t tuning.TuningRule) error { - if !t.TuningMetadata().Enabled { + err := p.Call(ctx, tuningRuleID, canaryHashKey, func(callCtx context.Context, t TuningRule) error { + if !t.TuningRuleMetadata().Enabled { return nil } ruleType = t.RuleType() @@ -46,23 +45,23 @@ func (p *Pool) Tune(ctx context.Context, tuningRuleID string, alerts []alerts.Al } // Handles plugin lifecycle messages from the plugin manager bus, registering or deregistering tuning rules in the pool. 
-func poolKey(t tuning.TuningRule) internal.PoolKey { - cfg := t.TuningMetadata() +func poolKey(t TuningRule) internal.PoolKey { + cfg := t.TuningRuleMetadata() return internal.PoolKey{Id: cfg.Id, Version: cfg.Version, Hash: t.Checksum()} } func (p *Pool) Sync(msg messaging.Message) { - register := func(onDrained func(), items []tuning.TuningRule, maxProcs int) { + register := func(onDrained func(), items []TuningRule, maxProcs int) { p.Register(poolKey(items[0]), items, maxProcs, onDrained) } switch m := msg.(type) { - case plugin.RegisterMessage[tuning.TuningRule]: + case plugin.RegisterMessage[TuningRule]: register(nil, m.Items, m.MaxProcs) - case plugin.UpdateMessage[tuning.TuningRule]: + case plugin.UpdateMessage[TuningRule]: register(m.OnDrained, m.Items, m.MaxProcs) - case plugin.UnregisterMessage[tuning.TuningRule]: + case plugin.UnregisterMessage[TuningRule]: p.Unregister(m.ItemKey) - case plugin.RemoveMessage[tuning.TuningRule]: + case plugin.RemoveMessage[TuningRule]: p.Remove(m.ItemKey) } } diff --git a/pkg/tuning_rules/rpc_tuning_rule.go b/pkg/tuning_rules/rpc_tuning_rule.go index b5c5df3..651dfe4 100644 --- a/pkg/tuning_rules/rpc_tuning_rule.go +++ b/pkg/tuning_rules/rpc_tuning_rule.go @@ -9,57 +9,56 @@ import ( "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts" "github.com/harishhary/blink/pkg/scoring" - "github.com/harishhary/blink/pkg/tuning_rules/config" "github.com/harishhary/blink/pkg/tuning_rules/rpc_tuning_rules" ) type rpcTuningRule struct { - cfgWatcher *config.Watcher + cfgManager *TuningRuleConfigManager fileName string checksum string client rpc_tuning_rules.TuningRuleClient } -func newRpcTuningRule(fileName string, client rpc_tuning_rules.TuningRuleClient, watcher *config.Watcher, checksum string) *rpcTuningRule { +func newRpcTuningRule(fileName string, client rpc_tuning_rules.TuningRuleClient, manager *TuningRuleConfigManager, checksum string) *rpcTuningRule { return &rpcTuningRule{ - cfgWatcher: 
watcher, + cfgManager: manager, fileName: fileName, checksum: checksum, client: client, } } -func (r *rpcTuningRule) cfg() *config.TuningMetadata { - if r.cfgWatcher == nil { +func (r *rpcTuningRule) cfg() *TuningRuleMetadata { + if r.cfgManager == nil { return nil } - v, _ := r.cfgWatcher.Current().ByFileName(r.fileName) + v, _ := r.cfgManager.Current().ByFileName(r.fileName) return v } // TuningMetadata returns the live YAML-derived tuning rule configuration. -func (r *rpcTuningRule) TuningMetadata() *config.TuningMetadata { +func (r *rpcTuningRule) TuningRuleMetadata() *TuningRuleMetadata { if c := r.cfg(); c != nil { return c } - return &config.TuningMetadata{PluginMetadata: plugin.PluginMetadata{Id: r.fileName, Name: r.fileName, FileName: r.fileName}} + return &TuningRuleMetadata{PluginMetadata: plugin.PluginMetadata{Id: r.fileName, Name: r.fileName}} } func (r *rpcTuningRule) Metadata() plugin.PluginMetadata { - return r.TuningMetadata().Metadata() + return r.TuningRuleMetadata().Metadata() } func (r *rpcTuningRule) Checksum() string { return r.checksum } func (r *rpcTuningRule) String() string { - m := r.TuningMetadata().Metadata() + m := r.TuningRuleMetadata().Metadata() return fmt.Sprintf("TuningRule '%s' (id:%s, enabled:%t)", m.Name, m.Id, m.Enabled) } -func (r *rpcTuningRule) Global() bool { return r.TuningMetadata().Global } +func (r *rpcTuningRule) Global() bool { return r.TuningRuleMetadata().Global } // RuleType parses the YAML rule_type string into a typed RuleType constant. func (r *rpcTuningRule) RuleType() RuleType { - switch r.TuningMetadata().RuleType { + switch r.TuningRuleMetadata().RuleType { case "set_confidence": return SetConfidence case "increase_confidence": @@ -73,7 +72,7 @@ func (r *rpcTuningRule) RuleType() RuleType { // Confidence parses the YAML confidence string into a scoring.Confidence value. 
func (r *rpcTuningRule) Confidence() scoring.Confidence { - conf, _ := scoring.ParseConfidence(r.TuningMetadata().Confidence) + conf, _ := scoring.ParseConfidence(r.TuningRuleMetadata().Confidence) return conf } diff --git a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go index bfa408a..dd50c06 100644 --- a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go +++ b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go @@ -57,204 +57,16 @@ func (*Empty) Descriptor() ([]byte, []int) { return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{0} } -type TuningMetadata struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` - Global bool `protobuf:"varint,5,opt,name=global,proto3" json:"global,omitempty"` - RuleType int32 `protobuf:"varint,6,opt,name=rule_type,json=ruleType,proto3" json:"rule_type,omitempty"` // 0=Ignore, 1=SetConfidence, 2=IncreaseConfidence, 3=DecreaseConfidence - Confidence string `protobuf:"bytes,7,opt,name=confidence,proto3" json:"confidence,omitempty"` // "verylow|low|medium|high|veryhigh" - Version string `protobuf:"bytes,8,opt,name=version,proto3" json:"version,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TuningMetadata) Reset() { - *x = TuningMetadata{} - mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TuningMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TuningMetadata) ProtoMessage() {} - -func (x *TuningMetadata) 
ProtoReflect() protoreflect.Message { - mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TuningMetadata.ProtoReflect.Descriptor instead. -func (*TuningMetadata) Descriptor() ([]byte, []int) { - return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{1} -} - -func (x *TuningMetadata) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *TuningMetadata) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *TuningMetadata) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *TuningMetadata) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *TuningMetadata) GetGlobal() bool { - if x != nil { - return x.Global - } - return false -} - -func (x *TuningMetadata) GetRuleType() int32 { - if x != nil { - return x.RuleType - } - return 0 -} - -func (x *TuningMetadata) GetConfidence() string { - if x != nil { - return x.Confidence - } - return "" -} - -func (x *TuningMetadata) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -type TuneRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - AlertJson []byte `protobuf:"bytes,1,opt,name=alert_json,json=alertJson,proto3" json:"alert_json,omitempty"` // JSON-encoded alerts.Alert - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TuneRequest) Reset() { - *x = TuneRequest{} - mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TuneRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TuneRequest) ProtoMessage() {} - -func (x 
*TuneRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TuneRequest.ProtoReflect.Descriptor instead. -func (*TuneRequest) Descriptor() ([]byte, []int) { - return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{2} -} - -func (x *TuneRequest) GetAlertJson() []byte { - if x != nil { - return x.AlertJson - } - return nil -} - -type TuneResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Applies bool `protobuf:"varint,1,opt,name=applies,proto3" json:"applies,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TuneResponse) Reset() { - *x = TuneResponse{} - mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TuneResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TuneResponse) ProtoMessage() {} - -func (x *TuneResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TuneResponse.ProtoReflect.Descriptor instead. 
-func (*TuneResponse) Descriptor() ([]byte, []int) { - return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{3} -} - -func (x *TuneResponse) GetApplies() bool { - if x != nil { - return x.Applies - } - return false -} - type TuneBatchRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - AlertJson [][]byte `protobuf:"bytes,1,rep,name=alert_json,json=alertJson,proto3" json:"alert_json,omitempty"` // one JSON-encoded alerts.Alert per alert + AlertJson [][]byte `protobuf:"bytes,1,rep,name=alert_json,json=alertJson,proto3" json:"alert_json,omitempty"` // one JSON-encoded map[string]any per alert unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *TuneBatchRequest) Reset() { *x = TuneBatchRequest{} - mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[4] + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -266,7 +78,7 @@ func (x *TuneBatchRequest) String() string { func (*TuneBatchRequest) ProtoMessage() {} func (x *TuneBatchRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[4] + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -279,7 +91,7 @@ func (x *TuneBatchRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use TuneBatchRequest.ProtoReflect.Descriptor instead. 
func (*TuneBatchRequest) Descriptor() ([]byte, []int) { - return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{4} + return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{1} } func (x *TuneBatchRequest) GetAlertJson() [][]byte { @@ -298,7 +110,7 @@ type TuneBatchResponse struct { func (x *TuneBatchResponse) Reset() { *x = TuneBatchResponse{} - mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[5] + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -310,7 +122,7 @@ func (x *TuneBatchResponse) String() string { func (*TuneBatchResponse) ProtoMessage() {} func (x *TuneBatchResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[5] + mi := &file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -323,7 +135,7 @@ func (x *TuneBatchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use TuneBatchResponse.ProtoReflect.Descriptor instead. 
func (*TuneBatchResponse) Descriptor() ([]byte, []int) { - return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{5} + return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP(), []int{2} } func (x *TuneBatchResponse) GetApplies() []bool { @@ -338,33 +150,15 @@ var File_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto protoreflect.FileDe const file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDesc = "" + "\n" + "3pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto\x12\ftuning_rules\"\a\n" + - "\x05Empty\"\xdf\x01\n" + - "\x0eTuningMetadata\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\x12 \n" + - "\vdescription\x18\x03 \x01(\tR\vdescription\x12\x18\n" + - "\aenabled\x18\x04 \x01(\bR\aenabled\x12\x16\n" + - "\x06global\x18\x05 \x01(\bR\x06global\x12\x1b\n" + - "\trule_type\x18\x06 \x01(\x05R\bruleType\x12\x1e\n" + - "\n" + - "confidence\x18\a \x01(\tR\n" + - "confidence\x12\x18\n" + - "\aversion\x18\b \x01(\tR\aversion\",\n" + - "\vTuneRequest\x12\x1d\n" + - "\n" + - "alert_json\x18\x01 \x01(\fR\talertJson\"(\n" + - "\fTuneResponse\x12\x18\n" + - "\aapplies\x18\x01 \x01(\bR\aapplies\"1\n" + + "\x05Empty\"1\n" + "\x10TuneBatchRequest\x12\x1d\n" + "\n" + "alert_json\x18\x01 \x03(\fR\talertJson\"-\n" + "\x11TuneBatchResponse\x12\x18\n" + - "\aapplies\x18\x01 \x03(\bR\aapplies2\xf5\x02\n" + + "\aapplies\x18\x01 \x03(\bR\aapplies2\xf4\x01\n" + "\n" + - "TuningRule\x12@\n" + - "\vGetMetadata\x12\x13.tuning_rules.Empty\x1a\x1c.tuning_rules.TuningMetadata\x120\n" + - "\x04Init\x12\x13.tuning_rules.Empty\x1a\x13.tuning_rules.Empty\x12=\n" + - "\x04Tune\x12\x19.tuning_rules.TuneRequest\x1a\x1a.tuning_rules.TuneResponse\x12L\n" + + "TuningRule\x120\n" + + "\x04Init\x12\x13.tuning_rules.Empty\x1a\x13.tuning_rules.Empty\x12L\n" + "\tTuneBatch\x12\x1e.tuning_rules.TuneBatchRequest\x1a\x1f.tuning_rules.TuneBatchResponse\x124\n" + 
"\bShutdown\x12\x13.tuning_rules.Empty\x1a\x13.tuning_rules.Empty\x120\n" + "\x04Ping\x12\x13.tuning_rules.Empty\x1a\x13.tuning_rules.EmptyB?Z=github.com/harishhary/blink/pkg/tuning_rules/rpc_tuning_rulesb\x06proto3" @@ -381,30 +175,23 @@ func file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescGZIP() []by return file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescData } -var file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_goTypes = []any{ (*Empty)(nil), // 0: tuning_rules.Empty - (*TuningMetadata)(nil), // 1: tuning_rules.TuningMetadata - (*TuneRequest)(nil), // 2: tuning_rules.TuneRequest - (*TuneResponse)(nil), // 3: tuning_rules.TuneResponse - (*TuneBatchRequest)(nil), // 4: tuning_rules.TuneBatchRequest - (*TuneBatchResponse)(nil), // 5: tuning_rules.TuneBatchResponse + (*TuneBatchRequest)(nil), // 1: tuning_rules.TuneBatchRequest + (*TuneBatchResponse)(nil), // 2: tuning_rules.TuneBatchResponse } var file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_depIdxs = []int32{ - 0, // 0: tuning_rules.TuningRule.GetMetadata:input_type -> tuning_rules.Empty - 0, // 1: tuning_rules.TuningRule.Init:input_type -> tuning_rules.Empty - 2, // 2: tuning_rules.TuningRule.Tune:input_type -> tuning_rules.TuneRequest - 4, // 3: tuning_rules.TuningRule.TuneBatch:input_type -> tuning_rules.TuneBatchRequest - 0, // 4: tuning_rules.TuningRule.Shutdown:input_type -> tuning_rules.Empty - 0, // 5: tuning_rules.TuningRule.Ping:input_type -> tuning_rules.Empty - 1, // 6: tuning_rules.TuningRule.GetMetadata:output_type -> tuning_rules.TuningMetadata - 0, // 7: tuning_rules.TuningRule.Init:output_type -> tuning_rules.Empty - 3, // 8: tuning_rules.TuningRule.Tune:output_type -> tuning_rules.TuneResponse - 5, // 9: 
tuning_rules.TuningRule.TuneBatch:output_type -> tuning_rules.TuneBatchResponse - 0, // 10: tuning_rules.TuningRule.Shutdown:output_type -> tuning_rules.Empty - 0, // 11: tuning_rules.TuningRule.Ping:output_type -> tuning_rules.Empty - 6, // [6:12] is the sub-list for method output_type - 0, // [0:6] is the sub-list for method input_type + 0, // 0: tuning_rules.TuningRule.Init:input_type -> tuning_rules.Empty + 1, // 1: tuning_rules.TuningRule.TuneBatch:input_type -> tuning_rules.TuneBatchRequest + 0, // 2: tuning_rules.TuningRule.Shutdown:input_type -> tuning_rules.Empty + 0, // 3: tuning_rules.TuningRule.Ping:input_type -> tuning_rules.Empty + 0, // 4: tuning_rules.TuningRule.Init:output_type -> tuning_rules.Empty + 2, // 5: tuning_rules.TuningRule.TuneBatch:output_type -> tuning_rules.TuneBatchResponse + 0, // 6: tuning_rules.TuningRule.Shutdown:output_type -> tuning_rules.Empty + 0, // 7: tuning_rules.TuningRule.Ping:output_type -> tuning_rules.Empty + 4, // [4:8] is the sub-list for method output_type + 0, // [0:4] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name @@ -421,7 +208,7 @@ func file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDesc), len(file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDesc)), NumEnums: 0, - NumMessages: 6, + NumMessages: 3, NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto index 1edbafc..59721a4 100644 --- a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto +++ b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto @@ -6,27 +6,8 @@ option go_package = "github.com/harishhary/blink/pkg/tuning_rules/rpc_tuning_rul 
message Empty {} -message TuningMetadata { - string id = 1; - string name = 2; - string description = 3; - bool enabled = 4; - bool global = 5; - int32 rule_type = 6; // 0=Ignore, 1=SetConfidence, 2=IncreaseConfidence, 3=DecreaseConfidence - string confidence = 7; // "verylow|low|medium|high|veryhigh" - string version = 8; -} - -message TuneRequest { - bytes alert_json = 1; // JSON-encoded alerts.Alert -} - -message TuneResponse { - bool applies = 1; -} - message TuneBatchRequest { - repeated bytes alert_json = 1; // one JSON-encoded alerts.Alert per alert + repeated bytes alert_json = 1; // one JSON-encoded map[string]any per alert } message TuneBatchResponse { @@ -34,9 +15,7 @@ message TuneBatchResponse { } service TuningRule { - rpc GetMetadata(Empty) returns (TuningMetadata); rpc Init(Empty) returns (Empty); - rpc Tune(TuneRequest) returns (TuneResponse); rpc TuneBatch(TuneBatchRequest) returns (TuneBatchResponse); rpc Shutdown(Empty) returns (Empty); rpc Ping(Empty) returns (Empty); diff --git a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule_grpc.pb.go b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule_grpc.pb.go index 18328d5..eb91c49 100644 --- a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule_grpc.pb.go +++ b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule_grpc.pb.go @@ -19,21 +19,17 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - TuningRule_GetMetadata_FullMethodName = "/tuning_rules.TuningRule/GetMetadata" - TuningRule_Init_FullMethodName = "/tuning_rules.TuningRule/Init" - TuningRule_Tune_FullMethodName = "/tuning_rules.TuningRule/Tune" - TuningRule_TuneBatch_FullMethodName = "/tuning_rules.TuningRule/TuneBatch" - TuningRule_Shutdown_FullMethodName = "/tuning_rules.TuningRule/Shutdown" - TuningRule_Ping_FullMethodName = "/tuning_rules.TuningRule/Ping" + TuningRule_Init_FullMethodName = "/tuning_rules.TuningRule/Init" + TuningRule_TuneBatch_FullMethodName = "/tuning_rules.TuningRule/TuneBatch" + TuningRule_Shutdown_FullMethodName = 
"/tuning_rules.TuningRule/Shutdown" + TuningRule_Ping_FullMethodName = "/tuning_rules.TuningRule/Ping" ) // TuningRuleClient is the client API for TuningRule service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type TuningRuleClient interface { - GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TuningMetadata, error) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) - Tune(ctx context.Context, in *TuneRequest, opts ...grpc.CallOption) (*TuneResponse, error) TuneBatch(ctx context.Context, in *TuneBatchRequest, opts ...grpc.CallOption) (*TuneBatchResponse, error) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Ping(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) @@ -47,16 +43,6 @@ func NewTuningRuleClient(cc grpc.ClientConnInterface) TuningRuleClient { return &tuningRuleClient{cc} } -func (c *tuningRuleClient) GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TuningMetadata, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(TuningMetadata) - err := c.cc.Invoke(ctx, TuningRule_GetMetadata_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *tuningRuleClient) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) @@ -67,16 +53,6 @@ func (c *tuningRuleClient) Init(ctx context.Context, in *Empty, opts ...grpc.Cal return out, nil } -func (c *tuningRuleClient) Tune(ctx context.Context, in *TuneRequest, opts ...grpc.CallOption) (*TuneResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(TuneResponse) - err := c.cc.Invoke(ctx, TuningRule_Tune_FullMethodName, in, out, cOpts...) 
- if err != nil { - return nil, err - } - return out, nil -} - func (c *tuningRuleClient) TuneBatch(ctx context.Context, in *TuneBatchRequest, opts ...grpc.CallOption) (*TuneBatchResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TuneBatchResponse) @@ -111,9 +87,7 @@ func (c *tuningRuleClient) Ping(ctx context.Context, in *Empty, opts ...grpc.Cal // All implementations must embed UnimplementedTuningRuleServer // for forward compatibility. type TuningRuleServer interface { - GetMetadata(context.Context, *Empty) (*TuningMetadata, error) Init(context.Context, *Empty) (*Empty, error) - Tune(context.Context, *TuneRequest) (*TuneResponse, error) TuneBatch(context.Context, *TuneBatchRequest) (*TuneBatchResponse, error) Shutdown(context.Context, *Empty) (*Empty, error) Ping(context.Context, *Empty) (*Empty, error) @@ -127,15 +101,9 @@ type TuningRuleServer interface { // pointer dereference when methods are called. type UnimplementedTuningRuleServer struct{} -func (UnimplementedTuningRuleServer) GetMetadata(context.Context, *Empty) (*TuningMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMetadata not implemented") -} func (UnimplementedTuningRuleServer) Init(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Init not implemented") } -func (UnimplementedTuningRuleServer) Tune(context.Context, *TuneRequest) (*TuneResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Tune not implemented") -} func (UnimplementedTuningRuleServer) TuneBatch(context.Context, *TuneBatchRequest) (*TuneBatchResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method TuneBatch not implemented") } @@ -166,24 +134,6 @@ func RegisterTuningRuleServer(s grpc.ServiceRegistrar, srv TuningRuleServer) { s.RegisterService(&TuningRule_ServiceDesc, srv) } -func _TuningRule_GetMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TuningRuleServer).GetMetadata(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: TuningRule_GetMetadata_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TuningRuleServer).GetMetadata(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - func _TuningRule_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { @@ -202,24 +152,6 @@ func _TuningRule_Init_Handler(srv interface{}, ctx context.Context, dec func(int return interceptor(ctx, in, info, handler) } -func _TuningRule_Tune_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TuneRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TuningRuleServer).Tune(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: TuningRule_Tune_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TuningRuleServer).Tune(ctx, req.(*TuneRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _TuningRule_TuneBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TuneBatchRequest) if err := dec(in); err != nil { @@ -281,18 +213,10 @@ var TuningRule_ServiceDesc = grpc.ServiceDesc{ ServiceName: "tuning_rules.TuningRule", HandlerType: (*TuningRuleServer)(nil), Methods: []grpc.MethodDesc{ - { - MethodName: "GetMetadata", - Handler: _TuningRule_GetMetadata_Handler, - }, { MethodName: 
"Init", Handler: _TuningRule_Init_Handler, }, - { - MethodName: "Tune", - Handler: _TuningRule_Tune_Handler, - }, { MethodName: "TuneBatch", Handler: _TuningRule_TuneBatch_Handler, diff --git a/pkg/tuning_rules/sdk/serve.go b/pkg/tuning_rules/serve.go similarity index 99% rename from pkg/tuning_rules/sdk/serve.go rename to pkg/tuning_rules/serve.go index e7f4029..116bc71 100644 --- a/pkg/tuning_rules/sdk/serve.go +++ b/pkg/tuning_rules/serve.go @@ -1,4 +1,4 @@ -package sdk +package tuning_rules import ( "context" diff --git a/pkg/tuning_rules/tuning_rule.go b/pkg/tuning_rules/tuning_rule.go index 54cf9de..5e64654 100644 --- a/pkg/tuning_rules/tuning_rule.go +++ b/pkg/tuning_rules/tuning_rule.go @@ -7,11 +7,9 @@ import ( "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/pkg/alerts" "github.com/harishhary/blink/pkg/scoring" - "github.com/harishhary/blink/pkg/tuning_rules/config" ) type PluginMetadata = plugin.PluginMetadata -type TuningMetadata = config.TuningMetadata type RuleType int @@ -25,7 +23,7 @@ const ( type TuningRule interface { Tune(ctx context.Context, alerts []alerts.Alert) ([]bool, errors.Error) - TuningMetadata() *TuningMetadata + TuningRuleMetadata() *TuningRuleMetadata Metadata() PluginMetadata Global() bool RuleType() RuleType From 16db1a66781c3d43fcef87c031903c1e4297cbc3 Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Wed, 25 Mar 2026 16:02:36 +0100 Subject: [PATCH 10/14] adding examples --- examples/enrichments/add-geo/main.go | 6 +++--- examples/formatters/slack/main.go | 6 +++--- examples/matchers/allow-all/main.go | 6 +++--- examples/rules/failed-login/main.go | 6 +++--- examples/tuning_rules/boost-external-ip/main.go | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/examples/enrichments/add-geo/main.go b/examples/enrichments/add-geo/main.go index 78f9251..3ab6da1 100644 --- a/examples/enrichments/add-geo/main.go +++ b/examples/enrichments/add-geo/main.go @@ -5,7 +5,7 @@ import ( "net" 
"github.com/harishhary/blink/internal/errors" - sdk "github.com/harishhary/blink/pkg/enrichments/sdk" + "github.com/harishhary/blink/pkg/enrichments" ) // addGeo annotates each alert with geo_country and geo_is_internal derived @@ -14,7 +14,7 @@ import ( // // All static metadata (name, id, enabled, depends_on, etc.) is declared in // the companion add-geo.yaml sidecar file. -type addGeo struct{ sdk.BaseEnrichment } +type addGeo struct{ enrichments.BaseEnrichment } var privateNets = mustParseCIDRs([]string{ "10.0.0.0/8", @@ -69,5 +69,5 @@ func (addGeo) Enrich(_ context.Context, alert map[string]any) (map[string]any, e } func main() { - sdk.Serve(addGeo{}) + enrichments.Serve(addGeo{}) } diff --git a/examples/formatters/slack/main.go b/examples/formatters/slack/main.go index 82812d9..7bf67bb 100644 --- a/examples/formatters/slack/main.go +++ b/examples/formatters/slack/main.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/harishhary/blink/internal/errors" - sdk "github.com/harishhary/blink/pkg/formatters/sdk" + "github.com/harishhary/blink/pkg/formatters" ) // slackFormatter converts an alert dict into a Slack Block Kit payload. @@ -14,7 +14,7 @@ import ( // // All static metadata (name, id, enabled, etc.) is declared in // the companion slack.yaml sidecar file. -type slackFormatter struct{ sdk.BaseFormatter } +type slackFormatter struct{ formatters.BaseFormatter } // Format receives the full alerts.Alert struct serialised to JSON. // alerts.Alert has no JSON struct tags, so all field names are PascalCase. 
@@ -47,5 +47,5 @@ func (slackFormatter) Format(_ context.Context, alert map[string]any) (map[strin } func main() { - sdk.Serve(slackFormatter{}) + formatters.Serve(slackFormatter{}) } diff --git a/examples/matchers/allow-all/main.go b/examples/matchers/allow-all/main.go index 73f1f6d..b1d08e8 100644 --- a/examples/matchers/allow-all/main.go +++ b/examples/matchers/allow-all/main.go @@ -5,18 +5,18 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/matchers/sdk" + "github.com/harishhary/blink/pkg/matchers" ) // allowAll matches every event. Use for testing only. // All static metadata (name, id, enabled, global, etc.) is declared in // the companion allow-all.yaml sidecar file. -type allowAll struct{ sdk.BaseMatcher } +type allowAll struct{ matchers.BaseMatcher } func (allowAll) Match(_ context.Context, _ events.Event) (bool, errors.Error) { return true, nil } func main() { - sdk.Serve(allowAll{}) + matchers.Serve(allowAll{}) } diff --git a/examples/rules/failed-login/main.go b/examples/rules/failed-login/main.go index db3ce1b..85460e4 100644 --- a/examples/rules/failed-login/main.go +++ b/examples/rules/failed-login/main.go @@ -7,7 +7,7 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/rules/sdk" + "github.com/harishhary/blink/pkg/rules" ) // failedLogin fires when a login attempt is recorded as failed. @@ -15,7 +15,7 @@ import ( // // It overrides AlertTitle, AlertContext, and AlertSeverity to produce // richer alerts. All other sdk.BaseRule methods use their default (no-op) values. 
-type failedLogin struct{ sdk.BaseRule } +type failedLogin struct{ rules.BaseRule } func (failedLogin) Evaluate(_ context.Context, event events.Event) (bool, errors.Error) { action, _ := event["action"].(string) @@ -50,5 +50,5 @@ func (failedLogin) AlertSeverity(event events.Event) string { } func main() { - sdk.Serve(failedLogin{}) + rules.Serve(failedLogin{}) } diff --git a/examples/tuning_rules/boost-external-ip/main.go b/examples/tuning_rules/boost-external-ip/main.go index 2dc2324..de0b381 100644 --- a/examples/tuning_rules/boost-external-ip/main.go +++ b/examples/tuning_rules/boost-external-ip/main.go @@ -5,7 +5,7 @@ import ( "net" "github.com/harishhary/blink/internal/errors" - sdk "github.com/harishhary/blink/pkg/tuning_rules/sdk" + "github.com/harishhary/blink/pkg/tuning_rules" ) // boostExternalIP raises alert confidence when the source_ip is not in @@ -13,7 +13,7 @@ import ( // // All static metadata (name, id, enabled, global, rule_type, confidence, etc.) // is declared in the companion boost-external-ip.yaml sidecar file. 
-type boostExternalIP struct{ sdk.BaseTuningRule } +type boostExternalIP struct{ tuning_rules.BaseTuningRule } var privateNets = mustParseCIDRs([]string{ "10.0.0.0/8", @@ -57,5 +57,5 @@ func (boostExternalIP) Tune(_ context.Context, alert map[string]any) (bool, erro } func main() { - sdk.Serve(boostExternalIP{}) + tuning_rules.Serve(boostExternalIP{}) } From 8373966b0877509a0f31d9a38907741f458723c6 Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Wed, 25 Mar 2026 16:04:19 +0100 Subject: [PATCH 11/14] adding plugin and config manager --- internal/services/config_sync.go | 44 ++++++++++++++++++++++++++++++++ internal/services/plugin_sync.go | 19 +++++--------- 2 files changed, 51 insertions(+), 12 deletions(-) create mode 100644 internal/services/config_sync.go diff --git a/internal/services/config_sync.go b/internal/services/config_sync.go new file mode 100644 index 0000000..78e50c9 --- /dev/null +++ b/internal/services/config_sync.go @@ -0,0 +1,44 @@ +package services + +import ( + "context" + + svcctx "github.com/harishhary/blink/internal/context" + "github.com/harishhary/blink/internal/errors" + "github.com/harishhary/blink/internal/logger" + "github.com/harishhary/blink/internal/manager" +) + +// ConfigSyncService is the non-generic service wrapper for a ConfigManager, +// mirroring how PluginSyncService wraps PluginManager. It implements services.Service +// so it can be registered alongside other services in the Runner. +type ConfigSyncService struct { + svcctx.ServiceContext + serviceName string + manager manager.Manager +} + +// NewConfigSyncService creates a ConfigSyncService. name is the service name returned +// by Name(); displayName is used for the service context (logging). 
+func NewConfigSyncService(name, displayName string, manager manager.Manager) *ConfigSyncService { + sc := svcctx.New(displayName) + sc.Logger = logger.New(sc.Name(), "dev") + return &ConfigSyncService{ + ServiceContext: sc, + serviceName: name, + manager: manager, + } +} + +// Name returns the service name. +func (s *ConfigSyncService) Name() string { return s.serviceName } + +// Run starts the config manager (initial reconcile + fsnotify watch loop) and blocks +// until ctx is cancelled. Mirrors PluginSyncService.Run. +func (s *ConfigSyncService) Run(ctx context.Context) errors.Error { + if err := s.manager.Start(ctx); err != nil { + s.ErrorF("config manager start error: %v", err) + } + <-ctx.Done() + return nil +} diff --git a/internal/services/plugin_sync.go b/internal/services/plugin_sync.go index 69de457..a1b76af 100644 --- a/internal/services/plugin_sync.go +++ b/internal/services/plugin_sync.go @@ -2,29 +2,24 @@ package services import ( "context" - "os" svcctx "github.com/harishhary/blink/internal/context" "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/internal/configuration" "github.com/harishhary/blink/internal/logger" - "github.com/harishhary/blink/internal/plugin" + "github.com/harishhary/blink/internal/manager" ) type PluginSyncService struct { svcctx.ServiceContext serviceName string - plugin plugin.Plugin + manager manager.Manager } // NewPluginSyncService creates a service that starts the plugin manager and waits for -// context cancellation. newPluginManager is a closure that captures the pool's Sync -// callback so lifecycle events flow directly to the pool with no intermediate bus. -func NewPluginSyncService( - name, displayName, envVar string, - newPluginManager func(*logger.Logger, string) plugin.Plugin, -) (*PluginSyncService, error) { +// context cancellation. mgr is the pre-built plugin manager to run. 
+func NewPluginSyncService(name string, displayName string, manager manager.Manager) (*PluginSyncService, error) { sc := svcctx.New(displayName) if err := configuration.LoadFromEnvironment(&sc); err != nil { return nil, err @@ -34,7 +29,7 @@ func NewPluginSyncService( return &PluginSyncService{ ServiceContext: sc, serviceName: name, - plugin: newPluginManager(sc.Logger, os.Getenv(envVar)), + manager: manager, }, nil } @@ -43,8 +38,8 @@ func (s *PluginSyncService) Name() string { return s.serviceName } // Run starts the plugin manager (if any) and blocks until ctx is cancelled. func (s *PluginSyncService) Run(ctx context.Context) errors.Error { - if err := s.plugin.Start(ctx); err != nil { - s.ErrorF("plugin start error: %v", err) + if err := s.manager.Start(ctx); err != nil { + s.ErrorF("plugin manager start error: %v", err) } <-ctx.Done() return nil From c226b3cc2edab7f56e3514d8f27892d9f180e1e1 Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Wed, 25 Mar 2026 16:06:12 +0100 Subject: [PATCH 12/14] modified pool key to be flattened --- internal/backends/backend.go | 4 +- internal/backends/dynamodb/dynamodb.go | 12 +- internal/pools/pool.go | 275 ++++++++++++++----------- 3 files changed, 164 insertions(+), 127 deletions(-) diff --git a/internal/backends/backend.go b/internal/backends/backend.go index 10d6ec2..059573c 100644 --- a/internal/backends/backend.go +++ b/internal/backends/backend.go @@ -4,7 +4,7 @@ import ( "context" "github.com/harishhary/blink/pkg/alerts" - "github.com/harishhary/blink/pkg/rules/config" + "github.com/harishhary/blink/pkg/rules" ) type Record map[string]any @@ -26,7 +26,7 @@ type IAlertStore interface { // IRuleStore covers rule-level queries (distinct rule names + bulk rule fetch). type IRuleStore interface { RuleNamesGenerator() <-chan string - FetchAllRules() (<-chan *config.RuleMetadata, error) + FetchAllRules() (<-chan *rules.RuleMetadata, error) } // IBackend is the full backend capability: alert store + rule store. 
diff --git a/internal/backends/dynamodb/dynamodb.go b/internal/backends/dynamodb/dynamodb.go index 6dbe5a8..9c7c327 100644 --- a/internal/backends/dynamodb/dynamodb.go +++ b/internal/backends/dynamodb/dynamodb.go @@ -16,7 +16,7 @@ import ( "github.com/harishhary/blink/internal/backends" "github.com/harishhary/blink/internal/helpers" "github.com/harishhary/blink/pkg/alerts" - rulesconfig "github.com/harishhary/blink/pkg/rules/config" + "github.com/harishhary/blink/pkg/rules" ) type DynamoDBBackend struct { @@ -327,7 +327,7 @@ func (at *DynamoDBBackend) ToAlert(record backends.Record) (*alerts.Alert, error func (at *DynamoDBBackend) ToRecord(alert *alerts.Alert) (backends.Record, error) { item, err := attributevalue.MarshalMap(backends.Record{ "RuleName": alert.Rule.Name, // Partition Key - "AlertID": alert.AlertID, // Sort/Range Key + "AlertID": alert.AlertID, // Sort/Range Key "Attempts": alert.Attempts, "Cluster": alert.Cluster, "Created": alert.Created.Format(helpers.DATETIME_FORMAT), @@ -356,13 +356,13 @@ func (at *DynamoDBBackend) ToRecord(alert *alerts.Alert) (backends.Record, error return result, nil } -func (at *DynamoDBBackend) FetchAllRules() (<-chan *rulesconfig.RuleMetadata, error) { +func (at *DynamoDBBackend) FetchAllRules() (<-chan *rules.RuleMetadata, error) { input := &dynamodb.ScanInput{ TableName: aws.String(at.dbName), Select: types.SelectAllAttributes, } - out := make(chan *rulesconfig.RuleMetadata) + out := make(chan *rules.RuleMetadata) go func() { defer close(out) generator := at.paginateScan(at.db.Scan, input) @@ -379,8 +379,8 @@ func (at *DynamoDBBackend) FetchAllRules() (<-chan *rulesconfig.RuleMetadata, er return out, nil } -func (at *DynamoDBBackend) unmarshalRule(item map[string]types.AttributeValue) (*rulesconfig.RuleMetadata, error) { - var rule rulesconfig.RuleMetadata +func (at *DynamoDBBackend) unmarshalRule(item map[string]types.AttributeValue) (*rules.RuleMetadata, error) { + var rule rules.RuleMetadata if err := 
attributevalue.UnmarshalMap(item, &rule); err != nil { return nil, fmt.Errorf("failed to unmarshal item to rule: %w", err) } diff --git a/internal/pools/pool.go b/internal/pools/pool.go index db91508..f80e99f 100644 --- a/internal/pools/pool.go +++ b/internal/pools/pool.go @@ -6,13 +6,11 @@ import ( "fmt" "hash/fnv" "log" + "sync" "sync/atomic" "time" ) -// Returned by the pool when a plugin's KillSwitch is true. -var ErrKillSwitched = errors.New("plugin kill-switched") - // Returned by Call when no active pool exists for the requested var ErrPluginNotFound = errors.New("plugin not found") @@ -63,17 +61,21 @@ func (m *RolloutMode) UnmarshalText(b []byte) error { } // func stub that returns per-plugin routing parameters. -// Return zero values for default blue-green behaviour (no kill switch, no canary). -type RoutingConfig func(pluginID string) (killSwitch bool, mode RolloutMode, rolloutPct float64) +// Return zero values for default blue-green behaviour. +type RoutingConfig func(pluginID string) (mode RolloutMode, rolloutPct float64) // PoolKey uniquely identifies a versioned plugin subprocess pool. type PoolKey struct { - PluginID string - Version string + Id string + Version string + Hash string // SHA-256 of the binary; empty when not yet known } func (k PoolKey) String() string { - return k.PluginID + "@" + k.Version + if k.Hash != "" { + return k.Id + "@" + k.Version + "@" + k.Hash + } + return k.Id + "@" + k.Version } // VersionedPool manages a fixed-size pool of plugin subprocess handles of type T. @@ -141,8 +143,9 @@ type pendingPromotion struct { onDrained func() } -// Manages VersionedPools keyed by (PluginID, Version). +// Manages VersionedPools keyed by (Id, Version). 
type ProcessPool[T any] struct { + mu sync.RWMutex pools map[PoolKey]*VersionedPool[T] active map[string]PoolKey pending map[string]pendingPromotion @@ -180,39 +183,45 @@ func NewProcessPool[T any](routing RoutingConfig, metrics *PoolMetrics, drainTim // percentage as found by callCanary/callShadow. Call Promote(pluginID) to graduate the // new pool to production and drain the old one. func (pp *ProcessPool[T]) Register(key PoolKey, plugins []T, maxProcs int, onDrained func()) { + pp.mu.Lock() + defer pp.mu.Unlock() + pool := newVersionedPool(key, plugins, maxProcs) pp.pools[key] = pool if pp.metrics != nil { - pp.metrics.poolSize.WithLabelValues(key.PluginID, key.Version).Set(float64(pool.Size())) + pp.metrics.poolSize.WithLabelValues(key.Id, key.Version).Set(float64(pool.Size())) } // Clear tombstone: plugin has come back (re-deployed after deletion). - delete(pp.removed, key.PluginID) + delete(pp.removed, key.Id) - _, mode, _ := pp.routing(key.PluginID) + mode, _ := pp.routing(key.Id) if mode == RolloutModeCanary || mode == RolloutModeShadow { // Stage the new pool without promoting - preserve active as production. // First registration for this pluginID still needs an active entry. - if _, hasActive := pp.active[key.PluginID]; !hasActive { - pp.active[key.PluginID] = key + if _, hasActive := pp.active[key.Id]; !hasActive { + pp.active[key.Id] = key } else { // Drain the previous pending pool before replacing it so its subprocess // is killed and its onDrained callback fires. Without this, rapid deploys // in canary mode would orphan intermediate pools in pp.pools indefinitely. 
- if prev, ok := pp.pending[key.PluginID]; ok { + if prev, ok := pp.pending[key.Id]; ok { if prevPool, ok := pp.pools[prev.key]; ok { go pp.drain(prev.key, prevPool, prev.onDrained) } } - pp.pending[key.PluginID] = pendingPromotion{key: key, onDrained: onDrained} + pp.pending[key.Id] = pendingPromotion{key: key, onDrained: onDrained} } return } // Blue-green: promote immediately and drain old. - oldKey, hasOld := pp.active[key.PluginID] - pp.active[key.PluginID] = key + // Two co-existing blue-green binaries for the same plugin ID are prevented from ever + // reaching this point: IsReady() calls HasBlockingError() which runs Validate() fresh, + // and Validate() emits a blocking error for any plugin ID with multiple blue-green versions. + oldKey, hasOld := pp.active[key.Id] + pp.active[key.Id] = key if hasOld && oldKey != key { if oldPool, ok := pp.pools[oldKey]; ok { @@ -225,8 +234,11 @@ func (pp *ProcessPool[T]) Register(key PoolKey, plugins []T, maxProcs int, onDra // draining the old pool asynchronously. If no pending pool exists, this is a no-op. // Typically called by an operator API or a health-check once canary metrics are green. func (pp *ProcessPool[T]) Promote(pluginID string) { + pp.mu.Lock() + p, ok := pp.pending[pluginID] if !ok { + pp.mu.Unlock() return } delete(pp.pending, pluginID) @@ -234,60 +246,94 @@ func (pp *ProcessPool[T]) Promote(pluginID string) { oldKey, hasOld := pp.active[pluginID] pp.active[pluginID] = p.key + var drainPool *VersionedPool[T] + var drainKey PoolKey + noOldPool := false if hasOld && oldKey != p.key { - if oldPool, ok := pp.pools[oldKey]; ok { - go pp.drain(oldKey, oldPool, p.onDrained) + drainPool = pp.pools[oldKey] + drainKey = oldKey + noOldPool = drainPool == nil + } + pp.mu.Unlock() + + // Call onDrained outside the lock - it may run kill() which blocks for up to 3s + // on gRPC Shutdown. Holding the lock that long would stall all Call() invocations. 
+ switch { + case drainPool != nil: + go pp.drain(drainKey, drainPool, p.onDrained) + case !hasOld || oldKey == p.key: + // No old pool to drain (first registration or same key promoted) - fire callback directly. + if p.onDrained != nil { + p.onDrained() } - } else if p.onDrained != nil { - p.onDrained() + case noOldPool: + // Old key existed in active but pool was already removed - skip onDrained. } } -// Unregister removes the active pool for pluginID and drains it asynchronously. Any pending canary/shadow pool for the same pluginID is also drained. -// Used for transient stops (crash restarts, config disables) - no tombstone is set. Subsequent Call invocations return ErrPluginNotFound until the plugin re-registers. -func (pp *ProcessPool[T]) Unregister(pluginID string) { - if p, ok := pp.pending[pluginID]; ok { - delete(pp.pending, pluginID) +// Unregister drains the specific versioned pool identified by key. +// Used for transient stops (crash restarts, config disables) — no tombstone is set. +// Only the pool that crashed is torn down; other versions of the same plugin are unaffected. +func (pp *ProcessPool[T]) Unregister(key PoolKey) { + pp.mu.Lock() + defer pp.mu.Unlock() + + // If it's the pending (canary/shadow) pool, drain it only. + if p, ok := pp.pending[key.Id]; ok && p.key == key { + delete(pp.pending, key.Id) if pool, ok := pp.pools[p.key]; ok { go pp.drain(p.key, pool, p.onDrained) } + return } - key, ok := pp.active[pluginID] - if !ok { + + // If it's the active pool, drain it only. + activeKey, ok := pp.active[key.Id] + if !ok || activeKey != key { return } - delete(pp.active, pluginID) - if pool, ok := pp.pools[key]; ok { - go pp.drain(key, pool, nil) + delete(pp.active, key.Id) + if pool, ok := pp.pools[activeKey]; ok { + go pp.drain(activeKey, pool, nil) } } -// Remove removes the active pool for pluginID, drains it asynchronously, and tombstones the plugin ID. Any pending canary/shadow pool is also drained. 
-// Used when a binary is permanently deleted from disk. Subsequent Call invocations return ErrPluginRemoved. -func (pp *ProcessPool[T]) Remove(pluginID string) { - if p, ok := pp.pending[pluginID]; ok { - delete(pp.pending, pluginID) +// Remove drains the specific versioned pool identified by key and tombstones the plugin ID +// only when no other pools for that plugin remain. +// Used when a binary is permanently deleted from disk. +func (pp *ProcessPool[T]) Remove(key PoolKey) { + pp.mu.Lock() + defer pp.mu.Unlock() + + if p, ok := pp.pending[key.Id]; ok && p.key == key { + delete(pp.pending, key.Id) if pool, ok := pp.pools[p.key]; ok { go pp.drain(p.key, pool, p.onDrained) } - } - key, ok := pp.active[pluginID] - if !ok { - pp.removed[pluginID] = struct{}{} + } else if activeKey, ok := pp.active[key.Id]; ok && activeKey == key { + delete(pp.active, key.Id) + if pool, ok := pp.pools[activeKey]; ok { + go pp.drain(activeKey, pool, nil) + } + } else { + // Key not currently tracked — tombstone so callers don't wait forever. + pp.removed[key.Id] = struct{}{} return } - delete(pp.active, pluginID) - pp.removed[pluginID] = struct{}{} - if pool, ok := pp.pools[key]; ok { - go pp.drain(key, pool, nil) + + // Tombstone only if no pools remain for this plugin. + _, hasActive := pp.active[key.Id] + _, hasPending := pp.pending[key.Id] + if !hasActive && !hasPending { + pp.removed[key.Id] = struct{}{} } } // DefaultCanaryHashKey is the call-site key used for consistent-hash canary routing. var DefaultCanaryHashKey = "tenant_id" -// Acquires a handle from the appropriate pool (respecting kill-switch and -// canary/blue-green routing), invokes fn on it, and releases the handle. +// Acquires a handle from the appropriate pool (respecting canary/blue-green routing), +// invokes fn on it, and releases the handle. // // For shadow mode, only the production pool is called. Use CallWithShadow to also evaluate a shadow pool concurrently with a separate, independent closure. 
func (pp *ProcessPool[T]) Call(ctx context.Context, id, hashKey string, fn func(context.Context, T) error) error { @@ -299,47 +345,48 @@ func (pp *ProcessPool[T]) Call(ctx context.Context, id, hashKey string, fn func( // state (e.g. a cloned input, a separate result variable) to avoid data races with prodFn. // Shadow errors are logged and counted but do not affect the return value. func (pp *ProcessPool[T]) CallWithShadow(ctx context.Context, id, hashKey string, prodFn, shadowFn func(context.Context, T) error) error { - if err := pp.checkKillSwitch(id); err != nil { - return err - } - + // Snapshot everything we need under a short read lock. + // User code (prodFn/shadowFn) is called after the lock is released. + pp.mu.RLock() key, ok := pp.active[id] if !ok { - if _, removed := pp.removed[id]; removed { + _, removed := pp.removed[id] + pp.mu.RUnlock() + if removed { return fmt.Errorf("%w: %s", ErrPluginRemoved, id) } return fmt.Errorf("%w: %s", ErrPluginNotFound, id) } - - _, mode, rolloutPct := pp.routing(id) - switch mode { - case RolloutModeCanary: - return pp.callCanary(ctx, key, id, hashKey, rolloutPct, prodFn) - case RolloutModeShadow: - return pp.callShadow(ctx, key, id, prodFn, shadowFn) + mode, rolloutPct := pp.routing(id) + prodPool := pp.pools[key] + // For canary/shadow: find any registered non-active pool for the same pluginID. 
+ var altPool *VersionedPool[T] + if mode == RolloutModeCanary || mode == RolloutModeShadow { + for k, p := range pp.pools { + if k.Id == id && k != key { + altPool = p + break + } + } } + pp.mu.RUnlock() - pool, ok := pp.pools[key] - if !ok { + if prodPool == nil { return fmt.Errorf("processpool: pool %s not found", key) } - return pp.callPool(ctx, pool, prodFn) -} -func (pp *ProcessPool[T]) checkKillSwitch(id string) error { - killSwitch, _, _ := pp.routing(id) - if killSwitch { - if pp.metrics != nil { - pp.metrics.killSwitches.WithLabelValues(id).Inc() - } - return fmt.Errorf("%w: %s", ErrKillSwitched, id) + switch mode { + case RolloutModeCanary: + return pp.callCanary(ctx, id, hashKey, rolloutPct, prodPool, altPool, prodFn) + case RolloutModeShadow: + return pp.callShadow(ctx, id, prodPool, altPool, prodFn, shadowFn) } - return nil + return pp.callPool(ctx, prodPool, prodFn) } -// callCanary routes rolloutPct% of calls (via consistent hash on hashKey) to any -// non-active pool for the same pluginID. Remaining calls go to the production (active) pool. -func (pp *ProcessPool[T]) callCanary(ctx context.Context, prodKey PoolKey, id, hashKey string, rolloutPct float64, fn func(context.Context, T) error) error { +// callCanary routes rolloutPct% of calls (via consistent hash on hashKey) to altPool +// when one exists. Pool pointers are pre-snapshotted by the caller under RLock. +func (pp *ProcessPool[T]) callCanary(ctx context.Context, id string, hashKey string, rolloutPct float64, prodPool, altPool *VersionedPool[T], fn func(context.Context, T) error) error { if hashKey == "" { hashKey = DefaultCanaryHashKey } @@ -347,54 +394,37 @@ func (pp *ProcessPool[T]) callCanary(ctx context.Context, prodKey PoolKey, id, h h.Write([]byte(hashKey)) pct := float64(h.Sum32()%100) + 1 // 1–100 - if pct <= rolloutPct { - // Find a registered non-active pool for the same pluginID. 
- for k, pool := range pp.pools { - if k.PluginID == id && k != prodKey { - return pp.callPool(ctx, pool, fn) - } - } - } - - prodPool, ok := pp.pools[prodKey] - if !ok { - return fmt.Errorf("processpool: production pool %s not found", prodKey) + if pct <= rolloutPct && altPool != nil { + return pp.callPool(ctx, altPool, fn) } return pp.callPool(ctx, prodPool, fn) } -// callShadow calls prodFn on the production pool, then fires shadowFn on any -// non-active pool for the same pluginID in a background goroutine. -func (pp *ProcessPool[T]) callShadow(ctx context.Context, prodKey PoolKey, id string, prodFn, shadowFn func(context.Context, T) error) error { - prodPool, ok := pp.pools[prodKey] - if !ok { - return fmt.Errorf("processpool: production pool %s not found", prodKey) - } - +// callShadow calls prodFn on the production pool, then fires shadowFn on altPool +// in a background goroutine. Pool pointers are pre-snapshotted by the caller under RLock. +func (pp *ProcessPool[T]) callShadow(ctx context.Context, id string, prodPool, altPool *VersionedPool[T], prodFn, shadowFn func(context.Context, T) error) error { prodErr := pp.callPool(ctx, prodPool, prodFn) - if shadowFn != nil { - // Find a registered non-active pool for the same pluginID. - for k, sp := range pp.pools { - if k.PluginID == id && k != prodKey { - shadowPool := sp - go func() { - plugin, err := shadowPool.Acquire(ctx) - if err != nil { - log.Printf("processpool: shadow acquire failed for %s: %v", id, err) - return - } - defer shadowPool.Release(plugin) - if err := shadowFn(ctx, plugin); err != nil { - log.Printf("processpool: shadow error for %s: %v", id, err) - if pp.metrics != nil { - pp.metrics.shadowDiffs.WithLabelValues(id).Inc() - } - } - }() - break + if shadowFn != nil && altPool != nil { + // Detach from the caller's context: the production call has already returned, + // so the caller's deadline may have expired or the ctx may be cancelled before + // the shadow goroutine gets CPU time. 
Shadow evaluation must be independent. + shadowCtx := context.WithoutCancel(ctx) + shadowPool := altPool + go func() { + plugin, err := shadowPool.Acquire(shadowCtx) + if err != nil { + log.Printf("processpool: shadow acquire failed for %s: %v", id, err) + return } - } + defer shadowPool.Release(plugin) + if err := shadowFn(shadowCtx, plugin); err != nil { + log.Printf("processpool: shadow error for %s: %v", id, err) + if pp.metrics != nil { + pp.metrics.shadowDiffs.WithLabelValues(id).Inc() + } + } + }() } return prodErr @@ -427,9 +457,9 @@ func (pp *ProcessPool[T]) drain(key PoolKey, pool *VersionedPool[T], onDrained f elapsed := time.Since(start).Seconds() if pp.metrics != nil { - pp.metrics.drainDuration.WithLabelValues(key.PluginID, key.Version).Observe(elapsed) - pp.metrics.poolSize.WithLabelValues(key.PluginID, key.Version).Set(0) - pp.metrics.poolInflight.WithLabelValues(key.PluginID, key.Version).Set(0) + pp.metrics.drainDuration.WithLabelValues(key.Id, key.Version).Observe(elapsed) + pp.metrics.poolSize.WithLabelValues(key.Id, key.Version).Set(0) + pp.metrics.poolInflight.WithLabelValues(key.Id, key.Version).Set(0) } if pool.Inflight() > 0 { @@ -437,7 +467,14 @@ func (pp *ProcessPool[T]) drain(key PoolKey, pool *VersionedPool[T], onDrained f } else { log.Printf("processpool: drained pool %s in %.2fs", key, elapsed) } - delete(pp.pools, key) + + // Only delete if this exact pool is still registered at this key. + // A concurrent Register() may have replaced it while we were waiting. 
+ pp.mu.Lock() + if pp.pools[key] == pool { + delete(pp.pools, key) + } + pp.mu.Unlock() if onDrained != nil { onDrained() From 09bd571c850fd45e6bec2c539f95d5a6446ffecb Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Wed, 25 Mar 2026 16:16:52 +0100 Subject: [PATCH 13/14] fixing imports in tests --- pkg/rules/manager_test.go | 3 +-- pkg/rules/testdata/crashing_rule/main.go | 6 +++--- pkg/rules/testdata/simple_rule/main.go | 6 +++--- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/pkg/rules/manager_test.go b/pkg/rules/manager_test.go index b346e2c..1fda261 100644 --- a/pkg/rules/manager_test.go +++ b/pkg/rules/manager_test.go @@ -13,7 +13,6 @@ import ( "github.com/harishhary/blink/internal/plugin" "github.com/harishhary/blink/internal/services" "github.com/harishhary/blink/pkg/rules" - "github.com/harishhary/blink/pkg/rules/config" ) const ( @@ -122,7 +121,7 @@ func TestManagerHotReload(t *testing.T) { // available when the binary appears. writeSidecar(t, dir) - cfgMgr := config.NewRuleConfigManager(logger.New("test-config", "dev"), dir) + cfgMgr := rules.NewRuleConfigManager(logger.New("test-config", "dev"), dir) cfgSvc := services.NewConfigSyncService("test-config", "test-config", cfgMgr) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/pkg/rules/testdata/crashing_rule/main.go b/pkg/rules/testdata/crashing_rule/main.go index bf32d04..8812636 100644 --- a/pkg/rules/testdata/crashing_rule/main.go +++ b/pkg/rules/testdata/crashing_rule/main.go @@ -7,10 +7,10 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/rules/sdk" + "github.com/harishhary/blink/pkg/rules" ) -type crashingRule struct{ sdk.BaseRule } +type crashingRule struct{ rules.BaseRule } func (crashingRule) Evaluate(_ context.Context, _ events.Event) (bool, errors.Error) { return false, nil @@ -23,5 +23,5 @@ func main() { time.Sleep(300 * time.Millisecond) os.Exit(1) }() - 
sdk.Serve(crashingRule{}) + rules.Serve(crashingRule{}) } diff --git a/pkg/rules/testdata/simple_rule/main.go b/pkg/rules/testdata/simple_rule/main.go index 38f6667..9cd1895 100644 --- a/pkg/rules/testdata/simple_rule/main.go +++ b/pkg/rules/testdata/simple_rule/main.go @@ -5,15 +5,15 @@ import ( "github.com/harishhary/blink/internal/errors" "github.com/harishhary/blink/pkg/events" - "github.com/harishhary/blink/pkg/rules/sdk" + "github.com/harishhary/blink/pkg/rules" ) -type simpleRule struct{ sdk.BaseRule } +type simpleRule struct{ rules.BaseRule } func (simpleRule) Evaluate(_ context.Context, _ events.Event) (bool, errors.Error) { return true, nil } func main() { - sdk.Serve(simpleRule{}) + rules.Serve(simpleRule{}) } From 3439cb8660dea979bbb5a61890ff3182136bcc8e Mon Sep 17 00:00:00 2001 From: Harish Segar Date: Wed, 25 Mar 2026 20:45:47 +0100 Subject: [PATCH 14/14] adding better proto file --- pkg/alerts/pb/alert.proto | 81 +++--- .../rpc_enrichments/enrichment.pb.go | 263 +++--------------- .../rpc_enrichments/enrichment.proto | 25 +- .../rpc_enrichments/enrichment_grpc.pb.go | 76 ----- pkg/formatters/rpc_formatters/formatter.pb.go | 239 ++-------------- pkg/formatters/rpc_formatters/formatter.proto | 30 +- .../rpc_formatters/formatter_grpc.pb.go | 76 ----- pkg/matchers/rpc_matchers/matcher.pb.go | 259 +++-------------- pkg/matchers/rpc_matchers/matcher.proto | 27 +- pkg/matchers/rpc_matchers/matcher_grpc.pb.go | 84 +----- pkg/rules/pool.go | 2 - pkg/rules/rpc_rules/rule.pb.go | 162 +++-------- pkg/rules/rpc_rules/rule.proto | 25 +- pkg/rules/rpc_rules/rule_grpc.pb.go | 38 --- .../rpc_tuning_rules/tuning_rule.pb.go | 6 +- .../rpc_tuning_rules/tuning_rule.proto | 14 +- 16 files changed, 206 insertions(+), 1201 deletions(-) diff --git a/pkg/alerts/pb/alert.proto b/pkg/alerts/pb/alert.proto index 09bd725..ff773a2 100644 --- a/pkg/alerts/pb/alert.proto +++ b/pkg/alerts/pb/alert.proto @@ -6,52 +6,51 @@ import "google/protobuf/struct.proto"; option go_package = 
"pb/;pb"; // RuleMetadata carries rule configuration in the alert wire format. -// Mirrors pkg/rules/rule.proto Metadata with additions for file_name, -// display_name, references, and risk_score. +// Mirrors pkg/rules.RuleMetadata with additions for display_name, references, and risk_score. message RuleMetadata { - string id = 1; - string name = 2; - string description = 3; - bool enabled = 4; - string severity = 5; // "info|low|medium|high|critical" - string confidence = 6; // "very_low|low|medium|high|very_high" - repeated string merge_by_keys = 7; + string id = 1; + string name = 2; + string description = 3; + bool enabled = 4; + string severity = 5; // "info|low|medium|high|critical" + string confidence = 6; // "very_low|low|medium|high|very_high" + repeated string merge_by_keys = 7; uint32 merge_window_mins = 8; - repeated string req_subkeys = 9; - bool signal = 10; - string signal_threshold = 11; - repeated string tags = 12; - repeated string dispatchers = 13; - repeated string log_types = 14; - repeated string matchers = 15; - repeated string formatters = 16; - repeated string enrichments = 17; - repeated string tuning_rules = 18; - string version = 19; - string file_name = 20; - string display_name = 21; - repeated string references = 22; - string risk_score = 23; + repeated string req_subkeys = 9; + bool signal = 10; + string signal_threshold = 11; + repeated string tags = 12; + repeated string dispatchers = 13; + repeated string log_types = 14; + repeated string matchers = 15; + repeated string formatters = 16; + repeated string enrichments = 17; + repeated string tuning_rules = 18; + string version = 19; + string file_name = 20; + string display_name = 21; + repeated string references = 22; + string risk_score = 23; } // Alert is the Kafka wire format for a single alert travelling through the // tuner → enricher → formatter → dispatcher pipeline. 
message Alert { - string alert_id = 1; - int32 attempts = 2; - string cluster = 3; - int64 created_ns = 4; // time.Time as Unix nanoseconds - int64 dispatched_ns = 5; // time.Time as Unix nanoseconds (0 = not yet dispatched) - google.protobuf.Struct event = 6; // events.Event as structured protobuf - bool staged = 7; - repeated string outputs_sent = 8; - string log_source = 9; - string log_type = 10; - string source_entity = 11; - string source_service = 12; - string confidence = 13; - string severity = 14; - RuleMetadata rule = 15; - repeated string enrichments_applied = 16; - repeated string override_merge_by_keys = 17; // set by plugin's AlertMergeByKeys; overrides rule.merge_by_keys when non-empty + string alert_id = 1; + int32 attempts = 2; + string cluster = 3; + int64 created_ns = 4; // time.Time as Unix nanoseconds + int64 dispatched_ns = 5; // time.Time as Unix nanoseconds (0 = not yet dispatched) + google.protobuf.Struct event = 6; // events.Event as structured protobuf + bool staged = 7; + repeated string outputs_sent = 8; + string log_source = 9; + string log_type = 10; + string source_entity = 11; + string source_service = 12; + string confidence = 13; + string severity = 14; + RuleMetadata rule = 15; + repeated string enrichments_applied = 16; + repeated string override_merge_by_keys = 17; // set by plugin's AlertMergeByKeys; overrides rule.merge_by_keys when non-empty } diff --git a/pkg/enrichments/rpc_enrichments/enrichment.pb.go b/pkg/enrichments/rpc_enrichments/enrichment.pb.go index 5ec1745..c6a3c2b 100644 --- a/pkg/enrichments/rpc_enrichments/enrichment.pb.go +++ b/pkg/enrichments/rpc_enrichments/enrichment.pb.go @@ -57,90 +57,6 @@ func (*Empty) Descriptor() ([]byte, []int) { return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{0} } -type EnrichmentMetadata struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string 
`protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` - DependsOn []string `protobuf:"bytes,5,rep,name=depends_on,json=dependsOn,proto3" json:"depends_on,omitempty"` - Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EnrichmentMetadata) Reset() { - *x = EnrichmentMetadata{} - mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EnrichmentMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnrichmentMetadata) ProtoMessage() {} - -func (x *EnrichmentMetadata) ProtoReflect() protoreflect.Message { - mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnrichmentMetadata.ProtoReflect.Descriptor instead. 
-func (*EnrichmentMetadata) Descriptor() ([]byte, []int) { - return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{1} -} - -func (x *EnrichmentMetadata) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *EnrichmentMetadata) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *EnrichmentMetadata) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *EnrichmentMetadata) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *EnrichmentMetadata) GetDependsOn() []string { - if x != nil { - return x.DependsOn - } - return nil -} - -func (x *EnrichmentMetadata) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - type Alert struct { state protoimpl.MessageState `protogen:"open.v1"` Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` @@ -150,7 +66,7 @@ type Alert struct { func (x *Alert) Reset() { *x = Alert{} - mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[2] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -162,7 +78,7 @@ func (x *Alert) String() string { func (*Alert) ProtoMessage() {} func (x *Alert) ProtoReflect() protoreflect.Message { - mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[2] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -175,7 +91,7 @@ func (x *Alert) ProtoReflect() protoreflect.Message { // Deprecated: Use Alert.ProtoReflect.Descriptor instead. 
func (*Alert) Descriptor() ([]byte, []int) { - return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{2} + return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{1} } func (x *Alert) GetJson() []byte { @@ -185,94 +101,6 @@ func (x *Alert) GetJson() []byte { return nil } -type EnrichRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Alert *Alert `protobuf:"bytes,1,opt,name=alert,proto3" json:"alert,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EnrichRequest) Reset() { - *x = EnrichRequest{} - mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EnrichRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnrichRequest) ProtoMessage() {} - -func (x *EnrichRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnrichRequest.ProtoReflect.Descriptor instead. 
-func (*EnrichRequest) Descriptor() ([]byte, []int) { - return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{3} -} - -func (x *EnrichRequest) GetAlert() *Alert { - if x != nil { - return x.Alert - } - return nil -} - -type EnrichResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Alert *Alert `protobuf:"bytes,1,opt,name=alert,proto3" json:"alert,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EnrichResponse) Reset() { - *x = EnrichResponse{} - mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EnrichResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnrichResponse) ProtoMessage() {} - -func (x *EnrichResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnrichResponse.ProtoReflect.Descriptor instead. 
-func (*EnrichResponse) Descriptor() ([]byte, []int) { - return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{4} -} - -func (x *EnrichResponse) GetAlert() *Alert { - if x != nil { - return x.Alert - } - return nil -} - type EnrichBatchRequest struct { state protoimpl.MessageState `protogen:"open.v1"` Alerts []*Alert `protobuf:"bytes,1,rep,name=alerts,proto3" json:"alerts,omitempty"` @@ -282,7 +110,7 @@ type EnrichBatchRequest struct { func (x *EnrichBatchRequest) Reset() { *x = EnrichBatchRequest{} - mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[5] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -294,7 +122,7 @@ func (x *EnrichBatchRequest) String() string { func (*EnrichBatchRequest) ProtoMessage() {} func (x *EnrichBatchRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[5] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -307,7 +135,7 @@ func (x *EnrichBatchRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use EnrichBatchRequest.ProtoReflect.Descriptor instead. 
func (*EnrichBatchRequest) Descriptor() ([]byte, []int) { - return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{5} + return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{2} } func (x *EnrichBatchRequest) GetAlerts() []*Alert { @@ -326,7 +154,7 @@ type EnrichBatchResponse struct { func (x *EnrichBatchResponse) Reset() { *x = EnrichBatchResponse{} - mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[6] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -338,7 +166,7 @@ func (x *EnrichBatchResponse) String() string { func (*EnrichBatchResponse) ProtoMessage() {} func (x *EnrichBatchResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[6] + mi := &file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -351,7 +179,7 @@ func (x *EnrichBatchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use EnrichBatchResponse.ProtoReflect.Descriptor instead. 
func (*EnrichBatchResponse) Descriptor() ([]byte, []int) { - return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{6} + return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP(), []int{3} } func (x *EnrichBatchResponse) GetAlerts() []*Alert { @@ -366,30 +194,16 @@ var File_pkg_enrichments_rpc_enrichments_enrichment_proto protoreflect.FileDescr const file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDesc = "" + "\n" + "0pkg/enrichments/rpc_enrichments/enrichment.proto\x12\venrichments\"\a\n" + - "\x05Empty\"\xad\x01\n" + - "\x12EnrichmentMetadata\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\x12 \n" + - "\vdescription\x18\x03 \x01(\tR\vdescription\x12\x18\n" + - "\aenabled\x18\x04 \x01(\bR\aenabled\x12\x1d\n" + - "\n" + - "depends_on\x18\x05 \x03(\tR\tdependsOn\x12\x18\n" + - "\aversion\x18\x06 \x01(\tR\aversion\"\x1b\n" + + "\x05Empty\"\x1b\n" + "\x05Alert\x12\x12\n" + - "\x04json\x18\x01 \x01(\fR\x04json\"9\n" + - "\rEnrichRequest\x12(\n" + - "\x05alert\x18\x01 \x01(\v2\x12.enrichments.AlertR\x05alert\":\n" + - "\x0eEnrichResponse\x12(\n" + - "\x05alert\x18\x01 \x01(\v2\x12.enrichments.AlertR\x05alert\"@\n" + + "\x04json\x18\x01 \x01(\fR\x04json\"@\n" + "\x12EnrichBatchRequest\x12*\n" + "\x06alerts\x18\x01 \x03(\v2\x12.enrichments.AlertR\x06alerts\"A\n" + "\x13EnrichBatchResponse\x12*\n" + - "\x06alerts\x18\x01 \x03(\v2\x12.enrichments.AlertR\x06alerts2\xf9\x02\n" + + "\x06alerts\x18\x01 \x03(\v2\x12.enrichments.AlertR\x06alerts2\xf2\x01\n" + "\n" + - "Enrichment\x12B\n" + - "\vGetMetadata\x12\x12.enrichments.Empty\x1a\x1f.enrichments.EnrichmentMetadata\x12.\n" + - "\x04Init\x12\x12.enrichments.Empty\x1a\x12.enrichments.Empty\x12A\n" + - "\x06Enrich\x12\x1a.enrichments.EnrichRequest\x1a\x1b.enrichments.EnrichResponse\x12P\n" + + "Enrichment\x12.\n" + + "\x04Init\x12\x12.enrichments.Empty\x1a\x12.enrichments.Empty\x12P\n" + 
"\vEnrichBatch\x12\x1f.enrichments.EnrichBatchRequest\x1a .enrichments.EnrichBatchResponse\x122\n" + "\bShutdown\x12\x12.enrichments.Empty\x1a\x12.enrichments.Empty\x12.\n" + "\x04Ping\x12\x12.enrichments.Empty\x1a\x12.enrichments.EmptyB\"Z rpc_enrichments/;rpc_enrichmentsb\x06proto3" @@ -406,38 +220,29 @@ func file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescGZIP() []byte return file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDescData } -var file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_pkg_enrichments_rpc_enrichments_enrichment_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_pkg_enrichments_rpc_enrichments_enrichment_proto_goTypes = []any{ (*Empty)(nil), // 0: enrichments.Empty - (*EnrichmentMetadata)(nil), // 1: enrichments.EnrichmentMetadata - (*Alert)(nil), // 2: enrichments.Alert - (*EnrichRequest)(nil), // 3: enrichments.EnrichRequest - (*EnrichResponse)(nil), // 4: enrichments.EnrichResponse - (*EnrichBatchRequest)(nil), // 5: enrichments.EnrichBatchRequest - (*EnrichBatchResponse)(nil), // 6: enrichments.EnrichBatchResponse + (*Alert)(nil), // 1: enrichments.Alert + (*EnrichBatchRequest)(nil), // 2: enrichments.EnrichBatchRequest + (*EnrichBatchResponse)(nil), // 3: enrichments.EnrichBatchResponse } var file_pkg_enrichments_rpc_enrichments_enrichment_proto_depIdxs = []int32{ - 2, // 0: enrichments.EnrichRequest.alert:type_name -> enrichments.Alert - 2, // 1: enrichments.EnrichResponse.alert:type_name -> enrichments.Alert - 2, // 2: enrichments.EnrichBatchRequest.alerts:type_name -> enrichments.Alert - 2, // 3: enrichments.EnrichBatchResponse.alerts:type_name -> enrichments.Alert - 0, // 4: enrichments.Enrichment.GetMetadata:input_type -> enrichments.Empty - 0, // 5: enrichments.Enrichment.Init:input_type -> enrichments.Empty - 3, // 6: enrichments.Enrichment.Enrich:input_type -> enrichments.EnrichRequest - 5, // 7: enrichments.Enrichment.EnrichBatch:input_type -> 
enrichments.EnrichBatchRequest - 0, // 8: enrichments.Enrichment.Shutdown:input_type -> enrichments.Empty - 0, // 9: enrichments.Enrichment.Ping:input_type -> enrichments.Empty - 1, // 10: enrichments.Enrichment.GetMetadata:output_type -> enrichments.EnrichmentMetadata - 0, // 11: enrichments.Enrichment.Init:output_type -> enrichments.Empty - 4, // 12: enrichments.Enrichment.Enrich:output_type -> enrichments.EnrichResponse - 6, // 13: enrichments.Enrichment.EnrichBatch:output_type -> enrichments.EnrichBatchResponse - 0, // 14: enrichments.Enrichment.Shutdown:output_type -> enrichments.Empty - 0, // 15: enrichments.Enrichment.Ping:output_type -> enrichments.Empty - 10, // [10:16] is the sub-list for method output_type - 4, // [4:10] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name + 1, // 0: enrichments.EnrichBatchRequest.alerts:type_name -> enrichments.Alert + 1, // 1: enrichments.EnrichBatchResponse.alerts:type_name -> enrichments.Alert + 0, // 2: enrichments.Enrichment.Init:input_type -> enrichments.Empty + 2, // 3: enrichments.Enrichment.EnrichBatch:input_type -> enrichments.EnrichBatchRequest + 0, // 4: enrichments.Enrichment.Shutdown:input_type -> enrichments.Empty + 0, // 5: enrichments.Enrichment.Ping:input_type -> enrichments.Empty + 0, // 6: enrichments.Enrichment.Init:output_type -> enrichments.Empty + 3, // 7: enrichments.Enrichment.EnrichBatch:output_type -> enrichments.EnrichBatchResponse + 0, // 8: enrichments.Enrichment.Shutdown:output_type -> enrichments.Empty + 0, // 9: enrichments.Enrichment.Ping:output_type -> enrichments.Empty + 6, // [6:10] is the sub-list for method output_type + 2, // [2:6] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { 
file_pkg_enrichments_rpc_enrichments_enrichment_proto_init() } @@ -451,7 +256,7 @@ func file_pkg_enrichments_rpc_enrichments_enrichment_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDesc), len(file_pkg_enrichments_rpc_enrichments_enrichment_proto_rawDesc)), NumEnums: 0, - NumMessages: 7, + NumMessages: 4, NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/enrichments/rpc_enrichments/enrichment.proto b/pkg/enrichments/rpc_enrichments/enrichment.proto index 96e6f2f..7ad17b1 100644 --- a/pkg/enrichments/rpc_enrichments/enrichment.proto +++ b/pkg/enrichments/rpc_enrichments/enrichment.proto @@ -3,25 +3,16 @@ package enrichments; option go_package = "rpc_enrichments/;rpc_enrichments"; message Empty {} -message EnrichmentMetadata { - string id = 1; - string name = 2; - string description = 3; - bool enabled = 4; - repeated string depends_on = 5; - string version = 6; -} -message Alert { bytes json = 1; } -message EnrichRequest { Alert alert = 1; } -message EnrichResponse { Alert alert = 1; } + +message Alert { bytes json = 1; } + message EnrichBatchRequest { repeated Alert alerts = 1; } + message EnrichBatchResponse { repeated Alert alerts = 1; } service Enrichment { - rpc GetMetadata(Empty) returns (EnrichmentMetadata); - rpc Init(Empty) returns (Empty); - rpc Enrich(EnrichRequest) returns (EnrichResponse); - rpc EnrichBatch(EnrichBatchRequest) returns (EnrichBatchResponse); - rpc Shutdown(Empty) returns (Empty); - rpc Ping(Empty) returns (Empty); + rpc Init(Empty) returns (Empty); + rpc EnrichBatch(EnrichBatchRequest) returns (EnrichBatchResponse); + rpc Shutdown(Empty) returns (Empty); + rpc Ping(Empty) returns (Empty); } diff --git a/pkg/enrichments/rpc_enrichments/enrichment_grpc.pb.go b/pkg/enrichments/rpc_enrichments/enrichment_grpc.pb.go index 63c5821..36e69ca 100644 --- a/pkg/enrichments/rpc_enrichments/enrichment_grpc.pb.go +++ 
b/pkg/enrichments/rpc_enrichments/enrichment_grpc.pb.go @@ -19,9 +19,7 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - Enrichment_GetMetadata_FullMethodName = "/enrichments.Enrichment/GetMetadata" Enrichment_Init_FullMethodName = "/enrichments.Enrichment/Init" - Enrichment_Enrich_FullMethodName = "/enrichments.Enrichment/Enrich" Enrichment_EnrichBatch_FullMethodName = "/enrichments.Enrichment/EnrichBatch" Enrichment_Shutdown_FullMethodName = "/enrichments.Enrichment/Shutdown" Enrichment_Ping_FullMethodName = "/enrichments.Enrichment/Ping" @@ -31,9 +29,7 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type EnrichmentClient interface { - GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*EnrichmentMetadata, error) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) - Enrich(ctx context.Context, in *EnrichRequest, opts ...grpc.CallOption) (*EnrichResponse, error) EnrichBatch(ctx context.Context, in *EnrichBatchRequest, opts ...grpc.CallOption) (*EnrichBatchResponse, error) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Ping(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) @@ -47,16 +43,6 @@ func NewEnrichmentClient(cc grpc.ClientConnInterface) EnrichmentClient { return &enrichmentClient{cc} } -func (c *enrichmentClient) GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*EnrichmentMetadata, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(EnrichmentMetadata) - err := c.cc.Invoke(ctx, Enrichment_GetMetadata_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *enrichmentClient) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(Empty) @@ -67,16 +53,6 @@ func (c *enrichmentClient) Init(ctx context.Context, in *Empty, opts ...grpc.Cal return out, nil } -func (c *enrichmentClient) Enrich(ctx context.Context, in *EnrichRequest, opts ...grpc.CallOption) (*EnrichResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(EnrichResponse) - err := c.cc.Invoke(ctx, Enrichment_Enrich_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *enrichmentClient) EnrichBatch(ctx context.Context, in *EnrichBatchRequest, opts ...grpc.CallOption) (*EnrichBatchResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(EnrichBatchResponse) @@ -111,9 +87,7 @@ func (c *enrichmentClient) Ping(ctx context.Context, in *Empty, opts ...grpc.Cal // All implementations must embed UnimplementedEnrichmentServer // for forward compatibility. type EnrichmentServer interface { - GetMetadata(context.Context, *Empty) (*EnrichmentMetadata, error) Init(context.Context, *Empty) (*Empty, error) - Enrich(context.Context, *EnrichRequest) (*EnrichResponse, error) EnrichBatch(context.Context, *EnrichBatchRequest) (*EnrichBatchResponse, error) Shutdown(context.Context, *Empty) (*Empty, error) Ping(context.Context, *Empty) (*Empty, error) @@ -127,15 +101,9 @@ type EnrichmentServer interface { // pointer dereference when methods are called. 
type UnimplementedEnrichmentServer struct{} -func (UnimplementedEnrichmentServer) GetMetadata(context.Context, *Empty) (*EnrichmentMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMetadata not implemented") -} func (UnimplementedEnrichmentServer) Init(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Init not implemented") } -func (UnimplementedEnrichmentServer) Enrich(context.Context, *EnrichRequest) (*EnrichResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Enrich not implemented") -} func (UnimplementedEnrichmentServer) EnrichBatch(context.Context, *EnrichBatchRequest) (*EnrichBatchResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method EnrichBatch not implemented") } @@ -166,24 +134,6 @@ func RegisterEnrichmentServer(s grpc.ServiceRegistrar, srv EnrichmentServer) { s.RegisterService(&Enrichment_ServiceDesc, srv) } -func _Enrichment_GetMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(EnrichmentServer).GetMetadata(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Enrichment_GetMetadata_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(EnrichmentServer).GetMetadata(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - func _Enrichment_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { @@ -202,24 +152,6 @@ func _Enrichment_Init_Handler(srv interface{}, ctx context.Context, dec func(int return interceptor(ctx, in, info, handler) } -func _Enrichment_Enrich_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EnrichRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(EnrichmentServer).Enrich(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Enrichment_Enrich_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(EnrichmentServer).Enrich(ctx, req.(*EnrichRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _Enrichment_EnrichBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(EnrichBatchRequest) if err := dec(in); err != nil { @@ -281,18 +213,10 @@ var Enrichment_ServiceDesc = grpc.ServiceDesc{ ServiceName: "enrichments.Enrichment", HandlerType: (*EnrichmentServer)(nil), Methods: []grpc.MethodDesc{ - { - MethodName: "GetMetadata", - Handler: _Enrichment_GetMetadata_Handler, - }, { MethodName: "Init", Handler: _Enrichment_Init_Handler, }, - { - MethodName: "Enrich", - Handler: _Enrichment_Enrich_Handler, - }, { MethodName: "EnrichBatch", Handler: _Enrichment_EnrichBatch_Handler, diff --git a/pkg/formatters/rpc_formatters/formatter.pb.go b/pkg/formatters/rpc_formatters/formatter.pb.go index 209cd55..fed3697 100644 --- a/pkg/formatters/rpc_formatters/formatter.pb.go +++ b/pkg/formatters/rpc_formatters/formatter.pb.go @@ -57,180 +57,16 @@ func (*Empty) Descriptor() ([]byte, []int) { return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{0} } -type FormatterMetadata struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Enabled bool 
`protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` - Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FormatterMetadata) Reset() { - *x = FormatterMetadata{} - mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FormatterMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FormatterMetadata) ProtoMessage() {} - -func (x *FormatterMetadata) ProtoReflect() protoreflect.Message { - mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FormatterMetadata.ProtoReflect.Descriptor instead. -func (*FormatterMetadata) Descriptor() ([]byte, []int) { - return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{1} -} - -func (x *FormatterMetadata) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *FormatterMetadata) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *FormatterMetadata) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *FormatterMetadata) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *FormatterMetadata) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -type FormatRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - AlertJson []byte `protobuf:"bytes,1,opt,name=alert_json,json=alertJson,proto3" json:"alert_json,omitempty"` // JSON-encoded alerts.Alert - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FormatRequest) Reset() 
{ - *x = FormatRequest{} - mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FormatRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FormatRequest) ProtoMessage() {} - -func (x *FormatRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FormatRequest.ProtoReflect.Descriptor instead. -func (*FormatRequest) Descriptor() ([]byte, []int) { - return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{2} -} - -func (x *FormatRequest) GetAlertJson() []byte { - if x != nil { - return x.AlertJson - } - return nil -} - -type FormatResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - ResultJson []byte `protobuf:"bytes,1,opt,name=result_json,json=resultJson,proto3" json:"result_json,omitempty"` // JSON-encoded map[string]any - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FormatResponse) Reset() { - *x = FormatResponse{} - mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FormatResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FormatResponse) ProtoMessage() {} - -func (x *FormatResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FormatResponse.ProtoReflect.Descriptor instead. 
-func (*FormatResponse) Descriptor() ([]byte, []int) { - return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{3} -} - -func (x *FormatResponse) GetResultJson() []byte { - if x != nil { - return x.ResultJson - } - return nil -} - type FormatBatchRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - AlertJson [][]byte `protobuf:"bytes,1,rep,name=alert_json,json=alertJson,proto3" json:"alert_json,omitempty"` // one JSON-encoded alerts.Alert per alert + AlertJson [][]byte `protobuf:"bytes,1,rep,name=alert_json,json=alertJson,proto3" json:"alert_json,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *FormatBatchRequest) Reset() { *x = FormatBatchRequest{} - mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[4] + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -242,7 +78,7 @@ func (x *FormatBatchRequest) String() string { func (*FormatBatchRequest) ProtoMessage() {} func (x *FormatBatchRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[4] + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -255,7 +91,7 @@ func (x *FormatBatchRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FormatBatchRequest.ProtoReflect.Descriptor instead. 
func (*FormatBatchRequest) Descriptor() ([]byte, []int) { - return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{4} + return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{1} } func (x *FormatBatchRequest) GetAlertJson() [][]byte { @@ -267,14 +103,14 @@ func (x *FormatBatchRequest) GetAlertJson() [][]byte { type FormatBatchResponse struct { state protoimpl.MessageState `protogen:"open.v1"` - ResultJson [][]byte `protobuf:"bytes,1,rep,name=result_json,json=resultJson,proto3" json:"result_json,omitempty"` // one JSON-encoded map[string]any per alert + ResultJson [][]byte `protobuf:"bytes,1,rep,name=result_json,json=resultJson,proto3" json:"result_json,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *FormatBatchResponse) Reset() { *x = FormatBatchResponse{} - mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[5] + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -286,7 +122,7 @@ func (x *FormatBatchResponse) String() string { func (*FormatBatchResponse) ProtoMessage() {} func (x *FormatBatchResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[5] + mi := &file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -299,7 +135,7 @@ func (x *FormatBatchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FormatBatchResponse.ProtoReflect.Descriptor instead. 
func (*FormatBatchResponse) Descriptor() ([]byte, []int) { - return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{5} + return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP(), []int{2} } func (x *FormatBatchResponse) GetResultJson() [][]byte { @@ -315,32 +151,18 @@ const file_pkg_formatters_rpc_formatters_formatter_proto_rawDesc = "" + "\n" + "-pkg/formatters/rpc_formatters/formatter.proto\x12\n" + "formatters\"\a\n" + - "\x05Empty\"\x8d\x01\n" + - "\x11FormatterMetadata\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\x12 \n" + - "\vdescription\x18\x03 \x01(\tR\vdescription\x12\x18\n" + - "\aenabled\x18\x04 \x01(\bR\aenabled\x12\x18\n" + - "\aversion\x18\x05 \x01(\tR\aversion\".\n" + - "\rFormatRequest\x12\x1d\n" + - "\n" + - "alert_json\x18\x01 \x01(\fR\talertJson\"1\n" + - "\x0eFormatResponse\x12\x1f\n" + - "\vresult_json\x18\x01 \x01(\fR\n" + - "resultJson\"3\n" + + "\x05Empty\"3\n" + "\x12FormatBatchRequest\x12\x1d\n" + "\n" + "alert_json\x18\x01 \x03(\fR\talertJson\"6\n" + "\x13FormatBatchResponse\x12\x1f\n" + "\vresult_json\x18\x01 \x03(\fR\n" + - "resultJson2\xeb\x02\n" + - "\tFormatter\x12?\n" + - "\vGetMetadata\x12\x11.formatters.Empty\x1a\x1d.formatters.FormatterMetadata\x12,\n" + - "\x04Init\x12\x11.formatters.Empty\x1a\x11.formatters.Empty\x12?\n" + - "\x06Format\x12\x19.formatters.FormatRequest\x1a\x1a.formatters.FormatResponse\x12N\n" + + "resultJson2\xe9\x01\n" + + "\tFormatter\x12,\n" + + "\x04Init\x12\x11.formatters.Empty\x1a\x11.formatters.Empty\x12N\n" + "\vFormatBatch\x12\x1e.formatters.FormatBatchRequest\x1a\x1f.formatters.FormatBatchResponse\x120\n" + "\bShutdown\x12\x11.formatters.Empty\x1a\x11.formatters.Empty\x12,\n" + - "\x04Ping\x12\x11.formatters.Empty\x1a\x11.formatters.EmptyB;Z9github.com/harishhary/blink/pkg/formatters/rpc_formattersb\x06proto3" + "\x04Ping\x12\x11.formatters.Empty\x1a\x11.formatters.EmptyB 
Z\x1erpc_formatters/;rpc_formattersb\x06proto3" var ( file_pkg_formatters_rpc_formatters_formatter_proto_rawDescOnce sync.Once @@ -354,30 +176,23 @@ func file_pkg_formatters_rpc_formatters_formatter_proto_rawDescGZIP() []byte { return file_pkg_formatters_rpc_formatters_formatter_proto_rawDescData } -var file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_pkg_formatters_rpc_formatters_formatter_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_pkg_formatters_rpc_formatters_formatter_proto_goTypes = []any{ (*Empty)(nil), // 0: formatters.Empty - (*FormatterMetadata)(nil), // 1: formatters.FormatterMetadata - (*FormatRequest)(nil), // 2: formatters.FormatRequest - (*FormatResponse)(nil), // 3: formatters.FormatResponse - (*FormatBatchRequest)(nil), // 4: formatters.FormatBatchRequest - (*FormatBatchResponse)(nil), // 5: formatters.FormatBatchResponse + (*FormatBatchRequest)(nil), // 1: formatters.FormatBatchRequest + (*FormatBatchResponse)(nil), // 2: formatters.FormatBatchResponse } var file_pkg_formatters_rpc_formatters_formatter_proto_depIdxs = []int32{ - 0, // 0: formatters.Formatter.GetMetadata:input_type -> formatters.Empty - 0, // 1: formatters.Formatter.Init:input_type -> formatters.Empty - 2, // 2: formatters.Formatter.Format:input_type -> formatters.FormatRequest - 4, // 3: formatters.Formatter.FormatBatch:input_type -> formatters.FormatBatchRequest - 0, // 4: formatters.Formatter.Shutdown:input_type -> formatters.Empty - 0, // 5: formatters.Formatter.Ping:input_type -> formatters.Empty - 1, // 6: formatters.Formatter.GetMetadata:output_type -> formatters.FormatterMetadata - 0, // 7: formatters.Formatter.Init:output_type -> formatters.Empty - 3, // 8: formatters.Formatter.Format:output_type -> formatters.FormatResponse - 5, // 9: formatters.Formatter.FormatBatch:output_type -> formatters.FormatBatchResponse - 0, // 10: formatters.Formatter.Shutdown:output_type -> formatters.Empty - 0, // 11: 
formatters.Formatter.Ping:output_type -> formatters.Empty - 6, // [6:12] is the sub-list for method output_type - 0, // [0:6] is the sub-list for method input_type + 0, // 0: formatters.Formatter.Init:input_type -> formatters.Empty + 1, // 1: formatters.Formatter.FormatBatch:input_type -> formatters.FormatBatchRequest + 0, // 2: formatters.Formatter.Shutdown:input_type -> formatters.Empty + 0, // 3: formatters.Formatter.Ping:input_type -> formatters.Empty + 0, // 4: formatters.Formatter.Init:output_type -> formatters.Empty + 2, // 5: formatters.Formatter.FormatBatch:output_type -> formatters.FormatBatchResponse + 0, // 6: formatters.Formatter.Shutdown:output_type -> formatters.Empty + 0, // 7: formatters.Formatter.Ping:output_type -> formatters.Empty + 4, // [4:8] is the sub-list for method output_type + 0, // [0:4] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name @@ -394,7 +209,7 @@ func file_pkg_formatters_rpc_formatters_formatter_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_formatters_rpc_formatters_formatter_proto_rawDesc), len(file_pkg_formatters_rpc_formatters_formatter_proto_rawDesc)), NumEnums: 0, - NumMessages: 6, + NumMessages: 3, NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/formatters/rpc_formatters/formatter.proto b/pkg/formatters/rpc_formatters/formatter.proto index ce27f8f..e62175a 100644 --- a/pkg/formatters/rpc_formatters/formatter.proto +++ b/pkg/formatters/rpc_formatters/formatter.proto @@ -1,39 +1,15 @@ syntax = "proto3"; - package formatters; - -option go_package = "github.com/harishhary/blink/pkg/formatters/rpc_formatters"; +option go_package = "rpc_formatters/;rpc_formatters"; message Empty {} -message FormatterMetadata { - string id = 1; - string name = 2; - string description = 3; - bool enabled = 4; - string version = 5; -} 
- -message FormatRequest { - bytes alert_json = 1; // JSON-encoded alerts.Alert -} - -message FormatResponse { - bytes result_json = 1; // JSON-encoded map[string]any -} +message FormatBatchRequest { repeated bytes alert_json = 1; } // one JSON-encoded alerts.Alert per alert -message FormatBatchRequest { - repeated bytes alert_json = 1; // one JSON-encoded alerts.Alert per alert -} - -message FormatBatchResponse { - repeated bytes result_json = 1; // one JSON-encoded map[string]any per alert -} +message FormatBatchResponse { repeated bytes result_json = 1; } // one JSON-encoded map[string]any per alert service Formatter { - rpc GetMetadata(Empty) returns (FormatterMetadata); rpc Init(Empty) returns (Empty); - rpc Format(FormatRequest) returns (FormatResponse); rpc FormatBatch(FormatBatchRequest) returns (FormatBatchResponse); rpc Shutdown(Empty) returns (Empty); rpc Ping(Empty) returns (Empty); diff --git a/pkg/formatters/rpc_formatters/formatter_grpc.pb.go b/pkg/formatters/rpc_formatters/formatter_grpc.pb.go index 0a47e16..e00b099 100644 --- a/pkg/formatters/rpc_formatters/formatter_grpc.pb.go +++ b/pkg/formatters/rpc_formatters/formatter_grpc.pb.go @@ -19,9 +19,7 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - Formatter_GetMetadata_FullMethodName = "/formatters.Formatter/GetMetadata" Formatter_Init_FullMethodName = "/formatters.Formatter/Init" - Formatter_Format_FullMethodName = "/formatters.Formatter/Format" Formatter_FormatBatch_FullMethodName = "/formatters.Formatter/FormatBatch" Formatter_Shutdown_FullMethodName = "/formatters.Formatter/Shutdown" Formatter_Ping_FullMethodName = "/formatters.Formatter/Ping" @@ -31,9 +29,7 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
type FormatterClient interface { - GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*FormatterMetadata, error) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) - Format(ctx context.Context, in *FormatRequest, opts ...grpc.CallOption) (*FormatResponse, error) FormatBatch(ctx context.Context, in *FormatBatchRequest, opts ...grpc.CallOption) (*FormatBatchResponse, error) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Ping(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) @@ -47,16 +43,6 @@ func NewFormatterClient(cc grpc.ClientConnInterface) FormatterClient { return &formatterClient{cc} } -func (c *formatterClient) GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*FormatterMetadata, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(FormatterMetadata) - err := c.cc.Invoke(ctx, Formatter_GetMetadata_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *formatterClient) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) @@ -67,16 +53,6 @@ func (c *formatterClient) Init(ctx context.Context, in *Empty, opts ...grpc.Call return out, nil } -func (c *formatterClient) Format(ctx context.Context, in *FormatRequest, opts ...grpc.CallOption) (*FormatResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(FormatResponse) - err := c.cc.Invoke(ctx, Formatter_Format_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *formatterClient) FormatBatch(ctx context.Context, in *FormatBatchRequest, opts ...grpc.CallOption) (*FormatBatchResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(FormatBatchResponse) @@ -111,9 +87,7 @@ func (c *formatterClient) Ping(ctx context.Context, in *Empty, opts ...grpc.Call // All implementations must embed UnimplementedFormatterServer // for forward compatibility. type FormatterServer interface { - GetMetadata(context.Context, *Empty) (*FormatterMetadata, error) Init(context.Context, *Empty) (*Empty, error) - Format(context.Context, *FormatRequest) (*FormatResponse, error) FormatBatch(context.Context, *FormatBatchRequest) (*FormatBatchResponse, error) Shutdown(context.Context, *Empty) (*Empty, error) Ping(context.Context, *Empty) (*Empty, error) @@ -127,15 +101,9 @@ type FormatterServer interface { // pointer dereference when methods are called. type UnimplementedFormatterServer struct{} -func (UnimplementedFormatterServer) GetMetadata(context.Context, *Empty) (*FormatterMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMetadata not implemented") -} func (UnimplementedFormatterServer) Init(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Init not implemented") } -func (UnimplementedFormatterServer) Format(context.Context, *FormatRequest) (*FormatResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Format not implemented") -} func (UnimplementedFormatterServer) FormatBatch(context.Context, *FormatBatchRequest) (*FormatBatchResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FormatBatch not implemented") } @@ -166,24 +134,6 @@ func RegisterFormatterServer(s grpc.ServiceRegistrar, srv FormatterServer) { s.RegisterService(&Formatter_ServiceDesc, srv) } -func _Formatter_GetMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FormatterServer).GetMetadata(ctx, in) - } - info := 
&grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Formatter_GetMetadata_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FormatterServer).GetMetadata(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - func _Formatter_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { @@ -202,24 +152,6 @@ func _Formatter_Init_Handler(srv interface{}, ctx context.Context, dec func(inte return interceptor(ctx, in, info, handler) } -func _Formatter_Format_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FormatRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FormatterServer).Format(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Formatter_Format_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FormatterServer).Format(ctx, req.(*FormatRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _Formatter_FormatBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FormatBatchRequest) if err := dec(in); err != nil { @@ -281,18 +213,10 @@ var Formatter_ServiceDesc = grpc.ServiceDesc{ ServiceName: "formatters.Formatter", HandlerType: (*FormatterServer)(nil), Methods: []grpc.MethodDesc{ - { - MethodName: "GetMetadata", - Handler: _Formatter_GetMetadata_Handler, - }, { MethodName: "Init", Handler: _Formatter_Init_Handler, }, - { - MethodName: "Format", - Handler: _Formatter_Format_Handler, - }, { MethodName: "FormatBatch", Handler: _Formatter_FormatBatch_Handler, diff --git a/pkg/matchers/rpc_matchers/matcher.pb.go 
b/pkg/matchers/rpc_matchers/matcher.pb.go index 051d6a4..6de2e4f 100644 --- a/pkg/matchers/rpc_matchers/matcher.pb.go +++ b/pkg/matchers/rpc_matchers/matcher.pb.go @@ -57,90 +57,6 @@ func (*Empty) Descriptor() ([]byte, []int) { return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{0} } -type MatcherMetadata struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` - Global bool `protobuf:"varint,5,opt,name=global,proto3" json:"global,omitempty"` - Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MatcherMetadata) Reset() { - *x = MatcherMetadata{} - mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MatcherMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MatcherMetadata) ProtoMessage() {} - -func (x *MatcherMetadata) ProtoReflect() protoreflect.Message { - mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MatcherMetadata.ProtoReflect.Descriptor instead. 
-func (*MatcherMetadata) Descriptor() ([]byte, []int) { - return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{1} -} - -func (x *MatcherMetadata) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *MatcherMetadata) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *MatcherMetadata) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *MatcherMetadata) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *MatcherMetadata) GetGlobal() bool { - if x != nil { - return x.Global - } - return false -} - -func (x *MatcherMetadata) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - type Event struct { state protoimpl.MessageState `protogen:"open.v1"` Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` @@ -150,7 +66,7 @@ type Event struct { func (x *Event) Reset() { *x = Event{} - mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[2] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -162,7 +78,7 @@ func (x *Event) String() string { func (*Event) ProtoMessage() {} func (x *Event) ProtoReflect() protoreflect.Message { - mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[2] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -175,7 +91,7 @@ func (x *Event) ProtoReflect() protoreflect.Message { // Deprecated: Use Event.ProtoReflect.Descriptor instead. 
func (*Event) Descriptor() ([]byte, []int) { - return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{2} + return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{1} } func (x *Event) GetJson() []byte { @@ -185,94 +101,6 @@ func (x *Event) GetJson() []byte { return nil } -type MatchRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Event *Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MatchRequest) Reset() { - *x = MatchRequest{} - mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MatchRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MatchRequest) ProtoMessage() {} - -func (x *MatchRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MatchRequest.ProtoReflect.Descriptor instead. 
-func (*MatchRequest) Descriptor() ([]byte, []int) { - return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{3} -} - -func (x *MatchRequest) GetEvent() *Event { - if x != nil { - return x.Event - } - return nil -} - -type MatchResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Matched bool `protobuf:"varint,1,opt,name=matched,proto3" json:"matched,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MatchResponse) Reset() { - *x = MatchResponse{} - mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MatchResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MatchResponse) ProtoMessage() {} - -func (x *MatchResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MatchResponse.ProtoReflect.Descriptor instead. 
-func (*MatchResponse) Descriptor() ([]byte, []int) { - return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{4} -} - -func (x *MatchResponse) GetMatched() bool { - if x != nil { - return x.Matched - } - return false -} - type MatchBatchRequest struct { state protoimpl.MessageState `protogen:"open.v1"` Events []*Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` @@ -282,7 +110,7 @@ type MatchBatchRequest struct { func (x *MatchBatchRequest) Reset() { *x = MatchBatchRequest{} - mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[5] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -294,7 +122,7 @@ func (x *MatchBatchRequest) String() string { func (*MatchBatchRequest) ProtoMessage() {} func (x *MatchBatchRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[5] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -307,7 +135,7 @@ func (x *MatchBatchRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MatchBatchRequest.ProtoReflect.Descriptor instead. 
func (*MatchBatchRequest) Descriptor() ([]byte, []int) { - return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{5} + return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{2} } func (x *MatchBatchRequest) GetEvents() []*Event { @@ -326,7 +154,7 @@ type MatchBatchResponse struct { func (x *MatchBatchResponse) Reset() { *x = MatchBatchResponse{} - mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[6] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -338,7 +166,7 @@ func (x *MatchBatchResponse) String() string { func (*MatchBatchResponse) ProtoMessage() {} func (x *MatchBatchResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[6] + mi := &file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -351,7 +179,7 @@ func (x *MatchBatchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MatchBatchResponse.ProtoReflect.Descriptor instead. 
func (*MatchBatchResponse) Descriptor() ([]byte, []int) { - return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{6} + return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP(), []int{3} } func (x *MatchBatchResponse) GetMatched() []bool { @@ -366,28 +194,15 @@ var File_pkg_matchers_rpc_matchers_matcher_proto protoreflect.FileDescriptor const file_pkg_matchers_rpc_matchers_matcher_proto_rawDesc = "" + "\n" + "'pkg/matchers/rpc_matchers/matcher.proto\x12\bmatchers\"\a\n" + - "\x05Empty\"\xa3\x01\n" + - "\x0fMatcherMetadata\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\x12 \n" + - "\vdescription\x18\x03 \x01(\tR\vdescription\x12\x18\n" + - "\aenabled\x18\x04 \x01(\bR\aenabled\x12\x16\n" + - "\x06global\x18\x05 \x01(\bR\x06global\x12\x18\n" + - "\aversion\x18\x06 \x01(\tR\aversion\"\x1b\n" + + "\x05Empty\"\x1b\n" + "\x05Event\x12\x12\n" + - "\x04json\x18\x01 \x01(\fR\x04json\"5\n" + - "\fMatchRequest\x12%\n" + - "\x05event\x18\x01 \x01(\v2\x0f.matchers.EventR\x05event\")\n" + - "\rMatchResponse\x12\x18\n" + - "\amatched\x18\x01 \x01(\bR\amatched\"<\n" + + "\x04json\x18\x01 \x01(\fR\x04json\"<\n" + "\x11MatchBatchRequest\x12'\n" + "\x06events\x18\x01 \x03(\v2\x0f.matchers.EventR\x06events\".\n" + "\x12MatchBatchResponse\x12\x18\n" + - "\amatched\x18\x01 \x03(\bR\amatched2\xc9\x02\n" + - "\aMatcher\x129\n" + - "\vGetMetadata\x12\x0f.matchers.Empty\x1a\x19.matchers.MatcherMetadata\x12(\n" + - "\x04Init\x12\x0f.matchers.Empty\x1a\x0f.matchers.Empty\x128\n" + - "\x05Match\x12\x16.matchers.MatchRequest\x1a\x17.matchers.MatchResponse\x12G\n" + + "\amatched\x18\x01 \x03(\bR\amatched2\xd4\x01\n" + + "\aMatcher\x12(\n" + + "\x04Init\x12\x0f.matchers.Empty\x1a\x0f.matchers.Empty\x12G\n" + "\n" + "MatchBatch\x12\x1b.matchers.MatchBatchRequest\x1a\x1c.matchers.MatchBatchResponse\x12,\n" + "\bShutdown\x12\x0f.matchers.Empty\x1a\x0f.matchers.Empty\x12(\n" + @@ -405,36 +220,28 @@ func 
file_pkg_matchers_rpc_matchers_matcher_proto_rawDescGZIP() []byte { return file_pkg_matchers_rpc_matchers_matcher_proto_rawDescData } -var file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_pkg_matchers_rpc_matchers_matcher_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_pkg_matchers_rpc_matchers_matcher_proto_goTypes = []any{ (*Empty)(nil), // 0: matchers.Empty - (*MatcherMetadata)(nil), // 1: matchers.MatcherMetadata - (*Event)(nil), // 2: matchers.Event - (*MatchRequest)(nil), // 3: matchers.MatchRequest - (*MatchResponse)(nil), // 4: matchers.MatchResponse - (*MatchBatchRequest)(nil), // 5: matchers.MatchBatchRequest - (*MatchBatchResponse)(nil), // 6: matchers.MatchBatchResponse + (*Event)(nil), // 1: matchers.Event + (*MatchBatchRequest)(nil), // 2: matchers.MatchBatchRequest + (*MatchBatchResponse)(nil), // 3: matchers.MatchBatchResponse } var file_pkg_matchers_rpc_matchers_matcher_proto_depIdxs = []int32{ - 2, // 0: matchers.MatchRequest.event:type_name -> matchers.Event - 2, // 1: matchers.MatchBatchRequest.events:type_name -> matchers.Event - 0, // 2: matchers.Matcher.GetMetadata:input_type -> matchers.Empty - 0, // 3: matchers.Matcher.Init:input_type -> matchers.Empty - 3, // 4: matchers.Matcher.Match:input_type -> matchers.MatchRequest - 5, // 5: matchers.Matcher.MatchBatch:input_type -> matchers.MatchBatchRequest - 0, // 6: matchers.Matcher.Shutdown:input_type -> matchers.Empty - 0, // 7: matchers.Matcher.Ping:input_type -> matchers.Empty - 1, // 8: matchers.Matcher.GetMetadata:output_type -> matchers.MatcherMetadata - 0, // 9: matchers.Matcher.Init:output_type -> matchers.Empty - 4, // 10: matchers.Matcher.Match:output_type -> matchers.MatchResponse - 6, // 11: matchers.Matcher.MatchBatch:output_type -> matchers.MatchBatchResponse - 0, // 12: matchers.Matcher.Shutdown:output_type -> matchers.Empty - 0, // 13: matchers.Matcher.Ping:output_type -> matchers.Empty - 8, // [8:14] is the sub-list 
for method output_type - 2, // [2:8] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 1, // 0: matchers.MatchBatchRequest.events:type_name -> matchers.Event + 0, // 1: matchers.Matcher.Init:input_type -> matchers.Empty + 2, // 2: matchers.Matcher.MatchBatch:input_type -> matchers.MatchBatchRequest + 0, // 3: matchers.Matcher.Shutdown:input_type -> matchers.Empty + 0, // 4: matchers.Matcher.Ping:input_type -> matchers.Empty + 0, // 5: matchers.Matcher.Init:output_type -> matchers.Empty + 3, // 6: matchers.Matcher.MatchBatch:output_type -> matchers.MatchBatchResponse + 0, // 7: matchers.Matcher.Shutdown:output_type -> matchers.Empty + 0, // 8: matchers.Matcher.Ping:output_type -> matchers.Empty + 5, // [5:9] is the sub-list for method output_type + 1, // [1:5] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_pkg_matchers_rpc_matchers_matcher_proto_init() } @@ -448,7 +255,7 @@ func file_pkg_matchers_rpc_matchers_matcher_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_matchers_rpc_matchers_matcher_proto_rawDesc), len(file_pkg_matchers_rpc_matchers_matcher_proto_rawDesc)), NumEnums: 0, - NumMessages: 7, + NumMessages: 4, NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/matchers/rpc_matchers/matcher.proto b/pkg/matchers/rpc_matchers/matcher.proto index 15bcf76..72f4433 100644 --- a/pkg/matchers/rpc_matchers/matcher.proto +++ b/pkg/matchers/rpc_matchers/matcher.proto @@ -3,25 +3,16 @@ package matchers; option go_package = "rpc_matchers/;rpc_matchers"; message Empty {} -message MatcherMetadata { - string id = 1; - string name = 2; - string description = 3; - bool enabled = 4; - bool global = 
5; - string version = 6; -} -message Event { bytes json = 1; } -message MatchRequest { Event event = 1; } -message MatchResponse { bool matched = 1; } + +message Event { bytes json = 1; } + message MatchBatchRequest { repeated Event events = 1; } -message MatchBatchResponse { repeated bool matched = 1; } + +message MatchBatchResponse { repeated bool matched = 1; } service Matcher { - rpc GetMetadata(Empty) returns (MatcherMetadata); - rpc Init(Empty) returns (Empty); - rpc Match(MatchRequest) returns (MatchResponse); - rpc MatchBatch(MatchBatchRequest) returns (MatchBatchResponse); - rpc Shutdown(Empty) returns (Empty); - rpc Ping(Empty) returns (Empty); + rpc Init(Empty) returns (Empty); + rpc MatchBatch(MatchBatchRequest) returns (MatchBatchResponse); + rpc Shutdown(Empty) returns (Empty); + rpc Ping(Empty) returns (Empty); } diff --git a/pkg/matchers/rpc_matchers/matcher_grpc.pb.go b/pkg/matchers/rpc_matchers/matcher_grpc.pb.go index a76a5d6..49666b9 100644 --- a/pkg/matchers/rpc_matchers/matcher_grpc.pb.go +++ b/pkg/matchers/rpc_matchers/matcher_grpc.pb.go @@ -19,21 +19,17 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - Matcher_GetMetadata_FullMethodName = "/matchers.Matcher/GetMetadata" - Matcher_Init_FullMethodName = "/matchers.Matcher/Init" - Matcher_Match_FullMethodName = "/matchers.Matcher/Match" - Matcher_MatchBatch_FullMethodName = "/matchers.Matcher/MatchBatch" - Matcher_Shutdown_FullMethodName = "/matchers.Matcher/Shutdown" - Matcher_Ping_FullMethodName = "/matchers.Matcher/Ping" + Matcher_Init_FullMethodName = "/matchers.Matcher/Init" + Matcher_MatchBatch_FullMethodName = "/matchers.Matcher/MatchBatch" + Matcher_Shutdown_FullMethodName = "/matchers.Matcher/Shutdown" + Matcher_Ping_FullMethodName = "/matchers.Matcher/Ping" ) // MatcherClient is the client API for Matcher service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
type MatcherClient interface { - GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*MatcherMetadata, error) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) - Match(ctx context.Context, in *MatchRequest, opts ...grpc.CallOption) (*MatchResponse, error) MatchBatch(ctx context.Context, in *MatchBatchRequest, opts ...grpc.CallOption) (*MatchBatchResponse, error) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Ping(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) @@ -47,16 +43,6 @@ func NewMatcherClient(cc grpc.ClientConnInterface) MatcherClient { return &matcherClient{cc} } -func (c *matcherClient) GetMetadata(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*MatcherMetadata, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(MatcherMetadata) - err := c.cc.Invoke(ctx, Matcher_GetMetadata_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *matcherClient) Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) @@ -67,16 +53,6 @@ func (c *matcherClient) Init(ctx context.Context, in *Empty, opts ...grpc.CallOp return out, nil } -func (c *matcherClient) Match(ctx context.Context, in *MatchRequest, opts ...grpc.CallOption) (*MatchResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(MatchResponse) - err := c.cc.Invoke(ctx, Matcher_Match_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *matcherClient) MatchBatch(ctx context.Context, in *MatchBatchRequest, opts ...grpc.CallOption) (*MatchBatchResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(MatchBatchResponse) @@ -111,9 +87,7 @@ func (c *matcherClient) Ping(ctx context.Context, in *Empty, opts ...grpc.CallOp // All implementations must embed UnimplementedMatcherServer // for forward compatibility. type MatcherServer interface { - GetMetadata(context.Context, *Empty) (*MatcherMetadata, error) Init(context.Context, *Empty) (*Empty, error) - Match(context.Context, *MatchRequest) (*MatchResponse, error) MatchBatch(context.Context, *MatchBatchRequest) (*MatchBatchResponse, error) Shutdown(context.Context, *Empty) (*Empty, error) Ping(context.Context, *Empty) (*Empty, error) @@ -127,15 +101,9 @@ type MatcherServer interface { // pointer dereference when methods are called. type UnimplementedMatcherServer struct{} -func (UnimplementedMatcherServer) GetMetadata(context.Context, *Empty) (*MatcherMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMetadata not implemented") -} func (UnimplementedMatcherServer) Init(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Init not implemented") } -func (UnimplementedMatcherServer) Match(context.Context, *MatchRequest) (*MatchResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Match not implemented") -} func (UnimplementedMatcherServer) MatchBatch(context.Context, *MatchBatchRequest) (*MatchBatchResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method MatchBatch not implemented") } @@ -166,24 +134,6 @@ func RegisterMatcherServer(s grpc.ServiceRegistrar, srv MatcherServer) { s.RegisterService(&Matcher_ServiceDesc, srv) } -func _Matcher_GetMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MatcherServer).GetMetadata(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
Matcher_GetMetadata_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MatcherServer).GetMetadata(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - func _Matcher_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { @@ -202,24 +152,6 @@ func _Matcher_Init_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } -func _Matcher_Match_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MatchRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MatcherServer).Match(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Matcher_Match_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MatcherServer).Match(ctx, req.(*MatchRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _Matcher_MatchBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MatchBatchRequest) if err := dec(in); err != nil { @@ -281,18 +213,10 @@ var Matcher_ServiceDesc = grpc.ServiceDesc{ ServiceName: "matchers.Matcher", HandlerType: (*MatcherServer)(nil), Methods: []grpc.MethodDesc{ - { - MethodName: "GetMetadata", - Handler: _Matcher_GetMetadata_Handler, - }, { MethodName: "Init", Handler: _Matcher_Init_Handler, }, - { - MethodName: "Match", - Handler: _Matcher_Match_Handler, - }, { MethodName: "MatchBatch", Handler: _Matcher_MatchBatch_Handler, diff --git a/pkg/rules/pool.go b/pkg/rules/pool.go index cb7c672..4c846de 100644 --- a/pkg/rules/pool.go +++ b/pkg/rules/pool.go @@ -13,7 +13,6 @@ 
import ( type Pool struct { *internal.ProcessPool[Rule] - manager *RuleConfigManager } func NewPool(manager *RuleConfigManager, drainTimeout time.Duration) *Pool { @@ -23,7 +22,6 @@ func NewPool(manager *RuleConfigManager, drainTimeout time.Duration) *Pool { } return &Pool{ ProcessPool: internal.NewProcessPool[Rule](routing, internal.NewPoolMetrics("rules"), drainTimeout), - manager: manager, } } diff --git a/pkg/rules/rpc_rules/rule.pb.go b/pkg/rules/rpc_rules/rule.pb.go index 361feb0..47c1e97 100644 --- a/pkg/rules/rpc_rules/rule.pb.go +++ b/pkg/rules/rpc_rules/rule.pb.go @@ -101,94 +101,6 @@ func (x *Event) GetJson() []byte { return nil } -type EvaluateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Event *Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EvaluateRequest) Reset() { - *x = EvaluateRequest{} - mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EvaluateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EvaluateRequest) ProtoMessage() {} - -func (x *EvaluateRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EvaluateRequest.ProtoReflect.Descriptor instead. 
-func (*EvaluateRequest) Descriptor() ([]byte, []int) { - return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{2} -} - -func (x *EvaluateRequest) GetEvent() *Event { - if x != nil { - return x.Event - } - return nil -} - -type EvaluateResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Matched bool `protobuf:"varint,1,opt,name=matched,proto3" json:"matched,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EvaluateResponse) Reset() { - *x = EvaluateResponse{} - mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EvaluateResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EvaluateResponse) ProtoMessage() {} - -func (x *EvaluateResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EvaluateResponse.ProtoReflect.Descriptor instead. 
-func (*EvaluateResponse) Descriptor() ([]byte, []int) { - return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{3} -} - -func (x *EvaluateResponse) GetMatched() bool { - if x != nil { - return x.Matched - } - return false -} - type EvaluateBatchRequest struct { state protoimpl.MessageState `protogen:"open.v1"` Events []*Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` @@ -198,7 +110,7 @@ type EvaluateBatchRequest struct { func (x *EvaluateBatchRequest) Reset() { *x = EvaluateBatchRequest{} - mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[4] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -210,7 +122,7 @@ func (x *EvaluateBatchRequest) String() string { func (*EvaluateBatchRequest) ProtoMessage() {} func (x *EvaluateBatchRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[4] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -223,7 +135,7 @@ func (x *EvaluateBatchRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use EvaluateBatchRequest.ProtoReflect.Descriptor instead. 
func (*EvaluateBatchRequest) Descriptor() ([]byte, []int) { - return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{4} + return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{2} } func (x *EvaluateBatchRequest) GetEvents() []*Event { @@ -249,7 +161,7 @@ type EventResult struct { func (x *EventResult) Reset() { *x = EventResult{} - mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[5] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -261,7 +173,7 @@ func (x *EventResult) String() string { func (*EventResult) ProtoMessage() {} func (x *EventResult) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[5] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -274,7 +186,7 @@ func (x *EventResult) ProtoReflect() protoreflect.Message { // Deprecated: Use EventResult.ProtoReflect.Descriptor instead. 
func (*EventResult) Descriptor() ([]byte, []int) { - return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{5} + return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{3} } func (x *EventResult) GetMatched() bool { @@ -328,7 +240,7 @@ type EvaluateBatchResponse struct { func (x *EvaluateBatchResponse) Reset() { *x = EvaluateBatchResponse{} - mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[6] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -340,7 +252,7 @@ func (x *EvaluateBatchResponse) String() string { func (*EvaluateBatchResponse) ProtoMessage() {} func (x *EvaluateBatchResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[6] + mi := &file_pkg_rules_rpc_rules_rule_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -353,7 +265,7 @@ func (x *EvaluateBatchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use EvaluateBatchResponse.ProtoReflect.Descriptor instead. 
func (*EvaluateBatchResponse) Descriptor() ([]byte, []int) { - return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{6} + return file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP(), []int{4} } func (x *EvaluateBatchResponse) GetResults() []*EventResult { @@ -370,11 +282,7 @@ const file_pkg_rules_rpc_rules_rule_proto_rawDesc = "" + "\x1epkg/rules/rpc_rules/rule.proto\x12\x05rules\"\a\n" + "\x05Empty\"\x1b\n" + "\x05Event\x12\x12\n" + - "\x04json\x18\x01 \x01(\fR\x04json\"5\n" + - "\x0fEvaluateRequest\x12\"\n" + - "\x05event\x18\x01 \x01(\v2\f.rules.EventR\x05event\",\n" + - "\x10EvaluateResponse\x12\x18\n" + - "\amatched\x18\x01 \x01(\bR\amatched\"<\n" + + "\x04json\x18\x01 \x01(\fR\x04json\"<\n" + "\x14EvaluateBatchRequest\x12$\n" + "\x06events\x18\x01 \x03(\v2\f.rules.EventR\x06events\"\xc2\x01\n" + "\vEventResult\x12\x18\n" + @@ -385,10 +293,9 @@ const file_pkg_rules_rpc_rules_rule_proto_rawDesc = "" + "\fcontext_json\x18\x05 \x01(\fR\vcontextJson\x12\"\n" + "\rmerge_by_keys\x18\x06 \x03(\tR\vmergeByKeys\"E\n" + "\x15EvaluateBatchResponse\x12,\n" + - "\aresults\x18\x02 \x03(\v2\x12.rules.EventResultR\aresults2\xff\x01\n" + + "\aresults\x18\x02 \x03(\v2\x12.rules.EventResultR\aresults2\xc2\x01\n" + "\x04Rule\x12\"\n" + - "\x04Init\x12\f.rules.Empty\x1a\f.rules.Empty\x12;\n" + - "\bEvaluate\x12\x16.rules.EvaluateRequest\x1a\x17.rules.EvaluateResponse\x12J\n" + + "\x04Init\x12\f.rules.Empty\x1a\f.rules.Empty\x12J\n" + "\rEvaluateBatch\x12\x1b.rules.EvaluateBatchRequest\x1a\x1c.rules.EvaluateBatchResponse\x12&\n" + "\bShutdown\x12\f.rules.Empty\x1a\f.rules.Empty\x12\"\n" + "\x04Ping\x12\f.rules.Empty\x1a\f.rules.EmptyB\x16Z\x14rpc_rules/;rpc_rulesb\x06proto3" @@ -405,35 +312,30 @@ func file_pkg_rules_rpc_rules_rule_proto_rawDescGZIP() []byte { return file_pkg_rules_rpc_rules_rule_proto_rawDescData } -var file_pkg_rules_rpc_rules_rule_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_pkg_rules_rpc_rules_rule_proto_msgTypes = 
make([]protoimpl.MessageInfo, 5) var file_pkg_rules_rpc_rules_rule_proto_goTypes = []any{ (*Empty)(nil), // 0: rules.Empty (*Event)(nil), // 1: rules.Event - (*EvaluateRequest)(nil), // 2: rules.EvaluateRequest - (*EvaluateResponse)(nil), // 3: rules.EvaluateResponse - (*EvaluateBatchRequest)(nil), // 4: rules.EvaluateBatchRequest - (*EventResult)(nil), // 5: rules.EventResult - (*EvaluateBatchResponse)(nil), // 6: rules.EvaluateBatchResponse + (*EvaluateBatchRequest)(nil), // 2: rules.EvaluateBatchRequest + (*EventResult)(nil), // 3: rules.EventResult + (*EvaluateBatchResponse)(nil), // 4: rules.EvaluateBatchResponse } var file_pkg_rules_rpc_rules_rule_proto_depIdxs = []int32{ - 1, // 0: rules.EvaluateRequest.event:type_name -> rules.Event - 1, // 1: rules.EvaluateBatchRequest.events:type_name -> rules.Event - 5, // 2: rules.EvaluateBatchResponse.results:type_name -> rules.EventResult - 0, // 3: rules.Rule.Init:input_type -> rules.Empty - 2, // 4: rules.Rule.Evaluate:input_type -> rules.EvaluateRequest - 4, // 5: rules.Rule.EvaluateBatch:input_type -> rules.EvaluateBatchRequest - 0, // 6: rules.Rule.Shutdown:input_type -> rules.Empty - 0, // 7: rules.Rule.Ping:input_type -> rules.Empty - 0, // 8: rules.Rule.Init:output_type -> rules.Empty - 3, // 9: rules.Rule.Evaluate:output_type -> rules.EvaluateResponse - 6, // 10: rules.Rule.EvaluateBatch:output_type -> rules.EvaluateBatchResponse - 0, // 11: rules.Rule.Shutdown:output_type -> rules.Empty - 0, // 12: rules.Rule.Ping:output_type -> rules.Empty - 8, // [8:13] is the sub-list for method output_type - 3, // [3:8] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 1, // 0: rules.EvaluateBatchRequest.events:type_name -> rules.Event + 3, // 1: rules.EvaluateBatchResponse.results:type_name -> rules.EventResult + 0, // 2: rules.Rule.Init:input_type -> rules.Empty + 2, // 3: 
rules.Rule.EvaluateBatch:input_type -> rules.EvaluateBatchRequest + 0, // 4: rules.Rule.Shutdown:input_type -> rules.Empty + 0, // 5: rules.Rule.Ping:input_type -> rules.Empty + 0, // 6: rules.Rule.Init:output_type -> rules.Empty + 4, // 7: rules.Rule.EvaluateBatch:output_type -> rules.EvaluateBatchResponse + 0, // 8: rules.Rule.Shutdown:output_type -> rules.Empty + 0, // 9: rules.Rule.Ping:output_type -> rules.Empty + 6, // [6:10] is the sub-list for method output_type + 2, // [2:6] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_pkg_rules_rpc_rules_rule_proto_init() } @@ -447,7 +349,7 @@ func file_pkg_rules_rpc_rules_rule_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_rules_rpc_rules_rule_proto_rawDesc), len(file_pkg_rules_rpc_rules_rule_proto_rawDesc)), NumEnums: 0, - NumMessages: 7, + NumMessages: 5, NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/rules/rpc_rules/rule.proto b/pkg/rules/rpc_rules/rule.proto index c7f22d9..a2da5b1 100644 --- a/pkg/rules/rpc_rules/rule.proto +++ b/pkg/rules/rpc_rules/rule.proto @@ -1,26 +1,22 @@ syntax = "proto3"; package rules; - option go_package = "rpc_rules/;rpc_rules"; message Empty {} message Event { bytes json = 1; } -message EvaluateRequest { Event event = 1; } -message EvaluateResponse { bool matched = 1; } - message EvaluateBatchRequest { repeated Event events = 1; } // EventResult carries the match outcome and any optional per-event overrides. // Empty string / nil fields mean "use YAML default". 
message EventResult { - bool matched = 1; - string title = 2; - string description = 3; - string severity = 4; // "info|low|medium|high|critical"; "" = YAML default - bytes context_json = 5; // JSON-encoded map[string]any; empty = nil - repeated string merge_by_keys = 6; // empty = use YAML merge_by_keys + bool matched = 1; + string title = 2; + string description = 3; + string severity = 4; // "info|low|medium|high|critical"; "" = YAML default + bytes context_json = 5; // JSON-encoded map[string]any; empty = nil + repeated string merge_by_keys = 6; // empty = use YAML merge_by_keys } message EvaluateBatchResponse { @@ -28,9 +24,8 @@ message EvaluateBatchResponse { } service Rule { - rpc Init(Empty) returns (Empty); - rpc Evaluate(EvaluateRequest) returns (EvaluateResponse); - rpc EvaluateBatch(EvaluateBatchRequest) returns (EvaluateBatchResponse); - rpc Shutdown(Empty) returns (Empty); - rpc Ping(Empty) returns (Empty); + rpc Init(Empty) returns (Empty); + rpc EvaluateBatch(EvaluateBatchRequest) returns (EvaluateBatchResponse); + rpc Shutdown(Empty) returns (Empty); + rpc Ping(Empty) returns (Empty); } diff --git a/pkg/rules/rpc_rules/rule_grpc.pb.go b/pkg/rules/rpc_rules/rule_grpc.pb.go index f13087e..125aefb 100644 --- a/pkg/rules/rpc_rules/rule_grpc.pb.go +++ b/pkg/rules/rpc_rules/rule_grpc.pb.go @@ -20,7 +20,6 @@ const _ = grpc.SupportPackageIsVersion9 const ( Rule_Init_FullMethodName = "/rules.Rule/Init" - Rule_Evaluate_FullMethodName = "/rules.Rule/Evaluate" Rule_EvaluateBatch_FullMethodName = "/rules.Rule/EvaluateBatch" Rule_Shutdown_FullMethodName = "/rules.Rule/Shutdown" Rule_Ping_FullMethodName = "/rules.Rule/Ping" @@ -31,7 +30,6 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
type RuleClient interface { Init(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) - Evaluate(ctx context.Context, in *EvaluateRequest, opts ...grpc.CallOption) (*EvaluateResponse, error) EvaluateBatch(ctx context.Context, in *EvaluateBatchRequest, opts ...grpc.CallOption) (*EvaluateBatchResponse, error) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) Ping(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) @@ -55,16 +53,6 @@ func (c *ruleClient) Init(ctx context.Context, in *Empty, opts ...grpc.CallOptio return out, nil } -func (c *ruleClient) Evaluate(ctx context.Context, in *EvaluateRequest, opts ...grpc.CallOption) (*EvaluateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(EvaluateResponse) - err := c.cc.Invoke(ctx, Rule_Evaluate_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *ruleClient) EvaluateBatch(ctx context.Context, in *EvaluateBatchRequest, opts ...grpc.CallOption) (*EvaluateBatchResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(EvaluateBatchResponse) @@ -100,7 +88,6 @@ func (c *ruleClient) Ping(ctx context.Context, in *Empty, opts ...grpc.CallOptio // for forward compatibility. 
type RuleServer interface { Init(context.Context, *Empty) (*Empty, error) - Evaluate(context.Context, *EvaluateRequest) (*EvaluateResponse, error) EvaluateBatch(context.Context, *EvaluateBatchRequest) (*EvaluateBatchResponse, error) Shutdown(context.Context, *Empty) (*Empty, error) Ping(context.Context, *Empty) (*Empty, error) @@ -117,9 +104,6 @@ type UnimplementedRuleServer struct{} func (UnimplementedRuleServer) Init(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Init not implemented") } -func (UnimplementedRuleServer) Evaluate(context.Context, *EvaluateRequest) (*EvaluateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Evaluate not implemented") -} func (UnimplementedRuleServer) EvaluateBatch(context.Context, *EvaluateBatchRequest) (*EvaluateBatchResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method EvaluateBatch not implemented") } @@ -168,24 +152,6 @@ func _Rule_Init_Handler(srv interface{}, ctx context.Context, dec func(interface return interceptor(ctx, in, info, handler) } -func _Rule_Evaluate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EvaluateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RuleServer).Evaluate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Rule_Evaluate_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RuleServer).Evaluate(ctx, req.(*EvaluateRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _Rule_EvaluateBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(EvaluateBatchRequest) if err := dec(in); err != nil { @@ -251,10 +217,6 @@ var Rule_ServiceDesc = grpc.ServiceDesc{ 
MethodName: "Init", Handler: _Rule_Init_Handler, }, - { - MethodName: "Evaluate", - Handler: _Rule_Evaluate_Handler, - }, { MethodName: "EvaluateBatch", Handler: _Rule_EvaluateBatch_Handler, diff --git a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go index dd50c06..b58e152 100644 --- a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go +++ b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.pb.go @@ -59,7 +59,7 @@ func (*Empty) Descriptor() ([]byte, []int) { type TuneBatchRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - AlertJson [][]byte `protobuf:"bytes,1,rep,name=alert_json,json=alertJson,proto3" json:"alert_json,omitempty"` // one JSON-encoded map[string]any per alert + AlertJson [][]byte `protobuf:"bytes,1,rep,name=alert_json,json=alertJson,proto3" json:"alert_json,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -103,7 +103,7 @@ func (x *TuneBatchRequest) GetAlertJson() [][]byte { type TuneBatchResponse struct { state protoimpl.MessageState `protogen:"open.v1"` - Applies []bool `protobuf:"varint,1,rep,packed,name=applies,proto3" json:"applies,omitempty"` // one result per alert + Applies []bool `protobuf:"varint,1,rep,packed,name=applies,proto3" json:"applies,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -161,7 +161,7 @@ const file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDesc = "" + "\x04Init\x12\x13.tuning_rules.Empty\x1a\x13.tuning_rules.Empty\x12L\n" + "\tTuneBatch\x12\x1e.tuning_rules.TuneBatchRequest\x1a\x1f.tuning_rules.TuneBatchResponse\x124\n" + "\bShutdown\x12\x13.tuning_rules.Empty\x1a\x13.tuning_rules.Empty\x120\n" + - "\x04Ping\x12\x13.tuning_rules.Empty\x1a\x13.tuning_rules.EmptyB?Z=github.com/harishhary/blink/pkg/tuning_rules/rpc_tuning_rulesb\x06proto3" + "\x04Ping\x12\x13.tuning_rules.Empty\x1a\x13.tuning_rules.EmptyB$Z\"rpc_tuning_rules/;rpc_tuning_rulesb\x06proto3" var ( 
file_pkg_tuning_rules_rpc_tuning_rules_tuning_rule_proto_rawDescOnce sync.Once diff --git a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto index 59721a4..e33464c 100644 --- a/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto +++ b/pkg/tuning_rules/rpc_tuning_rules/tuning_rule.proto @@ -1,18 +1,10 @@ syntax = "proto3"; - package tuning_rules; - -option go_package = "github.com/harishhary/blink/pkg/tuning_rules/rpc_tuning_rules"; +option go_package = "rpc_tuning_rules/;rpc_tuning_rules"; message Empty {} - -message TuneBatchRequest { - repeated bytes alert_json = 1; // one JSON-encoded map[string]any per alert -} - -message TuneBatchResponse { - repeated bool applies = 1; // one result per alert -} +message TuneBatchRequest { repeated bytes alert_json = 1; } // one JSON-encoded map[string]any per alert +message TuneBatchResponse { repeated bool applies = 1; } // one result per alert service TuningRule { rpc Init(Empty) returns (Empty);