diff --git a/Corefile b/Corefile index 734b78e..44d2f2f 100644 --- a/Corefile +++ b/Corefile @@ -5,6 +5,19 @@ libp2p.direct { errors any # RFC 8482 prometheus localhost:9253 + denylist { + # Spamhaus DROP: hijacked IP ranges used for spam and malware + # Spamhaus recommends refreshing once per day; 12h is a reasonable compromise + feed https://www.spamhaus.org/drop/drop.txt format=ip refresh=12h name=spamhaus-drop + feed https://www.spamhaus.org/drop/dropv6.txt format=ip refresh=12h name=spamhaus-dropv6 + # URLhaus: malware distribution URLs (IPs extracted) + # URLhaus updates every 5 minutes; use their stated minimum + feed https://urlhaus.abuse.ch/downloads/text/ format=url refresh=5m name=urlhaus + # Local allowlist: bypasses all denylists (own infrastructure, feed false positives) + # file ip-allowlist.txt type=allow + # Local denylist: quick blocks without waiting for feed updates + # file ip-denylist.txt + } ipparser libp2p.direct file zones/libp2p.direct acme libp2p.direct { diff --git a/Corefile.local-dev b/Corefile.local-dev index 62fb7c3..4575898 100644 --- a/Corefile.local-dev +++ b/Corefile.local-dev @@ -5,6 +5,19 @@ libp2p.direct { errors any # RFC 8482 prometheus localhost:9253 + denylist { + # Spamhaus DROP: hijacked IP ranges used for spam and malware + # Spamhaus recommends refreshing once per day; 12h is a reasonable compromise + feed https://www.spamhaus.org/drop/drop.txt format=ip refresh=12h name=spamhaus-drop + feed https://www.spamhaus.org/drop/dropv6.txt format=ip refresh=12h name=spamhaus-dropv6 + # URLhaus: malware distribution URLs (IPs extracted) + # URLhaus updates every 5 minutes; use their stated minimum + feed https://urlhaus.abuse.ch/downloads/text/ format=url refresh=5m name=urlhaus + # Local allowlist: bypasses all denylists (own infrastructure, feed false positives) + # file ip-allowlist.txt type=allow + # Local denylist: quick blocks without waiting for feed updates + # file ip-denylist.txt + } ipparser libp2p.direct file zones/libp2p.direct acme libp2p.direct { diff --git a/Dockerfile b/Dockerfile index ec38748..556634a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.23-bookworm AS builder +FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.24-bookworm AS builder LABEL org.opencontainers.image.source=https://github.com/ipshipyard/p2p-forge LABEL org.opencontainers.image.documentation=https://github.com/ipshipyard/p2p-forge#docker diff --git a/README.md b/README.md index bc0d214..2c8d131 100644 --- a/README.md +++ b/README.md @@ -171,6 +171,14 @@ acme FORGE_DOMAIN { - `dynamo TABLE_NAME` for production-grade key-value store shared across multiple instances (where all credentials are set via AWS' standard environment variables: `AWS_REGION`, `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`) - `badger DB_PATH` for local key-value store (good for local development and testing) +#### Denylists + +Optional plugin that blocks listed IPs from requesting certificates and from being resolved in `A`/`AAAA` records. See [docs/denylist.md](docs/denylist.md) for configuration details. + +#### Metrics + +Prometheus metrics are exposed via the standard CoreDNS metrics plugin. See [docs/METRICS.md](docs/METRICS.md) for p2p-forge-specific metrics. + ### Example Below is a basic example of starting a DNS server that handles the IP based domain names as well as ACME challenges. 
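For reference, a minimal sketch of the new `denylist` block in isolation (directives copied from the Corefile diff above; the local file name is illustrative):

```
libp2p.direct {
    denylist {
        # remote feed, refreshed periodically
        feed https://www.spamhaus.org/drop/drop.txt format=ip refresh=12h name=spamhaus-drop
        # local file, reloaded on change
        file ip-denylist.txt
    }
    ipparser libp2p.direct
    file zones/libp2p.direct
}
```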
diff --git a/acme/clientip.go b/acme/clientip.go new file mode 100644 index 0000000..16729cc --- /dev/null +++ b/acme/clientip.go @@ -0,0 +1,70 @@ +package acme + +import ( + "net" + "net/http" + "net/netip" + "strings" + + "github.com/multiformats/go-multiaddr" +) + +// clientIPs extracts client IPs from request: both X-Forwarded-For and RemoteAddr. +// Returns all valid IPs found (may be 0, 1, or 2 IPs). +// +// X-Forwarded-For spoofing is not a security concern here because: +// 1. We also check all IPs from the multiaddrs in the request body +// 2. The actual A/AAAA record being requested must match a multiaddr IP +// 3. An attacker cannot spoof the multiaddr IPs they're connecting from +// +// The client IP check is defense-in-depth; the multiaddr check is authoritative. +func clientIPs(r *http.Request) []netip.Addr { + var ips []netip.Addr + + // Check X-Forwarded-For (leftmost = original client) + if xff := r.Header.Get("X-Forwarded-For"); xff != "" { + if comma := strings.Index(xff, ","); comma != -1 { + xff = xff[:comma] + } + xff = strings.TrimSpace(xff) + if ip, err := netip.ParseAddr(xff); err == nil { + ips = append(ips, ip) + } + } + + // Also check RemoteAddr (direct connection IP) + host, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + host = r.RemoteAddr + } + if ip, err := netip.ParseAddr(host); err == nil { + ips = append(ips, ip) + } + + return ips +} + +// multiaddrsToIPs extracts IP addresses from multiaddr strings. +func multiaddrsToIPs(addrs []string) []netip.Addr { + ips := make([]netip.Addr, 0, len(addrs)) + for _, addr := range addrs { + ma, err := multiaddr.NewMultiaddr(addr) + if err != nil { + continue + } + // Try IPv4 + if val, err := ma.ValueForProtocol(multiaddr.P_IP4); err == nil { + if ip, err := netip.ParseAddr(val); err == nil { + ips = append(ips, ip) + continue + } + } + // Try IPv6 + if val, err := ma.ValueForProtocol(multiaddr.P_IP6); err == nil { + if ip, err := netip.ParseAddr(val); err == nil { + ips = append(ips, ip) + } + } + } + return ips +} diff --git a/acme/clientip_test.go b/acme/clientip_test.go new file mode 100644 index 0000000..2df8e8b --- /dev/null +++ b/acme/clientip_test.go @@ -0,0 +1,139 @@ +package acme + +import ( + "net/http" + "net/netip" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestClientIPs(t *testing.T) { + tests := []struct { + name string + xff string + remoteAddr string + expected []netip.Addr + }{ + { + name: "XFF single IP", + xff: "1.2.3.4", + remoteAddr: "", + expected: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, + }, + { + name: "XFF multiple IPs uses leftmost", + xff: "1.2.3.4, 5.6.7.8, 9.10.11.12", + remoteAddr: "", + expected: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, + }, + { + name: "RemoteAddr IPv4 with port", + xff: "", + remoteAddr: "1.2.3.4:8080", + expected: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, + }, + { + name: "RemoteAddr IPv6 with port", + xff: "", + remoteAddr: "[::1]:8080", + expected: []netip.Addr{netip.MustParseAddr("::1")}, + }, + { + name: "both XFF and RemoteAddr", + xff: "1.2.3.4", + remoteAddr: "5.6.7.8:8080", + expected: []netip.Addr{netip.MustParseAddr("1.2.3.4"), netip.MustParseAddr("5.6.7.8")}, + }, + { + name: "empty headers", + xff: "", + remoteAddr: "", + expected: nil, + }, + { + name: "XFF with spaces", + xff: " 1.2.3.4 ", + remoteAddr: "", + expected: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, + }, + { + name: "invalid XFF skipped", + xff: "not-an-ip", + remoteAddr: "1.2.3.4:80", + expected: 
[]netip.Addr{netip.MustParseAddr("1.2.3.4")}, + }, + { + name: "RemoteAddr without port", + xff: "", + remoteAddr: "1.2.3.4", + expected: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &http.Request{ + Header: make(http.Header), + RemoteAddr: tt.remoteAddr, + } + if tt.xff != "" { + r.Header.Set("X-Forwarded-For", tt.xff) + } + + got := clientIPs(r) + assert.Equal(t, tt.expected, got) + }) + } +} + +func TestMultiaddrsToIPs(t *testing.T) { + tests := []struct { + name string + addrs []string + expected []netip.Addr + }{ + { + name: "IPv4 multiaddr", + addrs: []string{"/ip4/1.2.3.4/tcp/4001"}, + expected: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, + }, + { + name: "IPv6 multiaddr", + addrs: []string{"/ip6/2001:db8::1/tcp/4001"}, + expected: []netip.Addr{netip.MustParseAddr("2001:db8::1")}, + }, + { + name: "mixed IPv4 and IPv6", + addrs: []string{"/ip4/1.2.3.4/tcp/4001", "/ip6/::1/tcp/4001"}, + expected: []netip.Addr{netip.MustParseAddr("1.2.3.4"), netip.MustParseAddr("::1")}, + }, + { + name: "invalid multiaddr skipped", + addrs: []string{"not-a-multiaddr", "/ip4/1.2.3.4/tcp/4001"}, + expected: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, + }, + { + name: "empty input", + addrs: []string{}, + expected: []netip.Addr{}, + }, + { + name: "nil input", + addrs: nil, + expected: []netip.Addr{}, + }, + { + name: "multiaddr without IP", + addrs: []string{"/dns4/example.com/tcp/4001"}, + expected: []netip.Addr{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := multiaddrsToIPs(tt.addrs) + assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/acme/writer.go b/acme/writer.go index c5967a0..8357ec9 100644 --- a/acme/writer.go +++ b/acme/writer.go @@ -11,6 +11,7 @@ import ( "io" "net" "net/http" + "net/netip" "os" "strings" "testing" @@ -20,6 +21,7 @@ import ( "github.com/coredns/coredns/plugin/pkg/reuseport" "github.com/felixge/httpsnoop" "github.com/ipshipyard/p2p-forge/client" + "github.com/ipshipyard/p2p-forge/denylist" "github.com/prometheus/client_golang/prometheus" metrics "github.com/slok/go-http-metrics/metrics/prometheus" @@ -151,6 +153,13 @@ func (c *acmeWriter) OnStartup() error { return } + // Check denylist before attempting to connect + if blocked, reason := checkDenylist(clientIPs(r), typedBody.Addresses); blocked { + w.WriteHeader(http.StatusForbidden) + _, _ = w.Write([]byte(fmt.Sprintf("403 Forbidden: %s", reason))) + return + } + httpUserAgent := r.Header.Get("User-Agent") if err := testAddresses(r.Context(), peerID, typedBody.Addresses, httpUserAgent); err != nil { w.WriteHeader(http.StatusBadRequest) @@ -295,6 +304,35 @@ type requestBody struct { Addresses []string `json:"addresses"` } +// checkDenylist checks client IPs and multiaddr IPs against denylist. +// Returns (blocked, reason) where reason describes which IP was blocked. +// Blocks if ANY IP is denied. 
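+//
+// Illustrative example (hypothetical values): a request arriving from
+// 192.0.2.10 that announces /ip4/203.0.113.5/tcp/4001 is rejected when
+// either address matches a configured denylist:
+//
+//	blocked, reason := checkDenylist(
+//		[]netip.Addr{netip.MustParseAddr("192.0.2.10")},
+//		[]string{"/ip4/203.0.113.5/tcp/4001"},
+//	)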
+func checkDenylist(clientIPs []netip.Addr, multiaddrs []string) (bool, string) { + mgr := denylist.GetManager() + if mgr == nil { + return false, "" + } + + // Check all client IPs (XFF and RemoteAddr) + for _, client := range clientIPs { + if !client.IsValid() { + continue + } + if denied, result := mgr.Check(client); denied { + return true, fmt.Sprintf("client IP %s blocked by %s", client, result.Name) + } + } + + // Check multiaddr IPs + for _, ip := range multiaddrsToIPs(multiaddrs) { + if denied, result := mgr.Check(ip); denied { + return true, fmt.Sprintf("multiaddr IP %s blocked by %s", ip, result.Name) + } + } + + return false, "" +} + func (c *acmeWriter) OnFinalShutdown() error { if !c.nlSetup { return nil diff --git a/denylist/config.go b/denylist/config.go new file mode 100644 index 0000000..9db40cc --- /dev/null +++ b/denylist/config.go @@ -0,0 +1,188 @@ +package denylist + +import ( + "fmt" + "strings" + "time" + + "github.com/coredns/caddy" +) + +const defaultFeedRefresh = time.Hour + +// parseListTypeValue parses a list type value from configuration. +func parseListTypeValue(v string) (listType, error) { + switch v { + case "allow": + return listTypeAllow, nil + case "deny": + return listTypeDeny, nil + default: + return "", fmt.Errorf("invalid type: %s (expected allow or deny)", v) + } +} + +// parseConfig parses a denylist configuration block from the Corefile. +// Returns nil if there is no denylist block. +// +// Syntax: +// +// denylist { +// file [type=allow|deny] [name=] +// feed format=ip|url [type=allow|deny] [refresh=] [name=] +// } +// +// forgeDomain is used to optimize URL format feeds by extracting IPs directly +// from forge subdomains (e.g., "192-168-1-1.peerid.libp2p.direct") instead of +// doing DNS resolution. +func parseConfig(c *caddy.Controller, baseDir, forgeDomain string) (*Manager, error) { + if !c.NextBlock() { + return nil, nil // no denylist block + } + + mgr := NewManager() + var checkers []checker + +parseLoop: + for { + switch c.Val() { + case "file": + cfg, err := parseFileDirective(c, baseDir) + if err != nil { + return nil, err + } + fl, err := newFileList(cfg) + if err != nil { + return nil, fmt.Errorf("file %s: %w", cfg.Path, err) + } + checkers = append(checkers, fl) + + case "feed": + cfg, err := parseFeedDirective(c, forgeDomain) + if err != nil { + return nil, err + } + fl, err := newFeedList(cfg) + if err != nil { + return nil, fmt.Errorf("feed %s: %w", cfg.URL, err) + } + checkers = append(checkers, fl) + + default: + if c.Val() == "}" { + break parseLoop + } + return nil, fmt.Errorf("unknown directive: %s", c.Val()) + } + + if !c.Next() { + break + } + } + + if len(checkers) == 0 { + return nil, nil // no lists configured + } + + for _, chk := range checkers { + mgr.add(chk) + } + + return mgr, nil +} + +func parseFileDirective(c *caddy.Controller, baseDir string) (fileConfig, error) { + cfg := fileConfig{ + BaseDir: baseDir, + Type: listTypeDeny, + } + + args := c.RemainingArgs() + if len(args) == 0 { + return cfg, c.ArgErr() + } + + cfg.Path = args[0] + + // Parse key=value options + for _, arg := range args[1:] { + kv := strings.SplitN(arg, "=", 2) + if len(kv) != 2 { + return cfg, fmt.Errorf("invalid option: %s (expected key=value)", arg) + } + k, v := kv[0], kv[1] + switch k { + case "type": + t, err := parseListTypeValue(v) + if err != nil { + return cfg, err + } + cfg.Type = t + case "name": + cfg.Name = v + default: + return cfg, fmt.Errorf("unknown file option: %s", k) + } + } + + return cfg, nil +} + +func 
parseFeedDirective(c *caddy.Controller, forgeDomain string) (feedConfig, error) { + cfg := feedConfig{ + Type: listTypeDeny, + Refresh: defaultFeedRefresh, + ForgeSuffix: forgeDomain, + } + + args := c.RemainingArgs() + if len(args) == 0 { + return cfg, c.ArgErr() + } + + cfg.URL = args[0] + + // Parse key=value options + var hasFormat bool + for _, arg := range args[1:] { + kv := strings.SplitN(arg, "=", 2) + if len(kv) != 2 { + return cfg, fmt.Errorf("invalid option: %s (expected key=value)", arg) + } + k, v := kv[0], kv[1] + switch k { + case "format": + switch v { + case "ip": + cfg.Format = formatIP + case "url": + cfg.Format = formatURL + default: + return cfg, fmt.Errorf("invalid format: %s (expected ip or url)", v) + } + hasFormat = true + case "type": + t, err := parseListTypeValue(v) + if err != nil { + return cfg, err + } + cfg.Type = t + case "refresh": + d, err := time.ParseDuration(v) + if err != nil { + return cfg, fmt.Errorf("invalid refresh duration: %w", err) + } + cfg.Refresh = d + case "name": + cfg.Name = v + default: + return cfg, fmt.Errorf("unknown feed option: %s", k) + } + } + + if !hasFormat { + return cfg, fmt.Errorf("feed directive requires format=ip|url") + } + + return cfg, nil +} diff --git a/denylist/denylist.go b/denylist/denylist.go new file mode 100644 index 0000000..3420c99 --- /dev/null +++ b/denylist/denylist.go @@ -0,0 +1,52 @@ +// Package denylist provides IP address filtering for the p2p-forge DNS and +// ACME registration services. It supports file-based and HTTP feed-based +// deny/allow lists to prevent misuse such as DNS rebinding attacks. +package denylist + +import ( + "net/netip" + + clog "github.com/coredns/coredns/plugin/pkg/log" +) + +var log = clog.NewWithPlugin("denylist") + +// listType indicates whether a list is an allowlist or denylist. +type listType string + +const ( + // listTypeAllow indicates entries that should bypass denylist checks. + listTypeAllow listType = "allow" + // listTypeDeny indicates entries that should be blocked (default). + listTypeDeny listType = "deny" +) + +// feedFormat specifies how to parse external feed content. +type feedFormat string + +const ( + // formatIP parses one IP or CIDR per line with # or ; comments. + // Used for Spamhaus DROP, FireHOL, and custom lists. + formatIP feedFormat = "ip" + // formatURL parses URLs, extracts hosts, resolves domains to IPs. + // Used for URLhaus and similar feeds. + formatURL feedFormat = "url" +) + +// CheckResult contains the outcome of checking an IP against a list. +type CheckResult struct { + Matched bool // whether the IP matched an entry + Name string // source name (e.g., "spamhaus-drop") +} + +// checker checks IP addresses against a list. +type checker interface { + // Check returns whether the IP matches any entry in this list. + Check(ip netip.Addr) CheckResult + // Name returns the name of this checker for metrics/logging. + Name() string + // Type returns whether this is an allow or deny list. + Type() listType + // Size returns the number of entries in the list. 
+ Size() int +} diff --git a/denylist/denylist_test.go b/denylist/denylist_test.go new file mode 100644 index 0000000..89f8346 --- /dev/null +++ b/denylist/denylist_test.go @@ -0,0 +1,818 @@ +package denylist + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "net/netip" + "os" + "path/filepath" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPrefixSet(t *testing.T) { + ps := newPrefixSet() + + // Initially empty + assert.Equal(t, 0, ps.size()) + + // Add some prefixes + prefixes := []netip.Prefix{ + netip.MustParsePrefix("192.168.1.0/24"), + netip.MustParsePrefix("10.0.0.0/8"), + netip.MustParsePrefix("2001:db8::/32"), + } + ps.replace(prefixes) + assert.Equal(t, 3, ps.size()) + + // Test contains - IPv4 in range + assert.True(t, ps.contains(netip.MustParseAddr("192.168.1.100"))) + + // Test contains - IPv4 not in range + assert.False(t, ps.contains(netip.MustParseAddr("172.16.0.1"))) + + // Test contains - IPv6 in range + assert.True(t, ps.contains(netip.MustParseAddr("2001:db8::1"))) + + // Test contains - IPv6 not in range + assert.False(t, ps.contains(netip.MustParseAddr("2001:db9::1"))) + + // Test replace clears old data + ps.replace([]netip.Prefix{netip.MustParsePrefix("1.2.3.4/32")}) + assert.Equal(t, 1, ps.size()) + assert.False(t, ps.contains(netip.MustParseAddr("192.168.1.100"))) +} + +func TestParseIP(t *testing.T) { + tests := []struct { + name string + input string + expected []string + }{ + { + name: "single IPs", + input: `192.168.1.1 +10.0.0.1 +2001:db8::1`, + expected: []string{"192.168.1.1/32", "10.0.0.1/32", "2001:db8::1/128"}, + }, + { + name: "windows line endings CRLF", + input: "192.168.1.1\r\n10.0.0.1\r\n2001:db8::1\r\n", + expected: []string{"192.168.1.1/32", "10.0.0.1/32", "2001:db8::1/128"}, + }, + { + name: "mixed line endings", + input: "192.168.1.1\n10.0.0.1\r\n172.16.0.1\n", + expected: []string{"192.168.1.1/32", "10.0.0.1/32", "172.16.0.1/32"}, + }, + { + name: "CIDR ranges", + input: `192.168.0.0/16 +10.0.0.0/8`, + expected: []string{"192.168.0.0/16", "10.0.0.0/8"}, + }, + { + name: "with comments", + input: `# This is a comment +192.168.1.1 +; Another comment style +10.0.0.1 ; inline comment +172.16.0.0/12 # inline comment`, + expected: []string{"192.168.1.1/32", "10.0.0.1/32", "172.16.0.0/12"}, + }, + { + name: "empty lines", + input: `192.168.1.1 + +10.0.0.1 + +`, + expected: []string{"192.168.1.1/32", "10.0.0.1/32"}, + }, + { + name: "empty input", + input: "", + expected: []string{}, + }, + { + name: "invalid lines skipped", + input: `192.168.1.1 +not-an-ip +10.0.0.1`, + expected: []string{"192.168.1.1/32", "10.0.0.1/32"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + prefixes, err := parseIP(strings.NewReader(tt.input)) + require.NoError(t, err) + + got := make([]string, 0, len(prefixes)) + for _, p := range prefixes { + got = append(got, p.String()) + } + assert.Equal(t, tt.expected, got) + }) + } +} + +func TestParseForgeIP(t *testing.T) { + tests := []struct { + name string + host string + forgeSuffix string + wantIP string + wantOK bool + }{ + { + name: "IPv4 forge domain", + host: "192-168-1-1.k51qzi5uqu5dj094.libp2p.direct", + forgeSuffix: ".libp2p.direct", + wantIP: "192.168.1.1", + wantOK: true, + }, + { + name: "IPv6 forge domain", + host: "2001-db8-0-0-0-0-0-1.k51qzi5uqu5dj094.libp2p.direct", + forgeSuffix: ".libp2p.direct", + wantIP: "2001:db8::1", + wantOK: true, + }, + { + name: "IPv6 with 
leading zero for RFC 1035", + host: "0--1.k51qzi5uqu5dj094.libp2p.direct", + forgeSuffix: ".libp2p.direct", + wantIP: "::1", + wantOK: true, + }, + { + name: "invalid IP in forge domain", + host: "not-an-ip.k51qzi5uqu5dj094.libp2p.direct", + forgeSuffix: ".libp2p.direct", + wantOK: false, + }, + { + name: "non-matching suffix", + host: "192-168-1-1.example.com", + forgeSuffix: ".libp2p.direct", + wantOK: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ip, ok := parseForgeIP(tt.host, tt.forgeSuffix) + assert.Equal(t, tt.wantOK, ok) + if tt.wantOK { + assert.Equal(t, tt.wantIP, ip.String()) + } + }) + } +} + +func TestParseURL(t *testing.T) { + tests := []struct { + name string + input string + forgeSuffix string + wantCount int + wantIPs []string // subset to check + }{ + { + name: "URLs with IP hosts", + input: `http://192.168.1.1/malware.exe +https://10.0.0.1:8080/bad.js`, + wantCount: 2, + wantIPs: []string{"192.168.1.1", "10.0.0.1"}, + }, + { + name: "URLs with CRLF line endings", + input: "http://192.168.1.1/file\r\nhttp://10.0.0.1/file\r\n", + wantCount: 2, + wantIPs: []string{"192.168.1.1", "10.0.0.1"}, + }, + { + name: "forge domain extraction", + input: `http://192-168-1-1.k51qzi5uqu5dj094.libp2p.direct/file +http://10-0-0-1.k51qzi5uqu5dj094.libp2p.direct/file`, + forgeSuffix: "libp2p.direct", + wantCount: 2, + wantIPs: []string{"192.168.1.1", "10.0.0.1"}, + }, + { + name: "with comments", + input: `# comment +http://192.168.1.1/file`, + wantCount: 1, + }, + { + name: "empty input", + input: "", + wantCount: 0, + }, + { + name: "deduplicate IPs", + input: `http://192.168.1.1/file1 +http://192.168.1.1/file2`, + wantCount: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + prefixes, err := parseURL(strings.NewReader(tt.input), parseURLOptions{ + ForgeSuffix: tt.forgeSuffix, + }) + require.NoError(t, err) + assert.Equal(t, tt.wantCount, len(prefixes)) + + if len(tt.wantIPs) > 0 { + gotIPs := make([]string, 0, len(prefixes)) + for _, p := range prefixes { + gotIPs = append(gotIPs, p.Addr().String()) + } + for _, wantIP := range tt.wantIPs { + assert.Contains(t, gotIPs, wantIP) + } + } + }) + } +} + +func TestFileList(t *testing.T) { + // Create temp file + dir := t.TempDir() + path := filepath.Join(dir, "test.txt") + + content := `192.168.1.0/24 +10.0.0.0/8` + err := os.WriteFile(path, []byte(content), 0644) + require.NoError(t, err) + + fl, err := newFileList(fileConfig{ + Path: path, + Name: "test-list", + Type: listTypeDeny, + }) + require.NoError(t, err) + defer fl.Close() + + assert.Equal(t, "test-list", fl.Name()) + assert.Equal(t, listTypeDeny, fl.Type()) + assert.Equal(t, 2, fl.Size()) + + // Test Check + result := fl.Check(netip.MustParseAddr("192.168.1.100")) + assert.True(t, result.Matched) + assert.Equal(t, "test-list", result.Name) + + result = fl.Check(netip.MustParseAddr("172.16.0.1")) + assert.False(t, result.Matched) +} + +func TestFileListReload(t *testing.T) { + // Create temp file + dir := t.TempDir() + path := filepath.Join(dir, "test.txt") + + err := os.WriteFile(path, []byte("192.168.1.0/24\n"), 0644) + require.NoError(t, err) + + fl, err := newFileList(fileConfig{Path: path}) + require.NoError(t, err) + defer fl.Close() + + assert.Equal(t, 1, fl.Size()) + + // Modify file + err = os.WriteFile(path, []byte("192.168.1.0/24\n10.0.0.0/8\n"), 0644) + require.NoError(t, err) + + // Wait for reload (fsnotify + 100ms delay) + assert.Eventually(t, func() bool { + return fl.Size() == 2 + }, 
time.Second, 50*time.Millisecond, "file should reload with 2 entries") +} + +func TestFileListDeleteAndRecreate(t *testing.T) { + // This test verifies that the file watcher correctly handles the scenario + // where a file is deleted and then recreated with new content. + // This is common with editors that use atomic saves (write temp, rename). + dir := t.TempDir() + path := filepath.Join(dir, "denylist.txt") + + // Create initial file + err := os.WriteFile(path, []byte("192.168.1.0/24\n"), 0644) + require.NoError(t, err) + + fl, err := newFileList(fileConfig{ + Path: path, + Name: "test-delete-recreate", + Type: listTypeDeny, + }) + require.NoError(t, err) + defer fl.Close() + + // Verify initial state + assert.Equal(t, 1, fl.Size()) + testIP := netip.MustParseAddr("192.168.1.100") + result := fl.Check(testIP) + assert.True(t, result.Matched, "IP should be blocked initially") + + // Delete the file + err = os.Remove(path) + require.NoError(t, err) + + // Wait a bit to ensure watcher sees the delete + time.Sleep(150 * time.Millisecond) + + // Old data should still be in memory (delete doesn't clear the prefixSet) + assert.Equal(t, 1, fl.Size(), "data should persist after file deletion") + result = fl.Check(testIP) + assert.True(t, result.Matched, "IP should still be blocked after file deletion") + + // Recreate file with different content + newContent := "10.0.0.0/8\n172.16.0.0/12\n" + err = os.WriteFile(path, []byte(newContent), 0644) + require.NoError(t, err) + + // Wait for reload (fsnotify Create event + 100ms debounce) + assert.Eventually(t, func() bool { + return fl.Size() == 2 + }, 2*time.Second, 50*time.Millisecond, "file should reload with new content") + + // Old IP should no longer be blocked + result = fl.Check(testIP) + assert.False(t, result.Matched, "192.168.x.x should NOT be blocked after recreate") + + // New IPs should be blocked + result = fl.Check(netip.MustParseAddr("10.1.2.3")) + assert.True(t, result.Matched, "10.x.x.x should be blocked after recreate") + + result = fl.Check(netip.MustParseAddr("172.16.5.5")) + assert.True(t, result.Matched, "172.16.x.x should be blocked after recreate") +} + +func TestFileListAtomicSave(t *testing.T) { + // This test simulates how editors like vim perform atomic saves: + // 1. Write to temp file + // 2. Delete original + // 3. Rename temp to original + // The watcher should pick up the final content. + dir := t.TempDir() + path := filepath.Join(dir, "denylist.txt") + tempPath := filepath.Join(dir, "denylist.txt.tmp") + + // Create initial file + err := os.WriteFile(path, []byte("192.168.1.0/24\n"), 0644) + require.NoError(t, err) + + fl, err := newFileList(fileConfig{ + Path: path, + Name: "test-atomic-save", + Type: listTypeDeny, + }) + require.NoError(t, err) + defer fl.Close() + + assert.Equal(t, 1, fl.Size()) + + // Simulate atomic save: + // 1. Write new content to temp file + err = os.WriteFile(tempPath, []byte("10.0.0.0/8\n172.16.0.0/12\n203.0.113.0/24\n"), 0644) + require.NoError(t, err) + + // 2. Remove original (some editors do this) + err = os.Remove(path) + require.NoError(t, err) + + // 3. 
Rename temp to original + err = os.Rename(tempPath, path) + require.NoError(t, err) + + // Wait for reload - rename triggers Create event + assert.Eventually(t, func() bool { + return fl.Size() == 3 + }, 2*time.Second, 50*time.Millisecond, "file should reload after atomic save") + + // Verify new content is active + result := fl.Check(netip.MustParseAddr("10.5.5.5")) + assert.True(t, result.Matched, "10.x.x.x should be blocked") + + result = fl.Check(netip.MustParseAddr("192.168.1.1")) + assert.False(t, result.Matched, "192.168.x.x should NOT be blocked (old content)") +} + +func TestFeedList(t *testing.T) { + // Create test server + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Last-Modified", "Mon, 02 Jan 2006 15:04:05 GMT") + io.WriteString(w, "192.168.1.0/24\n10.0.0.0/8\n") + })) + defer srv.Close() + + fl, err := newFeedList(feedConfig{ + URL: srv.URL, + Name: "test-feed", + Type: listTypeDeny, + Format: formatIP, + Refresh: time.Hour, + }) + require.NoError(t, err) + defer fl.Close() + + // Wait for async initial fetch + assert.Eventually(t, func() bool { return fl.Size() == 2 }, time.Second, 10*time.Millisecond) + + assert.Equal(t, "test-feed", fl.Name()) + assert.Equal(t, listTypeDeny, fl.Type()) + + // Test Check + result := fl.Check(netip.MustParseAddr("192.168.1.100")) + assert.True(t, result.Matched) +} + +func TestFeedListNotModified(t *testing.T) { + var requestCount atomic.Int32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestCount.Add(1) + if r.Header.Get("If-Modified-Since") != "" { + w.WriteHeader(http.StatusNotModified) + return + } + w.Header().Set("Last-Modified", "Mon, 02 Jan 2006 15:04:05 GMT") + io.WriteString(w, "192.168.1.0/24\n") + })) + defer srv.Close() + + fl, err := newFeedList(feedConfig{ + URL: srv.URL, + Format: formatIP, + Refresh: time.Hour, + }) + require.NoError(t, err) + defer fl.Close() + + // Wait for async initial fetch + assert.Eventually(t, func() bool { return requestCount.Load() == 1 }, time.Second, 10*time.Millisecond) + + // Manual update should get 304 + ctx := context.Background() + count, err := fl.Update(ctx) + require.NoError(t, err) + assert.Equal(t, 1, count) // Same count, no parse + assert.Equal(t, int32(2), requestCount.Load()) +} + +func TestFeedList304PreservesData(t *testing.T) { + // This test verifies that when a feed returns HTTP 304 Not Modified, + // the existing data is preserved and continues to work correctly. 
+ var requestCount atomic.Int32 + lastModified := "Mon, 02 Jan 2006 15:04:05 GMT" + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestCount.Add(1) + + // Check If-Modified-Since header is correctly formatted + ims := r.Header.Get("If-Modified-Since") + if ims != "" { + assert.Equal(t, lastModified, ims, "If-Modified-Since should match Last-Modified") + w.WriteHeader(http.StatusNotModified) + return + } + + w.Header().Set("Last-Modified", lastModified) + io.WriteString(w, "192.168.1.0/24\n10.0.0.0/8\n") + })) + defer srv.Close() + + fl, err := newFeedList(feedConfig{ + URL: srv.URL, + Format: formatIP, + Name: "test-304", + Type: listTypeDeny, + Refresh: time.Hour, + }) + require.NoError(t, err) + defer fl.Close() + + // Wait for initial fetch + assert.Eventually(t, func() bool { return fl.Size() == 2 }, time.Second, 10*time.Millisecond) + assert.Equal(t, int32(1), requestCount.Load()) + + // Verify data works before 304 + testIP := netip.MustParseAddr("192.168.1.100") + result := fl.Check(testIP) + assert.True(t, result.Matched, "IP should be blocked before 304") + + // Trigger update - should get 304 + ctx := context.Background() + count, err := fl.Update(ctx) + require.NoError(t, err) + assert.Equal(t, 2, count, "count should remain 2 after 304") + assert.Equal(t, int32(2), requestCount.Load()) + + // Verify data is STILL valid after 304 response + result = fl.Check(testIP) + assert.True(t, result.Matched, "IP should still be blocked after 304") + + // Also verify other IP in the list still works + result = fl.Check(netip.MustParseAddr("10.1.2.3")) + assert.True(t, result.Matched, "10.x.x.x IP should still be blocked after 304") + + // And verify non-blocked IP is still not blocked + result = fl.Check(netip.MustParseAddr("8.8.8.8")) + assert.False(t, result.Matched, "8.8.8.8 should not be blocked") + + // Trigger multiple 304s to ensure stability + for i := 0; i < 3; i++ { + _, err := fl.Update(ctx) + require.NoError(t, err) + } + assert.Equal(t, int32(5), requestCount.Load()) // 1 initial + 4 updates + + // Data should still be intact + assert.Equal(t, 2, fl.Size()) + result = fl.Check(testIP) + assert.True(t, result.Matched, "IP should be blocked after multiple 304s") +} + +func TestManager(t *testing.T) { + // Create allowlist + allowPS := newPrefixSet() + allowPS.replace([]netip.Prefix{netip.MustParsePrefix("10.0.0.1/32")}) + + // Create denylist + denyPS := newPrefixSet() + denyPS.replace([]netip.Prefix{netip.MustParsePrefix("10.0.0.0/8")}) + + mgr := NewManager() + + // Add mock checkers (using simple struct that implements Checker) + mgr.add(&mockChecker{ + name: "allowlist", + listType: listTypeAllow, + prefixes: allowPS, + }) + mgr.add(&mockChecker{ + name: "denylist", + listType: listTypeDeny, + prefixes: denyPS, + }) + + // 10.0.0.1 should be allowed (allowlist takes priority) + denied, result := mgr.Check(netip.MustParseAddr("10.0.0.1")) + assert.False(t, denied) + assert.True(t, result.Matched) + + // 10.0.0.2 should be denied (only on denylist) + denied, result = mgr.Check(netip.MustParseAddr("10.0.0.2")) + assert.True(t, denied) + assert.True(t, result.Matched) + + // 192.168.1.1 should not match anything + denied, result = mgr.Check(netip.MustParseAddr("192.168.1.1")) + assert.False(t, denied) + assert.False(t, result.Matched) +} + +type mockChecker struct { + name string + listType listType + prefixes *prefixSet +} + +func (m *mockChecker) Check(ip netip.Addr) CheckResult { + if m.prefixes.contains(ip) { + return 
CheckResult{ + Matched: true, + Name: m.name, + } + } + return CheckResult{} +} + +func (m *mockChecker) Name() string { return m.name } +func (m *mockChecker) Type() listType { return m.listType } +func (m *mockChecker) Size() int { return m.prefixes.size() } +func (m *mockChecker) Close() error { return nil } +func (m *mockChecker) LastUpdate() time.Time { return time.Now() } + +// TestDenylistIntegration tests the full integration of Manager with real FileList and FeedList. +func TestDenylistIntegration(t *testing.T) { + testIP := netip.MustParseAddr("192.168.1.100") + + t.Run("IP_blocked_by_file", func(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "denylist.txt") + err := os.WriteFile(path, []byte("192.168.1.0/24\n"), 0644) + require.NoError(t, err) + + fl, err := newFileList(fileConfig{Path: path, Type: listTypeDeny}) + require.NoError(t, err) + defer fl.Close() + + mgr := NewManager() + mgr.add(fl) + + denied, result := mgr.Check(testIP) + assert.True(t, denied, "IP should be blocked by file denylist") + assert.True(t, result.Matched) + }) + + t.Run("IP_blocked_by_feed", func(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "192.168.1.0/24\n") + })) + defer srv.Close() + + fl, err := newFeedList(feedConfig{ + URL: srv.URL, + Format: formatIP, + Type: listTypeDeny, + Refresh: time.Hour, + }) + require.NoError(t, err) + defer fl.Close() + + // Wait for async load + assert.Eventually(t, func() bool { return fl.Size() > 0 }, time.Second, 10*time.Millisecond) + + mgr := NewManager() + mgr.add(fl) + + denied, result := mgr.Check(testIP) + assert.True(t, denied, "IP should be blocked by feed denylist") + assert.True(t, result.Matched) + }) + + t.Run("allowlist_overrides_denylist", func(t *testing.T) { + dir := t.TempDir() + + // Allowlist file with specific IP + allowPath := filepath.Join(dir, "allowlist.txt") + err := os.WriteFile(allowPath, []byte("192.168.1.100/32\n"), 0644) + require.NoError(t, err) + + allowFL, err := newFileList(fileConfig{Path: allowPath, Type: listTypeAllow, Name: "allowlist"}) + require.NoError(t, err) + defer allowFL.Close() + + // Feed denylist with broader range + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "192.168.0.0/16\n") + })) + defer srv.Close() + + denyFL, err := newFeedList(feedConfig{ + URL: srv.URL, + Format: formatIP, + Type: listTypeDeny, + Name: "denylist", + Refresh: time.Hour, + }) + require.NoError(t, err) + defer denyFL.Close() + + assert.Eventually(t, func() bool { return denyFL.Size() > 0 }, time.Second, 10*time.Millisecond) + + mgr := NewManager() + mgr.add(allowFL) + mgr.add(denyFL) + + // IP is on both lists - allowlist should win + denied, result := mgr.Check(testIP) + assert.False(t, denied, "IP should NOT be blocked (allowlist overrides)") + assert.True(t, result.Matched, "IP should match allowlist") + assert.Equal(t, "allowlist", result.Name) + + // Different IP in same range should be blocked + otherIP := netip.MustParseAddr("192.168.1.200") + denied, result = mgr.Check(otherIP) + assert.True(t, denied, "Other IP should be blocked by denylist") + }) + + t.Run("feed_update_causes_blocking", func(t *testing.T) { + // Mutable feed content + var content atomic.Value + content.Store("") + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, content.Load().(string)) + })) + defer srv.Close() + + fl, err := 
newFeedList(feedConfig{ + URL: srv.URL, + Format: formatIP, + Type: listTypeDeny, + Refresh: 100 * time.Millisecond, // Short refresh for test + }) + require.NoError(t, err) + defer fl.Close() + + mgr := NewManager() + mgr.add(fl) + + // Initially empty - IP should NOT be blocked + time.Sleep(50 * time.Millisecond) // Let initial fetch complete + denied, _ := mgr.Check(testIP) + assert.False(t, denied, "IP should NOT be blocked initially (empty feed)") + + // Update feed content + content.Store("192.168.1.0/24\n") + + // Wait for refresh and verify blocking + assert.Eventually(t, func() bool { + denied, _ := mgr.Check(testIP) + return denied + }, 500*time.Millisecond, 20*time.Millisecond, "IP should be blocked after feed update") + }) +} + +// BenchmarkManagerCheck benchmarks denylist check with realistic 2025Q1 feed sizes: +// - spamhaus-drop: ~1.4k IPv4 CIDRs (mix of /8 to /24) +// - spamhaus-dropv6: ~84 IPv6 prefixes +// - urlhaus: ~30k IPv4 /32s +func BenchmarkManagerCheck(b *testing.B) { + // Spamhaus DROP: ~1.4k IPv4 CIDRs + dropPrefixes := make([]netip.Prefix, 1448) + for i := range dropPrefixes { + o1, o2 := byte(1+(i*7)%254), byte((i*13)%256) + o3, o4 := byte((i*17)%256), byte(0) + ip := netip.AddrFrom4([4]byte{o1, o2, o3, o4}) + // Real DROP has mix: /8(~2%), /16(~15%), /20(~20%), /24(~63%) + bits := []int{8, 16, 16, 20, 20, 20, 24, 24, 24, 24}[i%10] + dropPrefixes[i] = netip.PrefixFrom(ip, bits) + } + + // Spamhaus DROPv6: ~84 IPv6 prefixes + dropv6Prefixes := make([]netip.Prefix, 84) + for i := range dropv6Prefixes { + ip := netip.MustParseAddr("2001:db8::").As16() + ip[2] = byte(i) + dropv6Prefixes[i] = netip.PrefixFrom(netip.AddrFrom16(ip), 32) + } + + // URLhaus: ~30k IPv4 /32s + urlhausPrefixes := make([]netip.Prefix, 30000) + for i := range urlhausPrefixes { + o1 := byte(1 + (i*7)%254) + o2 := byte((i * 13) % 256) + o3 := byte((i * 17) % 256) + o4 := byte((i * 23) % 256) + ip := netip.AddrFrom4([4]byte{o1, o2, o3, o4}) + urlhausPrefixes[i] = netip.PrefixFrom(ip, 32) + } + + dropPS := newPrefixSet() + dropPS.replace(dropPrefixes) + + dropv6PS := newPrefixSet() + dropv6PS.replace(dropv6Prefixes) + + urlhausPS := newPrefixSet() + urlhausPS.replace(urlhausPrefixes) + + mgr := NewManager() + mgr.add(&mockChecker{name: "spamhaus-drop", listType: listTypeDeny, prefixes: dropPS}) + mgr.add(&mockChecker{name: "spamhaus-dropv6", listType: listTypeDeny, prefixes: dropv6PS}) + mgr.add(&mockChecker{name: "urlhaus", listType: listTypeDeny, prefixes: urlhausPS}) + + // Test IPs - common legitimate IPs (should miss all lists) + testIPv4s := []netip.Addr{ + netip.MustParseAddr("8.8.8.8"), + netip.MustParseAddr("1.1.1.1"), + netip.MustParseAddr("192.168.1.1"), + netip.MustParseAddr("10.0.0.1"), + netip.MustParseAddr("172.16.0.1"), + netip.MustParseAddr("203.0.113.50"), + } + testIPv6s := []netip.Addr{ + netip.MustParseAddr("2606:4700:4700::1111"), + netip.MustParseAddr("2001:4860:4860::8888"), + } + + b.Run("IPv4", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + mgr.Check(testIPv4s[i%len(testIPv4s)]) + } + }) + + b.Run("IPv6", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + mgr.Check(testIPv6s[i%len(testIPv6s)]) + } + }) +} diff --git a/denylist/feed.go b/denylist/feed.go new file mode 100644 index 0000000..baf9ce0 --- /dev/null +++ b/denylist/feed.go @@ -0,0 +1,255 @@ +package denylist + +import ( + "context" + "fmt" + "io" + "net/http" + "net/netip" + "net/url" + "runtime/debug" + "strings" + "sync" + "time" +) + +// userAgent returns an 
identifier for HTTP requests to feed operators. +// This helps feed maintainers identify p2p-forge as a consumer of their data. +func userAgent() string { + const ( + name = "p2p-forge" + importPath = "github.com/ipshipyard/p2p-forge" + ) + version := "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + for _, dep := range bi.Deps { + if dep.Path == importPath { + version = dep.Version + break + } + } + // Main module + if version == "unknown" && bi.Main.Path == importPath && bi.Main.Version != "" { + version = bi.Main.Version + } + } + return name + "/" + version +} + +// feedList is an HTTP feed-based IP list that auto-refreshes. +type feedList struct { + url string + name string + listType listType + format feedFormat + refresh time.Duration + forgeSuffix string + prefixes *prefixSet + + client *http.Client + lastModified string // Last-Modified header value + lastUpdate time.Time + mu sync.RWMutex + + ctx context.Context + cancel context.CancelFunc + done chan struct{} +} + +// feedConfig holds configuration for an HTTP feed-based list. +type feedConfig struct { + URL string // feed URL + Name string // name for metrics (defaults to URL hostname) + Type listType // allow or deny (default: deny) + Format feedFormat // ip or url + Refresh time.Duration // refresh interval + ForgeSuffix string // forge domain suffix for URL format (e.g., "libp2p.direct") +} + +// newFeedList creates a new HTTP feed-based list. +func newFeedList(cfg feedConfig) (*feedList, error) { + // Validate URL + u, err := url.Parse(cfg.URL) + if err != nil { + return nil, fmt.Errorf("invalid feed URL: %w", err) + } + + name := cfg.Name + if name == "" { + // Derive name from URL + name = strings.TrimPrefix(u.Host, "www.") + if u.Path != "" && u.Path != "/" { + // Include last path segment for uniqueness + parts := strings.Split(strings.Trim(u.Path, "/"), "/") + if len(parts) > 0 { + name = name + "-" + parts[len(parts)-1] + } + } + } + + lt := cfg.Type + if lt == "" { + lt = listTypeDeny + } + + format := cfg.Format + if format == "" { + format = formatIP + } + + refresh := cfg.Refresh + if refresh == 0 { + refresh = defaultFeedRefresh + } + + ctx, cancel := context.WithCancel(context.Background()) + + fl := &feedList{ + url: cfg.URL, + name: name, + listType: lt, + format: format, + refresh: refresh, + forgeSuffix: cfg.ForgeSuffix, + prefixes: newPrefixSet(), + client: &http.Client{ + Timeout: 30 * time.Second, + }, + ctx: ctx, + cancel: cancel, + done: make(chan struct{}), + } + + // Start background refresh (includes initial fetch) + go fl.refreshLoop() + + return fl, nil +} + +// Update fetches the feed and updates the prefix set. +// Returns the number of entries loaded. 
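+// Requests are conditional: on an HTTP 304 Not Modified response the
+// previously loaded prefixes are kept and the current entry count is returned.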
+func (fl *feedList) Update(ctx context.Context) (int, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fl.url, nil) + if err != nil { + return 0, err + } + + // Use If-Modified-Since for conditional request + fl.mu.RLock() + lastMod := fl.lastModified + fl.mu.RUnlock() + + if lastMod != "" { + req.Header.Set("If-Modified-Since", lastMod) + } + + req.Header.Set("User-Agent", userAgent()) + + resp, err := fl.client.Do(req) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + // Handle 304 Not Modified + if resp.StatusCode == http.StatusNotModified { + return fl.prefixes.size(), nil + } + + if resp.StatusCode != http.StatusOK { + return 0, fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + // Parse content + prefixes, err := fl.parseBody(resp.Body) + if err != nil { + return 0, err + } + + fl.prefixes.replace(prefixes) + + now := time.Now() + fl.mu.Lock() + fl.lastUpdate = now + if lm := resp.Header.Get("Last-Modified"); lm != "" { + fl.lastModified = lm + } + fl.mu.Unlock() + + // Update metrics + updateEntries(fl.name, fl.listType, len(prefixes)) + updateLastUpdate(fl.name, now.Unix()) + + return len(prefixes), nil +} + +func (fl *feedList) parseBody(r io.Reader) ([]netip.Prefix, error) { + return parse(fl.format, r, fl.forgeSuffix) +} + +func (fl *feedList) refreshLoop() { + defer close(fl.done) + + // Initial fetch (non-blocking startup) + if n, err := fl.Update(fl.ctx); err != nil { + log.Warningf("denylist feed %s: initial fetch failed: %v (will retry in %v)", fl.name, err, fl.refresh) + } else { + log.Infof("denylist feed %s: loaded %d entries", fl.name, n) + } + + ticker := time.NewTicker(fl.refresh) + defer ticker.Stop() + + for { + select { + case <-fl.ctx.Done(): + return + case <-ticker.C: + if n, err := fl.Update(fl.ctx); err != nil { + log.Warningf("denylist feed %s: refresh failed: %v", fl.name, err) + } else { + log.Infof("denylist feed %s: refreshed, %d entries", fl.name, n) + } + } + } +} + +// Check implements checker. +func (fl *feedList) Check(ip netip.Addr) CheckResult { + if fl.prefixes.contains(ip) { + return CheckResult{ + Matched: true, + Name: fl.name, + } + } + return CheckResult{} +} + +// Name implements checker. +func (fl *feedList) Name() string { + return fl.name +} + +// Type implements checker. +func (fl *feedList) Type() listType { + return fl.listType +} + +// Size implements checker. +func (fl *feedList) Size() int { + return fl.prefixes.size() +} + +// LastUpdate implements updatable. +func (fl *feedList) LastUpdate() time.Time { + fl.mu.RLock() + defer fl.mu.RUnlock() + return fl.lastUpdate +} + +// Close implements io.Closer. +func (fl *feedList) Close() error { + fl.cancel() + <-fl.done + return nil +} diff --git a/denylist/file.go b/denylist/file.go new file mode 100644 index 0000000..d7dc752 --- /dev/null +++ b/denylist/file.go @@ -0,0 +1,189 @@ +package denylist + +import ( + "io" + "net/netip" + "os" + "path/filepath" + "sync" + "time" + + "github.com/fsnotify/fsnotify" +) + +// fileList is a file-based IP list that auto-reloads on changes. +type fileList struct { + path string + name string + listType listType + prefixes *prefixSet + watcher *fsnotify.Watcher + lastUpdate time.Time + mu sync.RWMutex + done chan struct{} + closeOnce sync.Once +} + +// fileConfig holds configuration for a file-based list. 
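+// A relative Path is resolved against BaseDir; the resulting file is watched
+// with fsnotify and reloaded on Write/Create events (including atomic saves
+// that replace the file via rename).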
+type fileConfig struct { + Path string // absolute or relative path to file + Name string // name for metrics (defaults to filename) + Type listType // allow or deny (default: deny) + BaseDir string // base directory for relative paths +} + +// newFileList creates a new file-based list. +func newFileList(cfg fileConfig) (*fileList, error) { + // Resolve relative paths + path := cfg.Path + if !filepath.IsAbs(path) && cfg.BaseDir != "" { + path = filepath.Join(cfg.BaseDir, path) + } + + name := cfg.Name + if name == "" { + name = filepath.Base(path) + } + + lt := cfg.Type + if lt == "" { + lt = listTypeDeny + } + + fl := &fileList{ + path: path, + name: name, + listType: lt, + prefixes: newPrefixSet(), + done: make(chan struct{}), + } + + // Initial load + if err := fl.load(); err != nil { + return nil, err + } + log.Infof("denylist file %s: loaded %d entries", fl.name, fl.Size()) + + // Setup file watcher + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, err + } + fl.watcher = watcher + + // Watch the directory (more reliable than watching the file directly) + if err := watcher.Add(filepath.Dir(path)); err != nil { + watcher.Close() + return nil, err + } + + go fl.watchLoop() + + return fl, nil +} + +func (fl *fileList) load() error { + f, err := os.Open(fl.path) + if err != nil { + return err + } + defer f.Close() + + return fl.loadFrom(f) +} + +func (fl *fileList) loadFrom(r io.Reader) error { + prefixes, err := parseIP(r) + if err != nil { + return err + } + + fl.prefixes.replace(prefixes) + + now := time.Now() + fl.mu.Lock() + fl.lastUpdate = now + fl.mu.Unlock() + + // Update metrics + updateEntries(fl.name, fl.listType, len(prefixes)) + updateLastUpdate(fl.name, now.Unix()) + + return nil +} + +func (fl *fileList) watchLoop() { + filename := filepath.Base(fl.path) + + for { + select { + case <-fl.done: + return + case event, ok := <-fl.watcher.Events: + if !ok { + return + } + // Only reload if our file was modified + if filepath.Base(event.Name) == filename { + if event.Has(fsnotify.Write) || event.Has(fsnotify.Create) { + // Small delay to let writes complete + time.Sleep(100 * time.Millisecond) + if err := fl.load(); err != nil { + log.Warningf("denylist file %s: reload failed: %v", fl.name, err) + } else { + log.Infof("denylist file %s: reloaded, %d entries", fl.name, fl.Size()) + } + } + } + case err, ok := <-fl.watcher.Errors: + if ok && err != nil { + log.Warningf("denylist file %s: watcher error: %v", fl.name, err) + } + } + } +} + +// Check implements checker. +func (fl *fileList) Check(ip netip.Addr) CheckResult { + if fl.prefixes.contains(ip) { + return CheckResult{ + Matched: true, + Name: fl.name, + } + } + return CheckResult{} +} + +// Name implements checker. +func (fl *fileList) Name() string { + return fl.name +} + +// Type implements checker. +func (fl *fileList) Type() listType { + return fl.listType +} + +// Size implements checker. +func (fl *fileList) Size() int { + return fl.prefixes.size() +} + +// LastUpdate returns when the file was last loaded. +func (fl *fileList) LastUpdate() time.Time { + fl.mu.RLock() + defer fl.mu.RUnlock() + return fl.lastUpdate +} + +// Close implements io.Closer. Safe to call multiple times. 
+func (fl *fileList) Close() error { + var err error + fl.closeOnce.Do(func() { + close(fl.done) + if fl.watcher != nil { + err = fl.watcher.Close() + } + }) + return err +} diff --git a/denylist/manager.go b/denylist/manager.go new file mode 100644 index 0000000..afd5cee --- /dev/null +++ b/denylist/manager.go @@ -0,0 +1,88 @@ +package denylist + +import ( + "errors" + "io" + "net/netip" + "sync" +) + +// Manager combines multiple checker implementations. +// Allowlists are checked first; if an IP matches any allowlist, +// it bypasses all denylist checks. +type Manager struct { + allowlists []checker + denylists []checker + mu sync.RWMutex + closeOnce sync.Once +} + +// NewManager creates an empty Manager. +func NewManager() *Manager { + return &Manager{} +} + +// add adds a checker to the manager. +// Checkers are sorted into allowlists or denylists based on Type(). +// Within each category, checkers are evaluated in insertion order (first match wins). +func (m *Manager) add(c checker) { + m.mu.Lock() + defer m.mu.Unlock() + + if c.Type() == listTypeAllow { + m.allowlists = append(m.allowlists, c) + } else { + m.denylists = append(m.denylists, c) + } +} + +// Check checks if an IP should be denied. +// Returns (denied, result) where denied is true if the IP should be blocked. +// Allowlists are checked first - if the IP matches any allowlist, it's allowed. +func (m *Manager) Check(ip netip.Addr) (denied bool, result CheckResult) { + m.mu.RLock() + defer m.mu.RUnlock() + + // 1. Check allowlists first + for _, checker := range m.allowlists { + if r := checker.Check(ip); r.Matched { + incIPAllowed(r.Name) + return false, r // allowed - skip denylists + } + } + + // 2. Check denylists + for _, checker := range m.denylists { + if r := checker.Check(ip); r.Matched { + incIPDenied(r.Name) + return true, r // denied + } + } + + return false, CheckResult{} // not in any list +} + +// close closes all checkers that implement io.Closer. Safe to call multiple times. +func (m *Manager) close() error { + var errs []error + m.closeOnce.Do(func() { + m.mu.Lock() + defer m.mu.Unlock() + + for _, c := range m.allowlists { + if closer, ok := c.(io.Closer); ok { + if err := closer.Close(); err != nil { + errs = append(errs, err) + } + } + } + for _, c := range m.denylists { + if closer, ok := c.(io.Closer); ok { + if err := closer.Close(); err != nil { + errs = append(errs, err) + } + } + } + }) + return errors.Join(errs...) +} diff --git a/denylist/metrics.go b/denylist/metrics.go new file mode 100644 index 0000000..241e292 --- /dev/null +++ b/denylist/metrics.go @@ -0,0 +1,90 @@ +package denylist + +import ( + "sync" + "testing" + + "github.com/coredns/coredns/plugin" + "github.com/prometheus/client_golang/prometheus" +) + +const subsystem = "forge_denylist" + +var ( + deniedTotal *prometheus.CounterVec + allowedTotal *prometheus.CounterVec + entriesGauge *prometheus.GaugeVec + lastUpdateGauge *prometheus.GaugeVec + metricsOnce sync.Once +) + +// initMetrics initializes and registers denylist metrics with appropriate registry. +// Uses sync.Once to ensure single initialization across parallel tests. 
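+// All metrics are registered under the coredns_forge_denylist_* prefix and
+// are documented in docs/METRICS.md.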
+func initMetrics() { + metricsOnce.Do(func() { + var registry prometheus.Registerer = prometheus.DefaultRegisterer + + if testing.Testing() { + // Use isolated registry in tests to avoid metric collisions + registry = prometheus.NewRegistry() + } + + deniedTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: plugin.Namespace, + Subsystem: subsystem, + Name: "denied_total", + Help: "Total number of IPs denied by denylist.", + }, []string{"name"}) + + allowedTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: plugin.Namespace, + Subsystem: subsystem, + Name: "allowed_total", + Help: "Total number of IPs allowed by allowlist override.", + }, []string{"name"}) + + entriesGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: plugin.Namespace, + Subsystem: subsystem, + Name: "entries", + Help: "Number of entries in each denylist source.", + }, []string{"name", "type"}) + + lastUpdateGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: plugin.Namespace, + Subsystem: subsystem, + Name: "last_update_timestamp", + Help: "Unix timestamp of last successful update.", + }, []string{"name"}) + + registry.MustRegister(deniedTotal, allowedTotal, entriesGauge, lastUpdateGauge) + }) +} + +// incIPDenied increments the denied IP counter for a denylist. +func incIPDenied(name string) { + if deniedTotal != nil { + deniedTotal.WithLabelValues(name).Inc() + } +} + +// incIPAllowed increments the allowed IP counter (allowlist override). +func incIPAllowed(name string) { + if allowedTotal != nil { + allowedTotal.WithLabelValues(name).Inc() + } +} + +// updateEntries updates the entry count for a list. +func updateEntries(name string, lt listType, count int) { + if entriesGauge != nil { + entriesGauge.WithLabelValues(name, string(lt)).Set(float64(count)) + } +} + +// updateLastUpdate updates the last update timestamp for a list. +func updateLastUpdate(name string, unixTimestamp int64) { + if lastUpdateGauge != nil { + lastUpdateGauge.WithLabelValues(name).Set(float64(unixTimestamp)) + } +} diff --git a/denylist/parser.go b/denylist/parser.go new file mode 100644 index 0000000..11fd11f --- /dev/null +++ b/denylist/parser.go @@ -0,0 +1,188 @@ +package denylist + +import ( + "bufio" + "io" + "net/netip" + "net/url" + "strings" +) + +// addrToPrefix converts a single IP address to a host prefix (/32 or /128). +func addrToPrefix(ip netip.Addr) netip.Prefix { + bits := 32 + if ip.Is6() { + bits = 128 + } + return netip.PrefixFrom(ip, bits) +} + +// parseIP parses content in IP format: one IP or CIDR per line. +// Lines starting with # or ; are comments. Empty lines are skipped. +// Used for Spamhaus DROP, FireHOL, and custom lists. 
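+//
+// Example input (illustrative):
+//
+//	; Spamhaus DROP style
+//	192.0.2.0/24 ; SBL123456
+//	2001:db8::/32
+//	198.51.100.7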
+func parseIP(r io.Reader) ([]netip.Prefix, error) { + var prefixes []netip.Prefix + scanner := bufio.NewScanner(r) + + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + // Skip empty lines and comments + if line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") { + continue + } + + // Handle inline comments (some lists have "1.2.3.0/24 ; SBL123456") + if idx := strings.IndexAny(line, ";#"); idx != -1 { + line = strings.TrimSpace(line[:idx]) + } + + // Try parsing as CIDR first + if prefix, err := netip.ParsePrefix(line); err == nil { + prefixes = append(prefixes, prefix) + continue + } + + // Try parsing as single IP (convert to /32 or /128) + if ip, err := netip.ParseAddr(line); err == nil { + prefixes = append(prefixes, addrToPrefix(ip)) + } + // Skip unparseable lines silently + } + + return prefixes, scanner.Err() +} + +// parseURLOptions configures URL parsing behavior. +type parseURLOptions struct { + // ForgeSuffix is the forge domain suffix (e.g., "libp2p.direct"). + // If set, domains matching this suffix have their IP extracted directly + // from the subdomain (e.g., "192-168-1-1.peerid.libp2p.direct" -> 192.168.1.1). + ForgeSuffix string +} + +// parseURL parses content in URL format: one URL per line. +// Extracts IPs directly from URL hosts. Domain names are skipped because: +// - Domain→IP mappings change frequently (stale cache) +// - Shared hosting IPs cause false positives +// - p2p-forge only blocks by IP, not domain +// Lines starting with # are comments. Used for URLhaus and similar feeds. +func parseURL(r io.Reader, opts parseURLOptions) ([]netip.Prefix, error) { + // Normalize forge suffix + forgeSuffix := strings.ToLower(opts.ForgeSuffix) + if forgeSuffix != "" && !strings.HasPrefix(forgeSuffix, ".") { + forgeSuffix = "." + forgeSuffix + } + + seen := make(map[netip.Addr]struct{}) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + // Skip empty lines and comments + if line == "" || strings.HasPrefix(line, "#") { + continue + } + + // Parse as URL + u, err := url.Parse(line) + if err != nil { + continue + } + + host := strings.ToLower(u.Hostname()) + if host == "" { + continue + } + + // Case 1: Host is already an IP - use it + if ip, err := netip.ParseAddr(host); err == nil { + seen[ip.Unmap()] = struct{}{} + continue + } + + // Case 2: Host matches forge suffix - extract IP from subdomain + if forgeSuffix != "" && strings.HasSuffix(host, forgeSuffix) { + if ip, ok := parseForgeIP(host, forgeSuffix); ok { + seen[ip.Unmap()] = struct{}{} + continue + } + } + + // Case 3: Regular domain - skip (DNS resolution is unreliable for IP blocking) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + // Convert to prefixes + prefixes := make([]netip.Prefix, 0, len(seen)) + for ip := range seen { + prefixes = append(prefixes, addrToPrefix(ip)) + } + + return prefixes, nil +} + +// parseForgeIP extracts the IP address from a forge domain subdomain. +// For example: "192-168-1-1.peerid.libp2p.direct" -> 192.168.1.1 +// Returns the IP and true if successful, or invalid IP and false otherwise. +func parseForgeIP(host, forgeSuffix string) (netip.Addr, bool) { + // Verify suffix matches + if !strings.HasSuffix(host, forgeSuffix) { + return netip.Addr{}, false + } + + // Remove the forge suffix to get "192-168-1-1.peerid" + withoutSuffix := strings.TrimSuffix(host, forgeSuffix) + + // Split by "." 
+	// to get subdomain parts
+	parts := strings.Split(withoutSuffix, ".")
+	if len(parts) < 1 {
+		return netip.Addr{}, false
+	}
+
+	// The IP prefix is the first part (leftmost subdomain)
+	ipPrefix := parts[0]
+
+	// Try IPv4: replace "-" with "."
+	ipv4Str := strings.ReplaceAll(ipPrefix, "-", ".")
+	if ip, err := netip.ParseAddr(ipv4Str); err == nil && ip.Is4() {
+		return ip, true
+	}
+
+	// Try IPv6: replace "-" with ":"
+	// Handle RFC 1035 compliance: leading/trailing zeros may be added
+	ipv6Str := strings.ReplaceAll(ipPrefix, "-", ":")
+
+	// Remove leading "0" added for RFC 1035 compliance (e.g., "0::1" -> "::1")
+	if strings.HasPrefix(ipv6Str, "0:") && len(ipv6Str) > 2 && ipv6Str[1] == ':' {
+		ipv6Str = ipv6Str[1:]
+	}
+	// Remove trailing "0" added for RFC 1035 compliance (e.g., "2001:db8::0" -> "2001:db8::")
+	if strings.HasSuffix(ipv6Str, ":0") && len(ipv6Str) > 2 {
+		trimmed := strings.TrimSuffix(ipv6Str, "0")
+		// Only remove if it results in a valid "::" ending
+		if strings.HasSuffix(trimmed, ":") {
+			ipv6Str = trimmed
+		}
+	}
+
+	if ip, err := netip.ParseAddr(ipv6Str); err == nil && ip.Is6() {
+		return ip, true
+	}
+
+	return netip.Addr{}, false
+}
+
+// parse dispatches to the appropriate parser based on format.
+func parse(format feedFormat, r io.Reader, forgeSuffix string) ([]netip.Prefix, error) {
+	switch format {
+	case formatURL:
+		return parseURL(r, parseURLOptions{ForgeSuffix: forgeSuffix})
+	default:
+		return parseIP(r)
+	}
+}
diff --git a/denylist/plugin.go b/denylist/plugin.go
new file mode 100644
index 0000000..aab0f70
--- /dev/null
+++ b/denylist/plugin.go
@@ -0,0 +1,74 @@
+package denylist
+
+import (
+	"sync"
+
+	"github.com/coredns/caddy"
+	"github.com/coredns/coredns/core/dnsserver"
+	"github.com/coredns/coredns/plugin"
+)
+
+const pluginName = "denylist"
+
+var (
+	sharedManager   *Manager
+	sharedManagerMu sync.RWMutex
+)
+
+func init() { plugin.Register(pluginName, setup) }
+
+func setup(c *caddy.Controller) error {
+	config := dnsserver.GetConfig(c)
+
+	// Use the server zone as forgeDomain so URL feeds can extract IPs
+	// from forge subdomains
+	forgeDomain := ""
+	if len(config.Zone) > 0 {
+		forgeDomain = config.Zone
+	}
+
+	c.Next() // consume "denylist" token
+
+	mgr, err := parseConfig(c, config.Root, forgeDomain)
+	if err != nil {
+		return plugin.Error(pluginName, err)
+	}
+
+	if mgr != nil {
+		sharedManagerMu.Lock()
+		sharedManager = mgr
+		sharedManagerMu.Unlock()
+		initMetrics()
+
+		c.OnFinalShutdown(func() error {
+			sharedManagerMu.Lock()
+			m := sharedManager
+			sharedManagerMu.Unlock()
+			if m != nil {
+				err := m.close()
+				ResetManager()
+				return err
+			}
+			return nil
+		})
+	}
+
+	// denylist is a data provider, not a DNS handler.
+	// Other plugins call GetManager() to access it.
+	return nil
+}
+
+// GetManager returns the shared Manager instance for other plugins.
+// Returns nil if the denylist plugin is not configured.
+func GetManager() *Manager {
+	sharedManagerMu.RLock()
+	defer sharedManagerMu.RUnlock()
+	return sharedManager
+}
+
+// ResetManager clears the shared Manager instance.
+// Used during shutdown and tests.
+func ResetManager() {
+	sharedManagerMu.Lock()
+	defer sharedManagerMu.Unlock()
+	sharedManager = nil
+}
diff --git a/denylist/prefixset.go b/denylist/prefixset.go
new file mode 100644
index 0000000..c94600a
--- /dev/null
+++ b/denylist/prefixset.go
@@ -0,0 +1,57 @@
+package denylist
+
+import (
+	"net/netip"
+	"sync/atomic"
+
+	"github.com/gaissmai/bart"
+)
+
+// prefixSet is a thread-safe set of IP prefixes optimized for lookup.
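+// Readers call contains against an immutable trie snapshot, while updates
+// build a replacement trie and swap it in atomically via replace.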
+// Uses a BART (Balanced Routing Table) for O(log n) lookups and +// copy-on-write semantics for lock-free reads. +type prefixSet struct { + trie atomic.Pointer[bart.Lite] +} + +// newPrefixSet creates an empty prefixSet. +func newPrefixSet() *prefixSet { + ps := &prefixSet{} + ps.trie.Store(new(bart.Lite)) + return ps +} + +// contains returns true if ip is within any prefix in the set. +// IPv4-mapped IPv6 addresses (::ffff:x.x.x.x) are unmapped before lookup, +// so they match IPv4 prefixes. +func (ps *prefixSet) contains(ip netip.Addr) bool { + t := ps.trie.Load() + if t == nil { + return false + } + // Normalize IPv4-mapped IPv6 to IPv4 (bart requires native addresses) + if ip.Is4In6() { + ip = netip.AddrFrom4(ip.As4()) + } + return t.Lookup(ip) +} + +// replace atomically replaces all prefixes in the set. +func (ps *prefixSet) replace(prefixes []netip.Prefix) { + t := new(bart.Lite) + for _, p := range prefixes { + if p.IsValid() { + t.Insert(p) + } + } + ps.trie.Store(t) +} + +// size returns the total number of prefixes in the set. +func (ps *prefixSet) size() int { + t := ps.trie.Load() + if t == nil { + return 0 + } + return t.Size() +} diff --git a/docs/METRICS.md b/docs/METRICS.md index a1f9935..6eb9a7b 100644 --- a/docs/METRICS.md +++ b/docs/METRICS.md @@ -15,6 +15,14 @@ It includes default [Prometheus Go client metrics](https://prometheus.io/docs/gu - `type=AAAA` - successful IPv6 response - `type=NODATA-{qtype}` - query type not supported for this domain (e.g., TXT, MX) - `type=NODATA-PEERID-{qtype}` - query to bare `peerid.domain` (no IP prefix) + - `type=DENIED-{list}` - IP blocked by denylist + +#### `denylist` plugin (shared by ipparser and acme) + +- `coredns_forge_denylist_denied_total{name}` - IPs denied, by list name +- `coredns_forge_denylist_allowed_total{name}` - IPs allowed (allowlist match), by list name +- `coredns_forge_denylist_entries{name,type}` - number of entries per list +- `coredns_forge_denylist_last_update_timestamp{name}` - last update unix timestamp per list #### `acme` plugin (HTTP registration + DNS-01) diff --git a/docs/denylist.md b/docs/denylist.md new file mode 100644 index 0000000..5ed4117 --- /dev/null +++ b/docs/denylist.md @@ -0,0 +1,289 @@ +# IP Denylist + +The denylist plugin prevents misuse of the public DNS and ACME registration services for malicious purposes such as DNS rebinding attacks, malware distribution, or phishing. + +> [!TIP] +> The easiest way to block an IP abusing the p2p-forge service is to report it to one of the [recommended feeds](#recommended) (e.g., URLhaus or Spamhaus). + +## Table of Contents + +- [Overview](#overview) +- [Configuration](#configuration) + - [File Directive](#file-directive) + - [Feed Directive](#feed-directive) + - [Parsing](#parsing) + - [Example](#example) +- [How It Works](#how-it-works) + - [DNS (ipparser)](#dns-ipparser) + - [ACME Registration](#acme-registration) +- [Feeds](#feeds) + - [Recommended](#recommended) + - [Other (Not Enabled by Default)](#other-not-enabled-by-default) + +## Overview + +The denylist is a standalone CoreDNS plugin shared by: + +- **ipparser**: Blocks DNS responses for denied IPs (returns NODATA) +- **acme**: Blocks ACME certificate registration for denied client IPs or multiaddr IPs (returns HTTP 403) + +```mermaid +flowchart LR + subgraph Sources["Data Sources"] + File["Local Files
(fsnotify auto-reload)"] + Feed["HTTP Feeds
(periodic refresh)"] + end + + subgraph Denylist["Denylist Plugin"] + Manager["Manager"] + Allow["Allowlists"] + Deny["Denylists"] + Manager --> Allow + Manager --> Deny + end + + subgraph Consumers["Consumer Plugins"] + ipparser["ipparser
(DNS A/AAAA)"] + acme["acme
(HTTP registration)"] + end + + File --> Manager + Feed --> Manager + Manager -.->|GetManager| ipparser + Manager -.->|GetManager| acme +``` + +The denylist system: + +- Supports both **file-based** lists (auto-reload on change via fsnotify) +- Supports **HTTP feed-based** lists (auto-refresh with `If-Modified-Since` caching) +- Allows both **allow** and **deny** list types (allowlists are checked first) +- Provides Prometheus metrics for monitoring (see [METRICS.md](METRICS.md)) + +## Configuration + +The denylist is configured as a top-level plugin in the Corefile: + +``` +denylist { + file [type=allow|deny] [name=] + feed format=ip|url [type=allow|deny] [refresh=] [name=] +} +ipparser libp2p.direct +acme libp2p.direct { ... } +``` + +### File Directive + +Loads IP addresses from a local file. The file is watched for changes and automatically reloaded. + +``` +file [type=allow|deny] [name=] +``` + +Options: + +- `path` - File path (absolute or relative to Corefile directory) +- `type` - List type: `allow` or `deny` (default: `deny`) +- `name` - Name for metrics (default: filename) + +File format: One IP or CIDR per line. Lines starting with `#` or `;` are comments. + +``` +# Example denylist +203.0.113.0/24 +198.51.100.0/24 +2001:db8::/32 +``` + +#### File Watching Behavior + +Files are monitored via fsnotify and automatically reloaded when changed: + +- Watcher starts at plugin initialization +- Changes trigger a 100ms debounce timer (reset on new events) to handle editors that perform multiple writes +- After debounce, file is re-read and parsed +- Updates are atomic (no partial state during reload) + +```mermaid +flowchart LR + subgraph Startup + Init["File configured"] --> Watch["Start fsnotify watcher"] + Watch --> Load["Read and parse file"] + Load --> Store["Store in PrefixSet"] + end + + subgraph OnChange["On File Change Event"] + Event["fsnotify event"] --> Debounce["100ms debounce timer
(reset on new events)"] + Debounce --> Reload["Read and parse file"] + Reload --> Replace["Atomic replace
PrefixSet"] + end + + Store --> Event + Replace --> Event +``` + +### Feed Directive + +Fetches IP addresses from an HTTP URL. Supports periodic refresh with `If-Modified-Since` caching. + +``` +feed format=ip|url [type=allow|deny] [refresh=] [name=] +``` + +Options: + +- `url` - HTTP(S) URL to fetch +- `format` - Content format (required): + - `ip` - One IP or CIDR per line (e.g., Spamhaus DROP) + - `url` - One URL per line; IPs extracted from hostnames (e.g., URLhaus). The parser extracts IPs directly from URL hostnames. Domain names are skipped (DNS resolution is not performed to avoid stale results and collateral damage from shared hosting). This means most URLhaus entries (which use domain names) won't match unless they target forge domains or use IP addresses as hosts directly. +- `type` - List type: `allow` or `deny` (default: `deny`) +- `refresh` - Refresh interval (default: `1h`) +- `name` - Name for metrics (default: derived from URL) + +#### Feed Refresh Behavior + +HTTP feeds are fetched periodically and cached using conditional requests: + +- Initial fetch occurs at startup +- Subsequent fetches occur at the specified refresh interval +- Requests include `If-Modified-Since` header; servers responding with 304 Not Modified skip parsing +- HTTP requests have a 30 second timeout +- If the initial fetch fails at startup, a warning is logged and the feed retries at the next refresh interval +- User-Agent header identifies requests as `p2p-forge/` +- Updates are atomic (no partial state during refresh) + +```mermaid +flowchart LR + subgraph Startup + Init["Feed configured"] --> FirstFetch["Initial fetch"] + FirstFetch --> Parse["Parse content"] + Parse --> Store["Store in PrefixSet"] + end + + subgraph RefreshLoop["Refresh Loop (every refresh interval)"] + Timer["Timer tick"] --> Request["HTTP GET with
If-Modified-Since"] + Request --> Check304{"304 Not
Modified?"} + Check304 -->|Yes| Skip["Skip parsing
(use cached data)"] + Check304 -->|No| ParseNew["Parse new content"] + ParseNew --> Replace["Atomic replace
PrefixSet"] + end + + Store --> Timer + Skip --> Timer + Replace --> Timer +``` + +### Parsing + +Parsing rules apply to both file and feed directives: + +- Invalid or unparseable lines are silently skipped +- Single IP addresses are stored internally as /32 (IPv4) or /128 (IPv6) prefixes +- Comments (lines starting with `#` or `;`) and inline comments are supported + +### Example + +``` +denylist { + # Spamhaus DROP: hijacked IP ranges used for spam and malware + # Spamhaus recommends once per day; 12h is a reasonable compromise + feed https://www.spamhaus.org/drop/drop.txt format=ip refresh=12h name=spamhaus-drop + feed https://www.spamhaus.org/drop/dropv6.txt format=ip refresh=12h name=spamhaus-dropv6 + + # URLhaus: malware distribution URLs (IPs extracted) + # URLhaus updates every 5 minutes; use their stated minimum + feed https://urlhaus.abuse.ch/downloads/text/ format=url refresh=5m name=urlhaus + + # Local allowlist: bypasses all denylists (own infrastructure, feed false positives) + file ip-allowlist.txt type=allow + # Local denylist: quick blocks without waiting for feed updates + file ip-denylist.txt +} +ipparser libp2p.direct +acme libp2p.direct { ... } +``` + +## How It Works + +### DNS (ipparser) + +When a DNS query arrives for an IP-based subdomain (e.g., `203-0-113-1.peerid.libp2p.direct`), the IP is extracted and checked against the denylist. + +```mermaid +flowchart LR + Query["DNS Query
203-0-113-1.peerid.libp2p.direct A"] --> Parse["Parse IP from subdomain
203.0.113.1"] + Parse --> CheckAllow{"On any
allowlist?"} + CheckAllow -->|Yes| ReturnA["Return A record
203.0.113.1"] + CheckAllow -->|No| CheckDeny{"On any
denylist?"} + CheckDeny -->|No| ReturnA + CheckDeny -->|Yes| ReturnNODATA["Return NODATA
(rcode=SUCCESS, 0 answers)"] + + style ReturnA fill:#90EE90,stroke:#333,color:#000 + style ReturnNODATA fill:#FFB6C1,stroke:#333,color:#000 +``` + +**Key points:** + +- Allowlists are checked first. If the IP matches an allowlist, it is permitted regardless of denylists. +- Denylists are checked second. If the IP matches a denylist (and no allowlist), the query returns NODATA. +- NODATA (not NXDOMAIN) is returned per RFC 8020 to avoid negative caching of the entire subtree. + +### ACME Registration + +When an ACME registration request arrives, the denylist checks both client IPs and multiaddr IPs. + +```mermaid +flowchart LR + Request["POST /v1/_acme-challenge
{addresses: [/ip4/203.0.113.1/tcp/4001]}"] --> ExtractClient["Extract client IPs
X-Forwarded-For + RemoteAddr"] + ExtractClient --> ExtractMA["Extract multiaddr IPs
203.0.113.1"] + ExtractMA --> CheckAll{"Any IP on
denylist?
(allowlist checked first)"} + CheckAll -->|No| Probe["libp2p connectivity probe"] + Probe --> Success["HTTP 200
Challenge registered"] + CheckAll -->|Yes| Forbidden["HTTP 403 Forbidden
IP 203.0.113.1 blocked by list-name"] + + style Success fill:#90EE90,stroke:#333,color:#000 + style Forbidden fill:#FFB6C1,stroke:#333,color:#000 +``` + +**IPs checked:** + +- **Client IP**: Extracted from X-Forwarded-For header (for load balancer deployments) or RemoteAddr +- **Multiaddr IPs**: All IP addresses from the multiaddrs in the registration request body + +If any IP matches a denylist (and no allowlist), the request returns HTTP 403 Forbidden with a message identifying the blocked IP and denylist name. + +## Feeds + +### Recommended + +#### Spamhaus DROP + +The [Spamhaus DROP](https://www.spamhaus.org/drop/) (Don't Route Or Peer) list contains IP ranges hijacked by spammers and malware operators. + +- URL: `https://www.spamhaus.org/drop/drop.txt` (IPv4) +- URL: `https://www.spamhaus.org/drop/dropv6.txt` (IPv6) +- Format: `ip` +- Recommended refresh: `12h` (Spamhaus recommends once per day; data changes slowly) +- Minimum allowed: `1h` (more frequent requests may result in IP blocking) +- Size: ~1.5k prefixes total (2025Q1) + +#### URLhaus + +[URLhaus](https://urlhaus.abuse.ch/) tracks malware distribution URLs. + +- URL: `https://urlhaus.abuse.ch/downloads/text/` +- Format: `url` +- Recommended refresh: `5m` (their stated minimum; feeds regenerate every 5 minutes) +- Size: ~30k IPs extracted from ~100k URLs (most use domains, not IPs) (2025Q1) + +### Other (Not Enabled by Default) + +#### FireHOL + +[FireHOL](https://iplists.firehol.org/) aggregates multiple threat feeds. The Level1 list overlaps with Spamhaus DROP (already included) and contains fullbogons which would block private IPs used in development. + +FireHOL provides lists for Tor exit nodes and open proxies. These are not enabled by default but operators can add them if their threat model requires blocking anonymous access: + +- Tor exits: `https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/tor_exits.ipset` +- Open proxies: `https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/firehol_proxies.netset` diff --git a/e2e_test.go b/e2e_test.go index 0d3adef..4abd55d 100644 --- a/e2e_test.go +++ b/e2e_test.go @@ -77,6 +77,7 @@ func init() { "whoami", "startup", "shutdown", + "denylist", "ipparser", "file", "acme", @@ -986,3 +987,263 @@ Stack trace: operation() } + +// DenylistTestConfig configures denylist for E2E tests +type DenylistTestConfig struct { + DenyFileContent string // content for deny list file (one IP/CIDR per line) + AllowFileContent string // content for allow list file (one IP/CIDR per line) +} + +// NewTestInfrastructureWithDenylist creates a test environment with denylist configured +func NewTestInfrastructureWithDenylist(t *testing.T, cfg DenylistTestConfig) *TestInfrastructure { + tmpDir := t.TempDir() + + tmpListener, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Failed to create HTTP listener: %v", err) + } + httpPort := tmpListener.Addr().(*net.TCPAddr).Port + tmpListener.Close() + + // Build denylist block + var denylistBlock strings.Builder + denylistBlock.WriteString("denylist {\n") + + if cfg.DenyFileContent != "" { + denyFilePath := filepath.Join(tmpDir, "denylist.txt") + if err := os.WriteFile(denyFilePath, []byte(cfg.DenyFileContent), 0644); err != nil { + t.Fatalf("Failed to write deny file: %v", err) + } + denylistBlock.WriteString(fmt.Sprintf("\t\tfile %s type=deny name=test-deny\n", denyFilePath)) + } + + if cfg.AllowFileContent != "" { + allowFilePath := filepath.Join(tmpDir, "allowlist.txt") + if err := os.WriteFile(allowFilePath, 
[]byte(cfg.AllowFileContent), 0644); err != nil { + t.Fatalf("Failed to write allow file: %v", err) + } + denylistBlock.WriteString(fmt.Sprintf("\t\tfile %s type=allow name=test-allow\n", allowFilePath)) + } + + denylistBlock.WriteString("\t}") + + corefile := fmt.Sprintf(`.:0 { + log + errors + %s + ipparser %s + acme %s { + registration-domain %s listen-address=:%d external-tls=true + database-type badger %s + } + }`, denylistBlock.String(), forge, forge, forgeRegistration, httpPort, tmpDir) + + instance, err := caddy.Start(NewInput(corefile)) + if err != nil { + t.Fatalf("Failed to start CoreDNS instance: %v", err) + } + + testInfra := &TestInfrastructure{ + DNSServerAddress: instance.Servers()[0].LocalAddr().String(), + HTTPPort: httpPort, + TmpDir: tmpDir, + Instance: instance, + } + + t.Cleanup(func() { + if instance != nil { + errs := instance.ShutdownCallbacks() + if err := errors.Join(errs...); err != nil { + t.Logf("Shutdown callback errors: %v", err) + } + if err := instance.Stop(); err != nil { + t.Logf("Instance stop error: %v", err) + } + instance.Wait() + } + }) + + return testInfra +} + +func TestDenylistE2E(t *testing.T) { + // Note: This test runs sequentially (no t.Parallel()) because the denylist + // plugin uses a global singleton (sharedManager). Running in parallel with + // other tests that start CoreDNS instances would cause data races. + + _, pk, err := crypto.GenerateEd25519Key(rand.Reader) + if err != nil { + t.Fatal(err) + } + peerID, err := peer.IDFromPublicKey(pk) + if err != nil { + t.Fatal(err) + } + peerIDb36, err := peer.ToCid(peerID).StringOfBase(multibase.Base36) + if err != nil { + t.Fatal(err) + } + + t.Run("no_denylist_works", func(t *testing.T) { + testInfra := NewTestInfrastructure(t) + + // Query for 1.2.3.4 - should return the IP + m := &dns.Msg{Question: []dns.Question{{ + Qclass: dns.ClassINET, + Name: fmt.Sprintf("1-2-3-4.%s.%s.", peerIDb36, forge), + Qtype: dns.TypeA, + }}} + + r, err := dns.Exchange(m, testInfra.DNSServerAddress) + if err != nil { + t.Fatalf("Could not send message: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + t.Fatalf("Expected successful reply, got %s", dns.RcodeToString[r.Rcode]) + } + if len(r.Answer) != 1 { + t.Fatalf("Expected 1 answer, got %d", len(r.Answer)) + } + if a, ok := r.Answer[0].(*dns.A); !ok || a.A.String() != "1.2.3.4" { + t.Fatalf("Expected A record 1.2.3.4, got %v", r.Answer[0]) + } + }) + + t.Run("blocked_IP_returns_NODATA", func(t *testing.T) { + testInfra := NewTestInfrastructureWithDenylist(t, DenylistTestConfig{ + DenyFileContent: "1.2.3.4\n", + }) + + // Query for 1.2.3.4 - should return NODATA (success but no answers) + m := &dns.Msg{Question: []dns.Question{{ + Qclass: dns.ClassINET, + Name: fmt.Sprintf("1-2-3-4.%s.%s.", peerIDb36, forge), + Qtype: dns.TypeA, + }}} + + r, err := dns.Exchange(m, testInfra.DNSServerAddress) + if err != nil { + t.Fatalf("Could not send message: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + t.Fatalf("Expected NODATA (success with no answers), got %s", dns.RcodeToString[r.Rcode]) + } + if len(r.Answer) != 0 { + t.Fatalf("Expected NODATA (no answers), got %d answers: %v", len(r.Answer), r.Answer) + } + }) + + t.Run("blocked_CIDR_returns_NODATA", func(t *testing.T) { + testInfra := NewTestInfrastructureWithDenylist(t, DenylistTestConfig{ + DenyFileContent: "10.0.0.0/8\n", + }) + + // Query for 10.1.2.3 - should be blocked by CIDR + m := &dns.Msg{Question: []dns.Question{{ + Qclass: dns.ClassINET, + Name: fmt.Sprintf("10-1-2-3.%s.%s.", peerIDb36, forge), + 
Qtype: dns.TypeA, + }}} + + r, err := dns.Exchange(m, testInfra.DNSServerAddress) + if err != nil { + t.Fatalf("Could not send message: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + t.Fatalf("Expected NODATA (success with no answers), got %s", dns.RcodeToString[r.Rcode]) + } + if len(r.Answer) != 0 { + t.Fatalf("Expected NODATA (no answers), got %d answers: %v", len(r.Answer), r.Answer) + } + }) + + t.Run("allowlist_overrides_denylist", func(t *testing.T) { + testInfra := NewTestInfrastructureWithDenylist(t, DenylistTestConfig{ + DenyFileContent: "10.0.0.0/8\n", // Blocks entire 10.x.x.x range + AllowFileContent: "10.1.2.3\n", // Except this specific IP + }) + + // Query for 10.1.2.3 - should be allowed (allowlist overrides denylist) + m := &dns.Msg{Question: []dns.Question{{ + Qclass: dns.ClassINET, + Name: fmt.Sprintf("10-1-2-3.%s.%s.", peerIDb36, forge), + Qtype: dns.TypeA, + }}} + + r, err := dns.Exchange(m, testInfra.DNSServerAddress) + if err != nil { + t.Fatalf("Could not send message: %s", err) + } + if r.Rcode != dns.RcodeSuccess { + t.Fatalf("Expected successful reply, got %s", dns.RcodeToString[r.Rcode]) + } + if len(r.Answer) != 1 { + t.Fatalf("Expected 1 answer (allowlist should override), got %d", len(r.Answer)) + } + if a, ok := r.Answer[0].(*dns.A); !ok || a.A.String() != "10.1.2.3" { + t.Fatalf("Expected A record 10.1.2.3, got %v", r.Answer[0]) + } + + // Query for 10.9.9.9 - should still be blocked (not in allowlist) + m2 := &dns.Msg{Question: []dns.Question{{ + Qclass: dns.ClassINET, + Name: fmt.Sprintf("10-9-9-9.%s.%s.", peerIDb36, forge), + Qtype: dns.TypeA, + }}} + + r2, err := dns.Exchange(m2, testInfra.DNSServerAddress) + if err != nil { + t.Fatalf("Could not send message: %s", err) + } + if r2.Rcode != dns.RcodeSuccess || len(r2.Answer) != 0 { + t.Fatalf("Expected NODATA for non-allowlisted IP 10.9.9.9, got rcode=%s answers=%v", + dns.RcodeToString[r2.Rcode], r2.Answer) + } + }) + + t.Run("ACME_registration_blocked_returns_403", func(t *testing.T) { + testInfra := NewTestInfrastructureWithDenylist(t, DenylistTestConfig{ + DenyFileContent: "1.2.3.4\n", + }) + + // Create a peer with identity + sk, _, err := crypto.GenerateEd25519Key(rand.Reader) + if err != nil { + t.Fatal(err) + } + + // Create multiaddr with blocked IP + blockedMA, err := multiaddr.NewMultiaddr("/ip4/1.2.3.4/tcp/4001") + if err != nil { + t.Fatal(err) + } + + // Create valid challenge value (SHA256 digest in base64url) + testDigest := sha256.Sum256([]byte("test-blocked")) + testChallenge := base64.RawURLEncoding.EncodeToString(testDigest[:]) + + // Attempt registration - should fail with 403 + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + err = client.SendChallenge(ctx, fmt.Sprintf("http://127.0.0.1:%d", testInfra.HTTPPort), sk, testChallenge, []multiaddr.Multiaddr{blockedMA}, authToken, "", func(req *http.Request) error { + req.Host = forgeRegistration + return nil + }) + + // Should get an error containing 403 and the denylist name + if err == nil { + t.Fatal("Expected error for blocked IP registration, got nil") + } + errStr := err.Error() + if !strings.Contains(errStr, "403") { + t.Fatalf("Expected 403 error, got: %v", err) + } + if !strings.Contains(errStr, "test-deny") { + t.Fatalf("Expected error to mention denylist name 'test-deny', got: %v", err) + } + if !strings.Contains(errStr, "1.2.3.4") { + t.Fatalf("Expected error to mention blocked IP '1.2.3.4', got: %v", err) + } + }) +} diff --git a/go.mod b/go.mod index 577838d..b9d0f4c 100644 
--- a/go.mod +++ b/go.mod @@ -1,7 +1,7 @@ module github.com/ipshipyard/p2p-forge // NOTE: p2p-forge should match go.mod of latest go-libp2p release -go 1.24 +go 1.24.0 require ( github.com/aws/aws-sdk-go v1.55.6 @@ -9,6 +9,8 @@ require ( github.com/coredns/caddy v1.1.1 github.com/coredns/coredns v1.11.3 github.com/felixge/httpsnoop v1.0.4 + github.com/fsnotify/fsnotify v1.9.0 + github.com/gaissmai/bart v0.26.0 github.com/ipfs/go-datastore v0.8.2 github.com/ipfs/go-ds-badger4 v0.1.8 github.com/ipfs/go-ds-dynamodb v0.2.0 @@ -23,6 +25,7 @@ require ( github.com/multiformats/go-multibase v0.2.0 github.com/prometheus/client_golang v1.22.0 github.com/slok/go-http-metrics v0.12.0 + github.com/stretchr/testify v1.10.0 go.uber.org/zap v1.27.0 ) @@ -32,6 +35,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/caddyserver/zerossl v0.1.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/dgraph-io/badger/v4 v4.5.1 // indirect @@ -134,6 +138,7 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect google.golang.org/grpc v1.63.2 // indirect google.golang.org/protobuf v1.36.6 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect lukechampine.com/blake3 v1.4.1 // indirect ) diff --git a/go.sum b/go.sum index a3df9f6..5ba872e 100644 --- a/go.sum +++ b/go.sum @@ -66,6 +66,10 @@ github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwU github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gaissmai/bart v0.26.0 h1:xOZ57E9hJLBiQaSyeZa9wgWhGuzfGACgqp4BE77OkO0= +github.com/gaissmai/bart v0.26.0/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= diff --git a/ipparser/plugin.go b/ipparser/plugin.go index c82d02b..198b5d5 100644 --- a/ipparser/plugin.go +++ b/ipparser/plugin.go @@ -15,6 +15,7 @@ import ( "github.com/coredns/coredns/plugin" "github.com/miekg/dns" + "github.com/ipshipyard/p2p-forge/denylist" "github.com/libp2p/go-libp2p/core/peer" ) @@ -29,16 +30,11 @@ func setup(c *caddy.Controller) error { if c.NextArg() { forgeDomain = c.Val() } - if c.NextArg() { - // If there was another token, return an error, because we don't have any configuration. - // Any errors returned from this setup function should be wrapped with plugin.Error, so we - // can present a slightly nicer error message to the user. 
- return plugin.Error(pluginName, c.ArgErr()) - } + + config := dnsserver.GetConfig(c) // Read SOA from zone/{forgeDomain} file var soa *dns.SOA - config := dnsserver.GetConfig(c) zoneFile := filepath.Join(config.Root, "zones", forgeDomain) f, err := os.Open(filepath.Clean(zoneFile)) if err != nil { @@ -70,9 +66,19 @@ func setup(c *caddy.Controller) error { }, } + p := &ipParser{ + ForgeDomain: strings.ToLower(forgeDomain), + SOA: soaRR, + // Denylist reference is captured at setup time. If the denylist plugin + // is reconfigured, this reference becomes stale. This is acceptable + // because CoreDNS plugin reconfiguration requires a full server restart. + Denylist: denylist.GetManager(), + } + // Add the Plugin to CoreDNS, so Servers can use it in their plugin chain. - dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { - return ipParser{Next: next, ForgeDomain: strings.ToLower(forgeDomain), SOA: soaRR} + config.AddPlugin(func(next plugin.Handler) plugin.Handler { + p.Next = next + return p }) return nil @@ -81,7 +87,8 @@ func setup(c *caddy.Controller) error { type ipParser struct { Next plugin.Handler ForgeDomain string - SOA []dns.RR // Cached SOA record from zone file + SOA []dns.RR // Cached SOA record from zone file + Denylist *denylist.Manager // Optional IP denylist (nil if not configured) } // The TTL for self-referential ip.peerid.etld A/AAAA records can be as long as possible. @@ -89,7 +96,7 @@ type ipParser struct { const ttl = 7 * 24 * time.Hour // ServeDNS implements the plugin.Handler interface. -func (p ipParser) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { +func (p *ipParser) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { var answers []dns.RR containsNODATAResponse := false for _, q := range r.Question { @@ -141,6 +148,15 @@ func (p ipParser) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg continue } + // Check denylist (allowlists checked first internally) + if p.Denylist != nil { + if denied, result := p.Denylist.Check(ip); denied { + containsNODATAResponse = true + dynamicResponseCount.WithLabelValues("DENIED-" + result.Name).Add(1) + continue + } + } + switch q.Qtype { case dns.TypeA: answers = append(answers, &dns.A{ @@ -193,7 +209,7 @@ func (p ipParser) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg } // Name implements the Handler interface. -func (p ipParser) Name() string { return pluginName } +func (p *ipParser) Name() string { return pluginName } // parseIPFromPrefix converts a DNS prefix to an IP address based on query type func parseIPFromPrefix(prefix string, qtype uint16) (netip.Addr, error) { diff --git a/main.go b/main.go index fa9bc3b..b8a064e 100644 --- a/main.go +++ b/main.go @@ -15,6 +15,7 @@ import ( ) var p2pForgeDirectives = []string{ + "denylist", // must be first - provides Manager for ipparser and acme "ipparser", "acme", } diff --git a/plugins/plugins.go b/plugins/plugins.go index 9cbf925..dddc032 100644 --- a/plugins/plugins.go +++ b/plugins/plugins.go @@ -14,7 +14,9 @@ import ( _ "github.com/coredns/coredns/plugin/reload" _ "github.com/coredns/coredns/plugin/root" - // Load p2p-forge plugins + // Load p2p-forge plugins (denylist must be first - provides Manager for others) + _ "github.com/ipshipyard/p2p-forge/denylist" + _ "github.com/ipshipyard/p2p-forge/acme" _ "github.com/ipshipyard/p2p-forge/ipparser" )