Moritz Poldrack: rules: refactor commercial ISP parsers
1 patch, 5 files changed, 201 insertions(+), 176 deletions(-)
Copy & paste the following snippet into your terminal to import this patchset into git:
curl -s https://lists.sr.ht/~sircmpwn/abused-devel/patches/49635/mbox | git am -3
From: Moritz Poldrack <git@moritz.sh>

To make expansion easier and help with keeping files short, split the ISP rules into different types which implement a common interface.

Signed-off-by: Moritz Poldrack <git@moritz.sh>
---
I noticed that I had to jump around quite a bit when adding new ISP parsers, so I refactored them into their own package.

 rules/commercial-isp.go | 196 ++++------------------------------------
 rules/isps/aws.go       |  53 +++++++++++
 rules/isps/google.go    |  79 ++++++++++++++++
 rules/isps/interface.go |   6 ++
 rules/isps/vultr.go     |  43 +++++++++
 5 files changed, 201 insertions(+), 176 deletions(-)
 create mode 100644 rules/isps/aws.go
 create mode 100644 rules/isps/google.go
 create mode 100644 rules/isps/interface.go
 create mode 100644 rules/isps/vultr.go

diff --git a/rules/commercial-isp.go b/rules/commercial-isp.go
index 475b5ad..d8b0deb 100644
--- a/rules/commercial-isp.go
+++ b/rules/commercial-isp.go
@@ -2,52 +2,22 @@ package rules
 
 import (
 	"context"
-	"encoding/json"
-	"io"
+	"fmt"
 	"log"
 	"net"
-	"net/http"
 	"os"
 	"time"
 
-	"git.sr.ht/~sircmpwn/dowork"
+	work "git.sr.ht/~sircmpwn/dowork"
 	"github.com/yl2chen/cidranger"
 
 	"git.sr.ht/~sircmpwn/abused/as"
 	"git.sr.ht/~sircmpwn/abused/config"
 	"git.sr.ht/~sircmpwn/abused/graph/model"
+	"git.sr.ht/~sircmpwn/abused/rules/isps"
 )
 
 // These providers offer dumps of their prefixes
-const (
-	AWS_URL    string = "https://ip-ranges.amazonaws.com/ip-ranges.json"
-	GOOGLE_URL string = "https://www.gstatic.com/ipranges/goog.json"
-	GCP_URL    string = "https://www.gstatic.com/ipranges/cloud.json"
-	VULTR_URL  string = "https://geofeed.constant.com/?json"
-)
-
-type AWSPayload struct {
-	Prefixes []struct {
-		IPPrefix string `json:"ip_prefix"`
-	} `json:"prefixes"`
-
-	IPv6Prefixes []struct {
-		IPPrefix string `json:"ipv6_prefix"`
-	} `json:"ipv6_prefixes"`
-}
-
-type GCPPayload struct {
-	Prefixes []struct {
-		IPv4Prefix *string `json:"ipv4_prefix"`
-		IPv6Prefix *string `json:"ipv6_prefix"`
-	}
-}
-
-type VultrPayload struct {
-	Subnets []struct {
-		Prefix string `json:"ip_prefix"`
-	}
-}
 
 // Rule that blocks IP addresses associated with commercial ISPs
 type CommercialISPRule struct {
@@ -64,9 +34,9 @@ func NewCommercialISPRule(conf *config.Config) *CommercialISPRule {
 		log:  log.New(os.Stderr, "[block-commercial-isp] ", log.LstdFlags),
 		conf: conf,
 	}
-	work.Submit(rule.UpdateAWS)
-	work.Submit(rule.UpdateGCP)
-	work.Submit(rule.UpdateVultr)
+	work.Submit(rule.Update(new(isps.AWS)))
+	work.Submit(rule.Update(new(isps.Google)))
+	work.Submit(rule.Update(new(isps.Vultr)))
 
 	accessToken := conf.ThirdParty.Cloudflare.Bearer
 	if accessToken != "" {
@@ -105,157 +75,31 @@ func (rule *CommercialISPRule) Report(ctx context.Context, sample *model.Sample,
 	// no-op
 }
 
-func (rule *CommercialISPRule) UpdateAWS(ctx context.Context) error {
-	rule.log.Println("Updating list of AWS networks")
-
-	resp, err := http.Get(AWS_URL)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-	body, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return err
-	}
-
-	var dump AWSPayload
-	err = json.Unmarshal(body, &dump)
-	if err != nil {
-		return err
-	}
+func (rule *CommercialISPRule) Update(backend isps.Interface) func(ctx context.Context) error {
+	var self func(ctx context.Context) error
+	self = func(ctx context.Context) error {
+		rule.log.Printf("Updating list of %s networks", backend.Name())
 
-	for _, prefix := range dump.Prefixes {
-		_, subnet, err := net.ParseCIDR(prefix.IPPrefix)
-		if err != nil {
-			return err
-		}
-		rule.trie.Insert(cidranger.NewBasicRangerEntry(*subnet))
-	}
-	for _, prefix := range dump.IPv6Prefixes {
-		_, subnet, err := net.ParseCIDR(prefix.IPPrefix)
+		prefixes, err := backend.SubnetList()
 		if err != nil {
-			return err
-		}
-		rule.trie.Insert(cidranger.NewBasicRangerEntry(*subnet))
-	}
-	rule.log.Printf("Found %d AWS subnets",
-		len(dump.Prefixes)+len(dump.IPv6Prefixes))
-
-	task := work.NewTask(rule.UpdateAWS).
-		NotBefore(time.Now().Add(48 * time.Hour))
-	work.Enqueue(task)
-	return nil
-}
-
-func (rule *CommercialISPRule) UpdateGCP(ctx context.Context) error {
-	rule.log.Println("Updating list of GCP networks")
-
-	resp, err := http.Get(GOOGLE_URL)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-	body, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return err
-	}
-
-	var dump GCPPayload
-	err = json.Unmarshal(body, &dump)
-	if err != nil {
-		return err
-	}
-
-	for _, prefix := range dump.Prefixes {
-		if prefix.IPv4Prefix != nil {
-			_, subnet, err := net.ParseCIDR(*prefix.IPv4Prefix)
-			if err != nil {
-				return err
-			}
-			rule.trie.Insert(cidranger.NewBasicRangerEntry(*subnet))
+			return fmt.Errorf("failed to update list of %s networks: %w", backend.Name(), err)
 		}
-		if prefix.IPv6Prefix != nil {
-			_, subnet, err := net.ParseCIDR(*prefix.IPv6Prefix)
-			if err != nil {
-				return err
-			}
-			rule.trie.Insert(cidranger.NewBasicRangerEntry(*subnet))
-		}
-	}
-	rule.log.Printf("Found %d Google subnets", len(dump.Prefixes))
-	googleHits := len(dump.Prefixes)
 
-	resp, err = http.Get(GCP_URL)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-	body, err = io.ReadAll(resp.Body)
-	if err != nil {
-		return err
-	}
-
-	err = json.Unmarshal(body, &dump)
-	if err != nil {
-		return err
-	}
-
-	for _, prefix := range dump.Prefixes {
-		if prefix.IPv4Prefix != nil {
-			_, subnet, err := net.ParseCIDR(*prefix.IPv4Prefix)
-			if err != nil {
-				return err
-			}
-			rule.trie.Insert(cidranger.NewBasicRangerEntry(*subnet))
-		}
-		if prefix.IPv6Prefix != nil {
-			_, subnet, err := net.ParseCIDR(*prefix.IPv6Prefix)
+		for _, prefix := range prefixes {
+			_, subnet, err := net.ParseCIDR(prefix)
 			if err != nil {
 				return err
 			}
 			rule.trie.Insert(cidranger.NewBasicRangerEntry(*subnet))
 		}
-	}
-	rule.log.Printf("Found %d GCP subnets", len(dump.Prefixes)-googleHits)
+		rule.log.Printf("Found %d %s subnets", len(prefixes), backend.Name())
 
-	task := work.NewTask(rule.UpdateGCP).
-		NotBefore(time.Now().Add(48 * time.Hour))
-	work.Enqueue(task)
-	return nil
-}
-
-func (rule *CommercialISPRule) UpdateVultr(ctx context.Context) error {
-	rule.log.Println("Updating list of Vultr networks")
-
-	resp, err := http.Get(VULTR_URL)
-	if err != nil {
-		return err
+		task := work.NewTask(self).
+			NotBefore(time.Now().Add(48 * time.Hour))
+		work.Enqueue(task)
+		return nil
 	}
-	defer resp.Body.Close()
-	body, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return err
-	}
-
-	var dump VultrPayload
-	err = json.Unmarshal(body, &dump)
-	if err != nil {
-		return err
-	}
-
-	for _, prefix := range dump.Subnets {
-		_, subnet, err := net.ParseCIDR(prefix.Prefix)
-		if err != nil {
-			return err
-		}
-		rule.trie.Insert(cidranger.NewBasicRangerEntry(*subnet))
-	}
-	rule.log.Printf("Found %d Vultr subnets", len(dump.Subnets))
-
-	task := work.NewTask(rule.UpdateVultr).
-		NotBefore(time.Now().Add(48 * time.Hour))
-	work.Enqueue(task)
-	return nil
+	return self
 }
 
 func (rule *CommercialISPRule) UpdateOther(ctx context.Context) error {
diff --git a/rules/isps/aws.go b/rules/isps/aws.go
new file mode 100644
index 0000000..1e0292d
--- /dev/null
+++ b/rules/isps/aws.go
@@ -0,0 +1,53 @@
+package isps
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+const AWS_URL string = "https://ip-ranges.amazonaws.com/ip-ranges.json"
+
+type AWS struct {
+	Prefixes []struct {
+		IPPrefix string `json:"ip_prefix"`
+	} `json:"prefixes"`
+
+	IPv6Prefixes []struct {
+		IPPrefix string `json:"ipv6_prefix"`
+	} `json:"ipv6_prefixes"`
+}
+
+var _ Interface = (*AWS)(nil)
+
+func (aws *AWS) Name() string {
+	return "AWS"
+}
+
+func (aws *AWS) SubnetList() ([]string, error) {
+	resp, err := http.Get(AWS_URL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch list of AWS prefixes: %w", err)
+	}
+	defer resp.Body.Close()
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read list of AWS prefixes: %w", err)
+	}
+
+	err = json.Unmarshal(body, aws)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse list of AWS prefixes: %w", err)
+	}
+
+	var subnets []string
+	for _, prefix := range aws.Prefixes {
+		subnets = append(subnets, prefix.IPPrefix)
+	}
+	for _, prefix := range aws.IPv6Prefixes {
+		subnets = append(subnets, prefix.IPPrefix)
+	}
+
+	return subnets, nil
+}
diff --git a/rules/isps/google.go b/rules/isps/google.go
new file mode 100644
index 0000000..d9309ab
--- /dev/null
+++ b/rules/isps/google.go
@@ -0,0 +1,79 @@
+package isps
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+const (
+	GOOGLE_URL string = "https://www.gstatic.com/ipranges/goog.json"
+	GCP_URL    string = "https://www.gstatic.com/ipranges/cloud.json"
+)
+
+type Google struct {
+	Prefixes []struct {
+		IPv4Prefix string `json:"ipv4Prefix"`
+		IPv6Prefix string `json:"ipv6Prefix"`
+	} `json:"prefixes"`
+}
+
+var _ Interface = (*Google)(nil)
+
+func (g *Google) Name() string {
+	return "Google"
+}
+
+func (g *Google) SubnetList() ([]string, error) {
+	resp, err := http.Get(GOOGLE_URL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch list of Google prefixes: %w", err)
+	}
+	defer resp.Body.Close()
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read list of Google prefixes: %w", err)
+	}
+
+	err = json.Unmarshal(body, &g)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse list of Google prefixes: %w", err)
+	}
+
+	var subnets []string
+	for _, prefix := range g.Prefixes {
+		if prefix.IPv4Prefix != "" {
+			subnets = append(subnets, prefix.IPv4Prefix)
+		}
+		if prefix.IPv6Prefix != "" {
+			subnets = append(subnets, prefix.IPv6Prefix)
+		}
+	}
+
+	resp, err = http.Get(GCP_URL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch list of GCP prefixes: %w", err)
+	}
+	defer resp.Body.Close()
+	body, err = io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read list of GCP prefixes: %w", err)
+	}
+
+	err = json.Unmarshal(body, &g)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse list of GCP prefixes: %w", err)
+	}
+
+	for _, prefix := range g.Prefixes {
+		if prefix.IPv4Prefix != "" {
+			subnets = append(subnets, prefix.IPv4Prefix)
+		}
+		if prefix.IPv6Prefix != "" {
+			subnets = append(subnets, prefix.IPv6Prefix)
+		}
+	}
+
+	return subnets, nil
+}
diff --git a/rules/isps/interface.go b/rules/isps/interface.go
new file mode 100644
index 0000000..2616110
--- /dev/null
+++ b/rules/isps/interface.go
@@ -0,0 +1,6 @@
+package isps
+
+type Interface interface {
+	Name() string
+	SubnetList() ([]string, error)
+}
diff --git a/rules/isps/vultr.go b/rules/isps/vultr.go
new file mode 100644
index 0000000..2070a0e
--- /dev/null
+++ b/rules/isps/vultr.go
@@ -0,0 +1,43 @@
+package isps
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+const VULTR_URL string = "https://geofeed.constant.com/?json"
+
+type Vultr struct {
+	Subnets []struct {
+		Prefix string `json:"ip_prefix"`
+	}
+}
+
+func (vultr *Vultr) Name() string {
+	return "Vultr"
+}
+
+func (vultr *Vultr) SubnetList() ([]string, error) {
+	resp, err := http.Get(VULTR_URL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch list of Vultr prefixes: %w", err)
+	}
+	defer resp.Body.Close()
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read list of Vultr prefixes: %w", err)
+	}
+
+	err = json.Unmarshal(body, &vultr)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse list of Vultr prefixes: %w", err)
+	}
+
+	var subnets []string
+	for _, prefix := range vultr.Subnets {
+		subnets = append(subnets, prefix.Prefix)
+	}
+	return subnets, nil
+}
-- 
2.43.2
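For illustration, a backend for an additional provider would only need to satisfy the isps.Interface introduced above. This is a sketch, not part of the patch: the provider name (Example), the URL, and the JSON field names are invented, and only the Name/SubnetList contract comes from rules/isps/interface.go.

package isps

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// Hypothetical provider used only to show the extension point; the URL
// and JSON layout are made up for this sketch.
const EXAMPLE_URL string = "https://example.com/ip-ranges.json"

type Example struct {
	Prefixes []struct {
		Prefix string `json:"prefix"`
	} `json:"prefixes"`
}

var _ Interface = (*Example)(nil)

func (e *Example) Name() string {
	return "Example"
}

func (e *Example) SubnetList() ([]string, error) {
	resp, err := http.Get(EXAMPLE_URL)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch list of Example prefixes: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read list of Example prefixes: %w", err)
	}

	if err := json.Unmarshal(body, e); err != nil {
		return nil, fmt.Errorf("failed to parse list of Example prefixes: %w", err)
	}

	// Flatten the decoded payload into the plain CIDR strings the rule expects.
	var subnets []string
	for _, prefix := range e.Prefixes {
		subnets = append(subnets, prefix.Prefix)
	}
	return subnets, nil
}

Wiring it up would mirror the existing constructor calls in the patch, e.g. work.Submit(rule.Update(new(isps.Example))) inside NewCommercialISPRule.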
Thanks!

To git@git.sr.ht:~sircmpwn/abused
   9c9dde5..e1fc722  master -> master
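A note on the Update refactor in the patch: it returns a closure that keeps a reference to itself, so the periodic re-enqueue (work.NewTask(self).NotBefore(...)) always points at the same job. The same idea can be written as a generic helper; the name "periodic" below is invented for this sketch, while the dowork calls and the 48-hour interval are the ones already used in the patch.

package rules

import (
	"context"
	"time"

	work "git.sr.ht/~sircmpwn/dowork"
)

// periodic wraps a refresh function in a closure that re-enqueues itself
// 48 hours after each successful run, mirroring CommercialISPRule.Update.
func periodic(run func(ctx context.Context) error) func(ctx context.Context) error {
	var self func(ctx context.Context) error
	self = func(ctx context.Context) error {
		if err := run(ctx); err != nil {
			// On failure the task is not rescheduled, matching the patch.
			return err
		}
		// Re-enqueue the same closure so the next run is scheduled too.
		task := work.NewTask(self).
			NotBefore(time.Now().Add(48 * time.Hour))
		work.Enqueue(task)
		return nil
	}
	return self
}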