Refactor spam score

This commit is contained in:
nemunaire 2025-10-22 12:26:07 +07:00
commit 866cf2e5db
4 changed files with 16 additions and 76 deletions

View file

@@ -37,7 +37,6 @@ type ReportGenerator struct {
rblChecker *RBLChecker
contentAnalyzer *ContentAnalyzer
headerAnalyzer *HeaderAnalyzer
scorer *DeliverabilityScorer
}
// NewReportGenerator creates a new report generator
@@ -53,7 +52,6 @@ func NewReportGenerator(
rblChecker: NewRBLChecker(dnsTimeout, rbls),
contentAnalyzer: NewContentAnalyzer(httpTimeout),
headerAnalyzer: NewHeaderAnalyzer(),
scorer: NewDeliverabilityScorer(),
}
}
@@ -119,7 +117,7 @@ func (r *ReportGenerator) GenerateReport(testID uuid.UUID, results *AnalysisResu
spamScore := 0
if results.SpamAssassin != nil {
spamScore = r.scorer.CalculateSpamScore(results.SpamAssassin)
spamScore = r.spamAnalyzer.CalculateSpamAssassinScore(results.SpamAssassin)
}
report.Summary = &api.ScoreSummary{

View file

@@ -49,37 +49,3 @@ func ScoreToGrade(score int) string {
func ScoreToReportGrade(score int) api.ReportGrade {
return api.ReportGrade(ScoreToGrade(score))
}
// DeliverabilityScorer aggregates all analysis results and computes overall score
type DeliverabilityScorer struct{}
// NewDeliverabilityScorer creates a new deliverability scorer
func NewDeliverabilityScorer() *DeliverabilityScorer {
return &DeliverabilityScorer{}
}
// CalculateSpamScore calculates spam score from SpamAssassin results
// Returns a score from 0-100 where higher is better
func (s *DeliverabilityScorer) CalculateSpamScore(result *SpamAssassinResult) int {
if result == nil {
return 100 // No spam scan results, assume good
}
// SpamAssassin score typically ranges from -10 to +20
// Score < 0 is very likely ham (good)
// Score 0-5 is threshold range (configurable, usually 5.0)
// Score > 5 is likely spam
score := result.Score
// Convert SpamAssassin score to 0-100 scale (inverted - lower SA score is better)
if score <= 0 {
return 100 // Perfect score for ham
} else if score >= result.RequiredScore {
return 0 // Failed spam test
} else {
// Linear scale between 0 and required threshold
percentage := (score / result.RequiredScore) * 100
return int(100 - percentage)
}
}

View file

@@ -1,24 +0,0 @@
// This file is part of the happyDeliver (R) project.
// Copyright (c) 2025 happyDomain
// Authors: Pierre-Olivier Mercier, et al.
//
// This program is offered under a commercial and under the AGPL license.
// For commercial licensing, contact us at <contact@happydomain.org>.
//
// For AGPL licensing:
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
package analyzer
import ()

View file

@@ -172,26 +172,26 @@ func (a *SpamAssassinAnalyzer) parseSpamReport(report string, result *SpamAssass
}
}
// GetSpamAssassinScore calculates the SpamAssassin contribution to deliverability
func (a *SpamAssassinAnalyzer) GetSpamAssassinScore(result *SpamAssassinResult) int {
// CalculateSpamAssassinScore calculates the SpamAssassin contribution to deliverability
func (a *SpamAssassinAnalyzer) CalculateSpamAssassinScore(result *SpamAssassinResult) int {
if result == nil {
return 0
return 100 // No spam scan results, assume good
}
// SpamAssassin score typically ranges from -10 to +20
// Score < 0 is very likely ham (good)
// Score 0-5 is threshold range (configurable, usually 5.0)
// Score > 5 is likely spam
score := result.Score
required := result.RequiredScore
if required == 0 {
required = 5 // Default SpamAssassin threshold
}
// Calculate deliverability score
// Convert SpamAssassin score to 0-100 scale (inverted - lower SA score is better)
if score <= 0 {
return 100
return 100 // Perfect score for ham
} else if score >= result.RequiredScore {
return 0 // Failed spam test
} else {
// Linear scale between 0 and required threshold
return 100 - int(math.Round(score*100/result.RequiredScore))
}
if score <= required*4 {
return 0
}
// Linear scaling based on how negative/low the score is
return 100 - int(math.Round(25*score/required))
}