Add event-taxonomy package with canonical schema, adapters, and CLI

Canonical NormalizedFinding schema with Severity enum (CRITICAL/HIGH/MEDIUM/LOW/INFO).
Per-project adapters for 9 tools with severity mapping for string labels, int 1-10,
float 0-1, Python Enum, and computed properties. CLI pipe interface and registry.

Nightshift-Task: event-taxonomy
Nightshift-Ref: https://github.com/marcus/nightshift
This commit is contained in:
Claude
2026-03-09 21:09:13 +00:00
parent ef0c88d50d
commit a31093822c
35 changed files with 709 additions and 0 deletions

145
tests/test_adapters.py Normal file
View File

@@ -0,0 +1,145 @@
from event_taxonomy.adapters import (
bus_factor,
dep_risk,
doc_drift,
knowledge_silo,
perf_regression,
roadmap_entropy,
schema_evolution,
semantic_diff,
test_flakiness,
)
from event_taxonomy.schema import Severity
def test_bus_factor():
    """A CRITICAL risk label maps to Severity.CRITICAL and the top author appears in the message."""
    payload = {
        "file": "core.py",
        "top_author": "alice",
        "top_author_pct": 85.0,
        "num_contributors": 2,
        "bus_factor": 1,
        "risk_label": "CRITICAL",
    }
    finding = bus_factor.normalize(payload)
    assert finding.severity == Severity.CRITICAL
    assert finding.file == "core.py"
    assert "alice" in finding.message
def test_dep_risk():
    """A HIGH risk label maps to Severity.HIGH and the package name appears in the message."""
    payload = {
        "name": "requests",
        "ecosystem": "pypi",
        "version": "2.28.0",
        "risk_label": "HIGH",
        "vuln_count": 2,
        "months_stale": 6,
    }
    finding = dep_risk.normalize(payload)
    assert finding.severity == Severity.HIGH
    assert "requests" in finding.message
def test_doc_drift():
    """An 'error'-severity drift record maps to HIGH and keeps file/line location."""
    payload = {
        "kind": "docstring_param",
        "severity": "error",
        "message": "Param x not in signature",
        "file": "model.py",
        "lineno": 42,
        "symbol": "process",
    }
    finding = doc_drift.normalize(payload)
    assert finding.severity == Severity.HIGH
    assert finding.line == 42
    assert finding.file == "model.py"
def test_knowledge_silo():
    """A HIGH-risk silo maps to Severity.HIGH and names the dominant author."""
    payload = {
        "filepath": "auth.py",
        "total_commits": 50,
        "dominant_author": "bob",
        "dominant_commits": 45,
        "other_authors": {"alice": 5},
        "risk": "HIGH",
    }
    finding = knowledge_silo.normalize(payload)
    assert finding.severity == Severity.HIGH
    assert "bob" in finding.message
def test_perf_regression():
    """An integer severity of 8 maps to CRITICAL; file/line location is preserved."""
    payload = {
        "pattern": "n-plus-one-query",
        "severity": 8,
        "file": "api.py",
        "line": 123,
        "message": "DB call inside loop",
        "snippet": "for x in items: db.query(x)",
    }
    finding = perf_regression.normalize(payload)
    assert finding.severity == Severity.CRITICAL
    assert finding.file == "api.py"
    assert finding.line == 123
def test_roadmap_entropy():
    """A lowercase 'high' risk label maps to HIGH; the entropy score shows up in the message."""
    payload = {
        "entropy_score": 0.65,
        "risk_label": "high",
        "base_count": 10,
        "head_count": 15,
        "item_count_delta": 5,
        "description_churn": 0.25,
        "priority_shuffles": 3,
    }
    finding = roadmap_entropy.normalize(payload)
    assert finding.severity == Severity.HIGH
    assert "0.65" in finding.message
def test_schema_evolution():
    """A 'dangerous' risk level maps to CRITICAL; the recommendation is carried over."""
    payload = {
        "operation": {"op_type": "drop_column", "args": ["users", "password"]},
        "risk_level": "dangerous",
        "rationale": "Dropping column causes data loss",
        "recommendation": "Back up first",
    }
    finding = schema_evolution.normalize(payload)
    assert finding.severity == Severity.CRITICAL
    assert finding.recommendation == "Back up first"
def test_semantic_diff():
    """A modest-churn diff maps to MEDIUM and keeps the path as the file."""
    payload = {
        "path": "api.py",
        "categories": ["REFACTOR", "FEATURE"],
        "summary": "Modified api.py",
        "added": 45,
        "removed": 23,
    }
    finding = semantic_diff.normalize(payload)
    assert finding.severity == Severity.MEDIUM
    assert finding.file == "api.py"
def test_semantic_diff_large_churn():
    """Large added+removed line counts escalate the severity to HIGH."""
    payload = {
        "path": "big.py",
        "categories": ["FEATURE"],
        "summary": "Rewrote big.py",
        "added": 200,
        "removed": 50,
    }
    finding = semantic_diff.normalize(payload)
    assert finding.severity == Severity.HIGH
def test_test_flakiness():
    """A 0.4 flakiness rate maps to HIGH and is rendered as a percentage in the message."""
    payload = {
        "test_id": "tests.integration::test_auth",
        "classname": "tests.integration",
        "total_runs": 20,
        "pass_count": 12,
        "fail_count": 8,
        "error_count": 0,
        "skip_count": 0,
        "flakiness_rate": 0.4,
        "avg_duration": 1.25,
        "duration_stddev": 0.35,
    }
    finding = test_flakiness.normalize(payload)
    assert finding.severity == Severity.HIGH
    assert "40%" in finding.message