perf + simpler: composite index, range-filtered protokoll, simpler profile
- Migration v9 adds idx_applications_user_flat_started on (user_id, flat_id, started_at DESC). Covers latest_applications_by_flat inner GROUP BY and the outer JOIN without a table scan. - Push the protokoll date range into SQL instead of pulling 5000 rows into Python and filtering there: new audit_in_range / errors_in_range helpers with a shared _range_filter_rows impl. Protokoll page limits 500, CSV export 5000. - _row_to_profile collapses to `dict(profile_row)`. ProfileModel (Pydantic) already validates and coerces types on the apply side, extras ignored. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
eb73b5e415
commit
cb617dd38a
3 changed files with 47 additions and 26 deletions
15
web/app.py
15
web/app.py
|
|
@ -795,12 +795,11 @@ def _parse_date_range(from_str: str | None, to_str: str | None) -> tuple[str | N
|
|||
return start, end
|
||||
|
||||
|
||||
def _collect_events(start_iso: str | None, end_iso: str | None) -> list[dict]:
|
||||
def _collect_events(start_iso: str | None, end_iso: str | None,
|
||||
limit: int = 500) -> list[dict]:
|
||||
users = {row["id"]: row["username"] for row in db.list_users()}
|
||||
events: list[dict] = []
|
||||
for a in db.recent_audit(None, limit=5000):
|
||||
if start_iso and a["timestamp"] < start_iso: continue
|
||||
if end_iso and a["timestamp"] >= end_iso: continue
|
||||
for a in db.audit_in_range(start_iso, end_iso, limit=limit):
|
||||
events.append({
|
||||
"kind": "audit", "ts": a["timestamp"], "source": "web",
|
||||
"actor": a["actor"], "action": a["action"],
|
||||
|
|
@ -808,9 +807,7 @@ def _collect_events(start_iso: str | None, end_iso: str | None) -> list[dict]:
|
|||
"user": users.get(a["user_id"], ""),
|
||||
"ip": a["ip"] or "",
|
||||
})
|
||||
for e in db.recent_errors(None, limit=5000):
|
||||
if start_iso and e["timestamp"] < start_iso: continue
|
||||
if end_iso and e["timestamp"] >= end_iso: continue
|
||||
for e in db.errors_in_range(start_iso, end_iso, limit=limit):
|
||||
events.append({
|
||||
"kind": "error", "ts": e["timestamp"], "source": e["source"],
|
||||
"actor": e["source"], "action": e["kind"],
|
||||
|
|
@ -863,7 +860,7 @@ def tab_admin(request: Request, section: str):
|
|||
to_str = q.get("to") or ""
|
||||
start_iso, end_iso = _parse_date_range(from_str or None, to_str or None)
|
||||
ctx.update({
|
||||
"events": _collect_events(start_iso, end_iso)[:500],
|
||||
"events": _collect_events(start_iso, end_iso, limit=500),
|
||||
"from_str": from_str, "to_str": to_str,
|
||||
})
|
||||
elif section == "benutzer":
|
||||
|
|
@ -886,7 +883,7 @@ def tab_logs_export(request: Request):
|
|||
import csv as _csv
|
||||
q = request.query_params
|
||||
start_iso, end_iso = _parse_date_range(q.get("from") or None, q.get("to") or None)
|
||||
events = _collect_events(start_iso, end_iso)
|
||||
events = _collect_events(start_iso, end_iso, limit=5000)
|
||||
|
||||
buf = io.StringIO()
|
||||
w = _csv.writer(buf, delimiter=",", quoting=_csv.QUOTE_MINIMAL)
|
||||
|
|
|
|||
|
|
@ -7,25 +7,14 @@ logger = logging.getLogger("web.apply_client")
|
|||
|
||||
|
||||
def _row_to_profile(profile_row) -> dict:
|
||||
"""Convert a user_profiles row to the apply service Profile dict."""
|
||||
"""Convert a user_profiles row into the payload dict for /apply.
|
||||
|
||||
Apply-side ProfileModel (Pydantic) validates + coerces types; we just
|
||||
hand over whatever the row has. `updated_at` and any other extra keys
|
||||
are ignored by the model."""
|
||||
if profile_row is None:
|
||||
return {}
|
||||
keys = [
|
||||
"salutation", "firstname", "lastname", "email", "telephone",
|
||||
"street", "house_number", "postcode", "city",
|
||||
"is_possessing_wbs", "wbs_type", "wbs_valid_till",
|
||||
"wbs_rooms", "wbs_adults", "wbs_children", "is_prio_wbs",
|
||||
"immomio_email", "immomio_password",
|
||||
]
|
||||
d = {}
|
||||
for k in keys:
|
||||
try:
|
||||
d[k] = profile_row[k]
|
||||
except (KeyError, IndexError):
|
||||
pass
|
||||
for k in ("is_possessing_wbs", "is_prio_wbs"):
|
||||
d[k] = bool(d.get(k) or 0)
|
||||
return d
|
||||
return dict(profile_row)
|
||||
|
||||
|
||||
class ApplyClient:
|
||||
|
|
|
|||
35
web/db.py
35
web/db.py
|
|
@ -260,6 +260,11 @@ MIGRATIONS: list[str] = [
|
|||
CREATE INDEX IF NOT EXISTS idx_partnerships_from ON partnerships(from_user_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_partnerships_to ON partnerships(to_user_id);
|
||||
""",
|
||||
# 0009: composite index for latest_applications_by_flat + last_application_for_flat
|
||||
"""
|
||||
CREATE INDEX IF NOT EXISTS idx_applications_user_flat_started
|
||||
ON applications(user_id, flat_id, started_at DESC);
|
||||
""",
|
||||
]
|
||||
|
||||
|
||||
|
|
@ -780,6 +785,36 @@ def recent_audit(user_id: Optional[int], limit: int = 100) -> list[sqlite3.Row]:
|
|||
).fetchall())
|
||||
|
||||
|
||||
def _range_filter_rows(table: str, ts_col: str, start_iso: Optional[str],
|
||||
end_iso: Optional[str], limit: int) -> list[sqlite3.Row]:
|
||||
"""Date-range filtered fetch from an append-only table. Pushes the
|
||||
timestamp filter into SQL so we don't drag 5000 rows into Python just
|
||||
to discard most of them."""
|
||||
clauses, params = [], []
|
||||
if start_iso:
|
||||
clauses.append(f"{ts_col} >= ?")
|
||||
params.append(start_iso)
|
||||
if end_iso:
|
||||
clauses.append(f"{ts_col} < ?")
|
||||
params.append(end_iso)
|
||||
where = ("WHERE " + " AND ".join(clauses)) if clauses else ""
|
||||
params.append(limit)
|
||||
return list(_get_conn().execute(
|
||||
f"SELECT * FROM {table} {where} ORDER BY {ts_col} DESC LIMIT ?",
|
||||
params,
|
||||
).fetchall())
|
||||
|
||||
|
||||
def audit_in_range(start_iso: Optional[str], end_iso: Optional[str],
                   limit: int = 500) -> list[sqlite3.Row]:
    """Audit-log rows with ``start_iso <= timestamp < end_iso``.

    Either bound may be None for an open-ended range. Rows come back
    newest first, at most ``limit`` of them.
    """
    return _range_filter_rows("audit_log", "timestamp", start_iso, end_iso, limit)
|
||||
|
||||
|
||||
def errors_in_range(start_iso: Optional[str], end_iso: Optional[str],
                    limit: int = 500) -> list[sqlite3.Row]:
    """Error-log rows with ``start_iso <= timestamp < end_iso``.

    Either bound may be None for an open-ended range. Rows come back
    newest first, at most ``limit`` of them.
    """
    return _range_filter_rows("errors", "timestamp", start_iso, end_iso, limit)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Retention cleanup
|
||||
# ---------------------------------------------------------------------------
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue