Update people data and speed-test visualization app

This commit is contained in:
2026-02-15 17:31:33 +01:00
parent cde554c981
commit 8a0e48b3cb
5 changed files with 441 additions and 302 deletions

292
app.py
View File

@@ -1,166 +1,148 @@
#!/usr/bin/env python3
"""
Flask app to visualize a large SQLite database of speed tests.
- Plots time series for down_90th and up_90th
- Serves a lazily loaded table (server-side pagination)
- Designed for ~1GB DB: efficient SQLite pragmas + timestamp cursor pagination
Run:
export DB_PATH="/path/to/your/speedtests.sqlite3"
python3 app.py
Then open http://127.0.0.1:5000
Optional: create an index (speeds up range scans by timestamp):
sqlite3 "$DB_PATH" "CREATE INDEX IF NOT EXISTS idx_speed_tests_ts ON speed_tests(timestamp);"
"""
from __future__ import annotations
import os
import math
import sqlite3
import time
from datetime import datetime
import pytz
from flask import Flask, render_template, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import text
from flask import Flask, jsonify, request, render_template
from config import SERIES_MAX_POINTS_DEFAULT, DB_PATH, SERIES_MAX_POINTS_HARD, PAGE_SIZE_MAX, PAGE_SIZE_DEFAULT
app = Flask(__name__, template_folder="templates")

# Point SQLAlchemy at the SAME database file the raw-sqlite3 helpers below use.
# Previously a second Flask() instance was created (immediately shadowed) and
# the ORM was wired to a hard-coded './speedtest.db' while the raw connection
# used config.DB_PATH — so the two halves of this app could read different files.
app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{os.path.abspath(DB_PATH)}'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

# ---------------------- SQLite Helpers ----------------------
db = SQLAlchemy(app)
def get_conn() -> sqlite3.Connection:
    """Open a shared-cache connection to the speed-test database.

    Pragmas are tuned for a large, read-heavy SQLite file: WAL journaling,
    relaxed fsync, in-memory temp tables and a ~20MB page cache.
    NOTE(review): the connection is created with check_same_thread=False and
    shared at module level — confirm the server runs single-threaded.
    """
    db_uri = f"file:{os.path.abspath(DB_PATH)}?cache=shared"
    connection = sqlite3.connect(db_uri, uri=True, check_same_thread=False)
    connection.row_factory = sqlite3.Row
    for pragma in (
        "PRAGMA journal_mode=WAL;",
        "PRAGMA synchronous=NORMAL;",
        "PRAGMA temp_store=MEMORY;",
        "PRAGMA cache_size=-20000;",  # negative => size in KiB (~20MB cache)
    ):
        connection.execute(pragma)
    return connection
# Reflect the existing table explicitly to match your schema
class SpeedTest(db.Model):
    """ORM mapping of the pre-existing `speed_tests` table.

    Mirrors the columns used by the raw-SQL endpoints in this file;
    `down_90th` and `up_90th` are the two series the charts plot.
    """
    __tablename__ = 'speed_tests'
    id = db.Column(db.Integer, primary_key=True)
    timestamp = db.Column(db.Float, nullable=False)  # Unix epoch seconds (converted via ts_to_iso elsewhere)
    failed = db.Column(db.Boolean, nullable=False)   # truthy when the test did not complete (see /api/data)
    isp = db.Column(db.String)
    ip = db.Column(db.String)
    location_code = db.Column(db.String)
    location_city = db.Column(db.String)
    location_region = db.Column(db.String)
    latency = db.Column(db.Float)   # units not visible here — presumably ms, TODO confirm
    jitter = db.Column(db.Float)
    # per-transfer-size measurements; semantics inferred from column names — confirm
    down_100kB = db.Column(db.Float)
    down_1MB = db.Column(db.Float)
    down_10MB = db.Column(db.Float)
    down_25MB = db.Column(db.Float)
    down_90th = db.Column(db.Float) # We will graph this
    up_100kB = db.Column(db.Float)
    up_1MB = db.Column(db.Float)
    up_10MB = db.Column(db.Float)
    up_90th = db.Column(db.Float) # We will graph this
# Single module-level connection shared by all request handlers.
# NOTE(review): sqlite3 connections are not inherently thread-safe; this relies
# on check_same_thread=False in get_conn() — confirm the deployment model.
CONN = get_conn()
# ---------------------- Utilities ----------------------
def ts_to_iso(ts: float | int | str) -> str:
try:
t = float(ts)
except Exception:
return str(ts)
return datetime.fromtimestamp(t, tz=pytz.timezone("Europe/Berlin")).isoformat()
# ---------------------- API Endpoints ----------------------
@app.get("/api/series")
def api_series():
    """Return the down_90th/up_90th time series, decimated to max_points.

    Query params: from/to (unix-timestamp floats, optional range bounds)
    and max_points (clamped to [1, SERIES_MAX_POINTS_HARD]).
    """
    t_from = request.args.get("from", type=float)
    t_to = request.args.get("to", type=float)
    max_points = request.args.get("max_points", type=int) or SERIES_MAX_POINTS_DEFAULT
    max_points = max(1, min(max_points, SERIES_MAX_POINTS_HARD))

    clauses, params = [], []
    if t_from is not None:
        clauses.append("timestamp >= ?")
        params.append(t_from)
    if t_to is not None:
        clauses.append("timestamp <= ?")
        params.append(t_to)
    where_sql = ("WHERE " + " AND ".join(clauses)) if clauses else ""

    # Count first so we can pick an even sampling stride for the scan below.
    total = CONN.execute(
        f"SELECT COUNT(*) AS n FROM speed_tests {where_sql};", params
    ).fetchone()[0]
    stride = 1 if total <= max_points else math.ceil(total / max_points)

    query = (
        f"SELECT timestamp, down_90th, up_90th FROM speed_tests {where_sql} "
        "ORDER BY timestamp ASC;"
    )
    points = []
    for idx, row in enumerate(CONN.execute(query, params)):
        if idx % stride:
            continue  # keep every stride-th row only
        points.append({
            "t": float(row["timestamp"]),
            "t_iso": ts_to_iso(row["timestamp"]),
            "down_90th": None if row["down_90th"] is None else float(row["down_90th"]),
            "up_90th": None if row["up_90th"] is None else float(row["up_90th"]),
        })
        if len(points) >= max_points:
            break

    return jsonify({
        "count_total": total,
        "stride": stride,
        "returned": len(points),
        "points": points,
    })
@app.get("/api/table")
def api_table():
    """Keyset-paginated rows from speed_tests, ordered by timestamp.

    Uses a timestamp cursor (`cursor` query param) rather than OFFSET so
    deep pages stay cheap on a large table. Returns `next_cursor` for the
    client to request the following page.
    """
    limit = request.args.get("limit", type=int) or PAGE_SIZE_DEFAULT
    limit = max(1, min(limit, PAGE_SIZE_MAX))

    raw_order = request.args.get("order", default="desc")
    order = "ASC" if str(raw_order).lower().startswith("asc") else "DESC"

    clauses, params = [], []
    cursor = request.args.get("cursor", type=float)
    if cursor is not None:
        # Strict inequality in the scan direction resumes after the cursor row.
        clauses.append("timestamp > ?" if order == "ASC" else "timestamp < ?")
        params.append(cursor)
    where_sql = ("WHERE " + " AND ".join(clauses)) if clauses else ""

    sql = (
        "SELECT id, timestamp, failed, isp, ip, location_code, location_city, location_region, "
        "latency, jitter, down_100kB, down_1MB, down_10MB, down_25MB, down_90th, "
        "up_100kB, up_1MB, up_10MB, up_90th "
        f"FROM speed_tests {where_sql} ORDER BY timestamp {order} LIMIT ?;"
    )
    rows = [dict(r) for r in CONN.execute(sql, params + [limit]).fetchall()]

    next_cursor = None
    if rows:
        last_ts = rows[-1]["timestamp"]
        try:
            next_cursor = float(last_ts)
        except Exception:
            next_cursor = last_ts  # fall back to the raw value if not numeric

    for row in rows:
        row["timestamp_iso"] = ts_to_iso(row["timestamp"])

    return jsonify({
        "limit": limit,
        "order": order.lower(),
        "count": len(rows),
        "next_cursor": next_cursor,
        "rows": rows,
    })
@app.get("/")
def index():
    """Render the dashboard page with the DB name and paging/series limits.

    A duplicate `@app.route('/')` decorator and an unreachable second
    `return render_template('index.html')` (dead code after the first
    return) were removed.
    """
    return render_template(
        "index.html",
        db_name=os.path.basename(DB_PATH),
        SERIES_MAX_POINTS_DEFAULT=SERIES_MAX_POINTS_DEFAULT,
        SERIES_MAX_POINTS_HARD=SERIES_MAX_POINTS_HARD,
        PAGE_SIZE_DEFAULT=PAGE_SIZE_DEFAULT,
        PAGE_SIZE_MAX=PAGE_SIZE_MAX,
    )
@app.route('/api/hourly_stats')
def get_hourly_stats():
    """Average successful down/up speeds grouped by local hour of day (00-23)."""
    # Raw SQL keeps the aggregation inside SQLite; the unix timestamp is
    # converted with datetime(..., 'unixepoch', 'localtime') so strftime('%H')
    # can extract the hour. Failed tests are excluded from the averages.
    sql = text("""
        SELECT
            strftime('%H', datetime(timestamp, 'unixepoch', 'localtime')) as hour,
            AVG(down_90th) as avg_down,
            AVG(up_90th) as avg_up,
            COUNT(*) as count
        FROM speed_tests
        WHERE failed = 0
        GROUP BY hour
        ORDER BY hour ASC
    """)
    payload = [
        {'hour': row[0], 'down': row[1], 'up': row[2], 'count': row[3]}
        for row in db.session.execute(sql)
    ]
    return jsonify(payload)
# NOTE: an `if __name__ == "__main__": app.run(...)` guard used to live here,
# above the /api/data route. Because app.run() blocks, module execution never
# reached the route definitions below it when running this file directly, so
# /api/data was never registered. The single entry point is the guard at the
# bottom of the file.
@app.route('/api/data')
def get_data():
    """Return rows plus summary stats (uptime %, averages) for a date range.

    Query params:
        start, end: 'YYYY-MM-DD' strings. When either is missing the range
            defaults to the last 7 days. The end day is inclusive.

    Returns HTTP 400 on malformed dates (previously the unhandled
    ValueError from strptime produced a 500).
    """
    start_str = request.args.get('start')
    end_str = request.args.get('end')

    now = time.time()
    if start_str and end_str:
        try:
            start_ts = datetime.strptime(start_str, '%Y-%m-%d').timestamp()
            # +86400 so the whole end day is included
            end_ts = datetime.strptime(end_str, '%Y-%m-%d').timestamp() + 86400
        except ValueError:
            return jsonify({'error': 'dates must be formatted as YYYY-MM-DD'}), 400
    else:
        start_ts = now - (7 * 24 * 60 * 60)
        end_ts = now

    results = (
        SpeedTest.query
        .filter(SpeedTest.timestamp >= start_ts, SpeedTest.timestamp <= end_ts)
        .order_by(SpeedTest.timestamp.asc())
        .all()
    )

    data = []
    total_tests = 0
    failed_tests = 0
    total_down = 0.0
    total_up = 0.0
    count_valid_speed = 0
    for r in results:
        total_tests += 1
        if r.failed:
            failed_tests += 1
        # Only successful tests with both speeds present count toward averages.
        if not r.failed and r.down_90th is not None and r.up_90th is not None:
            total_down += r.down_90th
            total_up += r.up_90th
            count_valid_speed += 1
        data.append({
            'timestamp': r.timestamp * 1000,  # JS Date expects milliseconds
            'readable_date': datetime.fromtimestamp(r.timestamp).strftime('%Y-%m-%d %H:%M'),
            'down': r.down_90th,
            'up': r.up_90th,
            'failed': r.failed,
            'latency': r.latency,
            'isp': r.isp,
        })

    uptime_pct = ((total_tests - failed_tests) / total_tests * 100) if total_tests > 0 else 0
    avg_down = (total_down / count_valid_speed) if count_valid_speed > 0 else 0
    avg_up = (total_up / count_valid_speed) if count_valid_speed > 0 else 0

    return jsonify({
        'rows': data,
        'stats': {
            'uptime': round(uptime_pct, 2),
            'avg_down': round(avg_down, 2),
            'avg_up': round(avg_up, 2),
            'total_tests': total_tests,
        },
    })
if __name__ == '__main__':
    # Honour the PORT env var like the (removed) earlier entry point did;
    # the default preserves the previous behavior (debug server on :5000).
    port = int(os.environ.get("PORT", 5000))
    app.run(debug=True, port=port)