Compare commits

...

3 Commits

Author SHA1 Message Date
Brett 5db7b20e9f async 2025-07-03 22:14:49 -04:00
Brett ef258f05ad i hate async 2025-07-03 22:03:14 -04:00
Brett 301483810e screw it i think im done with this. 2025-07-03 21:33:49 -04:00
6 changed files with 65 additions and 54 deletions

Binary file not shown.

View File

@@ -196,13 +196,13 @@ async def handle_article_url(message: discord.Message, url: str) -> None:
     LOGGER.info("Received URL from %s: %s", message.author, url)
 
     try:
-        title, processed_html = await server.article_repository.get_article(url)
-
         if await server.article_repository.has_paragraphs(url):
             await message.channel.send("This article has already been processed.")
             LOGGER.info(f"Article {url} already processed")
             return
 
+        title, processed_html = await server.article_repository.fetch_article(url)
+
         LOGGER.info(f"Article {url} has not been processed. Beginning now!")
 
         summary_bot = ChatBot(summary_system_prompt)
@@ -347,22 +347,24 @@ async def on_message(message: discord.Message) -> None:
         # Launch the processing task without blocking Discords event loop
         asyncio.create_task(handle_article_url(message, url))
 
-def _run_flask_blocking() -> NoReturn:  # helper returns never
-    server.app.run(host="0.0.0.0", port=8000, debug=False, use_reloader=False)
+async def start_discord():
+    await bot.start(DISCORD_TOKEN)
 
-def main() -> None:
+async def main():
     if DISCORD_TOKEN is None:
         raise RuntimeError("Set the DISCORD_TOKEN environment variable or add it to a .env file.")
 
-    thread = threading.Thread(target=_run_flask_blocking, daemon=True, name="flask-api")
-    thread.start()
-
     try:
-        bot.run(DISCORD_TOKEN)
+        web_task = server.app.run_task(host="0.0.0.0", port=8000, debug=False)
+        discord_task = start_discord()
+        await asyncio.gather(web_task, discord_task)
     finally:
-        asyncio.run(PlaywrightPool.stop())
+        await PlaywrightPool.stop()
         server.article_repository.close()
+        if not bot.is_closed():
+            await bot.close()
 
 if __name__ == "__main__":
-    main()
+    asyncio.run(main())
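The hunk above replaces the Flask-in-a-daemon-thread setup with a single event loop that runs the web server and the Discord client together via asyncio.gather, and performs async cleanup in the finally block. A minimal, self-contained sketch of that pattern (plain asyncio only; serve_web and serve_bot are stand-ins for server.app.run_task(...) and bot.start(...)):

import asyncio

async def serve_web() -> None:
    # Stand-in for server.app.run_task(host="0.0.0.0", port=8000): a long-lived coroutine.
    while True:
        await asyncio.sleep(1)

async def serve_bot() -> None:
    # Stand-in for bot.start(DISCORD_TOKEN): another long-lived coroutine.
    while True:
        await asyncio.sleep(1)

async def main() -> None:
    try:
        # Both services share one event loop; gather() returns only when a task
        # finishes, raises, or is cancelled (e.g. on Ctrl-C).
        await asyncio.gather(serve_web(), serve_bot())
    finally:
        # Still inside the running loop, so async shutdown hooks (the real code
        # awaits PlaywrightPool.stop() and bot.close()) can be awaited directly.
        print("shutting down")

if __name__ == "__main__":
    asyncio.run(main())

Keeping shutdown on the same loop the services ran on is what lets the new finally block await PlaywrightPool.stop() and bot.close() instead of wrapping them in a second asyncio.run().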

View File

@@ -15,10 +15,6 @@ def process_html(html):
                               include_tables=True, include_comments=False, favor_recall=True)
 
 LOGGER = logging.getLogger("pool")
-# logging.basicConfig(
-#     level=logging.INFO,
-#     format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
-# )
 
 class PlaywrightPool:
     _pw = None   # playwright instance
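Dropping the commented-out basicConfig block from this module fits the usual split: library modules only create named loggers, and the process entry point configures logging once. A small illustrative sketch (module and function names are hypothetical):

import logging

# In a library module (the role pool.py plays): no global configuration here.
LOGGER = logging.getLogger("pool")

def do_work() -> None:
    LOGGER.info("doing work")

# In the entry-point script only: configure handlers and format once for the whole process.
if __name__ == "__main__":
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    )
    do_work()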
@@ -140,41 +136,47 @@ class ArticleRepository:
     # ------------------------------------------------------------------ #
     # public API
     # ------------------------------------------------------------------ #
-    async def get_article(self, url: str) -> tuple[str, str]:
-        """
-        Main entry point.
-        Returns the processed text if it is already cached.
-        Otherwise downloads it, processes it, stores it, and returns it.
-        """
-        # Single writer at a time when using sqlite3 avoids `database is locked`
+    async def fetch_article(self, url: str) -> tuple[str, str]:
         async with self._lock:
-            row = self._row_for_url(url)
+            result = self._get_article(url)
+            if result:
+                return result
 
-            if row:                      # row = (id, url, title, raw, processed)
-                LOGGER.info(f"[ArticleRepository] Found cached article for {url}")
-                return row[2], row[4]    # processed_html already present
+            LOGGER.info(f"[ArticleRepository] Downloading article for {url}")
+            title, raw_html = await PlaywrightPool.fetch_html(url)
+            processed_html = process_html(raw_html)
 
-        LOGGER.info(f"[ArticleRepository] Downloading article for {url}")
-
-        title, raw_html = await PlaywrightPool.fetch_html(url)
-
-        processed_html = process_html(raw_html)
-
-        async with self._lock:
             # Upsert:
             self._conn.execute(
                 f"""
                 INSERT INTO articles (url, title, raw_html, processed_html)
                 VALUES ({self.cursor_type}, {self.cursor_type}, {self.cursor_type}, {self.cursor_type})
                 ON CONFLICT(url) DO UPDATE SET
                     title=EXCLUDED.title,
                     raw_html=EXCLUDED.raw_html,
                     processed_html=EXCLUDED.processed_html
                 """,
                 (url, title, raw_html, processed_html),
             )
             self._conn.commit()
 
             return title, processed_html
 
+    async def get_article(self, url: str) -> tuple[str, str] | None:
+        async with self._lock:
+            return self._get_article(url)
+
+    def _get_article(self, url: str) -> tuple[str, str] | None:
+        # Single writer at a time when using sqlite3 avoids `database is locked`
+        row = self._row_for_url(url)
+
+        if row:                      # row = (id, url, title, raw, processed)
+            LOGGER.info(f"[ArticleRepository] Found cached article for {url}")
+            return row[2], row[4]    # processed_html already present
+
+        LOGGER.info(f"[ArticleRepository] Article was not found for {url}")
+        return None
+
     async def has_paragraphs(self, url) -> bool:
         async with self._lock:
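This hunk splits the repository into three entry points: get_article is a read-only, lock-guarded lookup; _get_article is the unlocked helper it shares with fetch_article; and fetch_article downloads, processes, and upserts when the lookup misses, all under one asyncio.Lock so SQLite sees a single writer. A stripped-down sketch of that cache-or-fetch shape, with an in-memory dict standing in for the database and a dummy downloader standing in for PlaywrightPool.fetch_html plus process_html:

import asyncio

class CacheOrFetchRepo:
    def __init__(self) -> None:
        self._lock = asyncio.Lock()
        self._cache: dict[str, str] = {}

    async def get(self, url: str) -> str | None:
        # Read-only path: cached value or None, taken under the lock.
        async with self._lock:
            return self._get(url)

    async def fetch(self, url: str) -> str:
        # Write path: reuse the cached row if present, otherwise download and store.
        async with self._lock:
            cached = self._get(url)
            if cached is not None:
                return cached
            value = await self._download(url)   # stand-in for fetch_html + process_html
            self._cache[url] = value            # stand-in for the INSERT ... ON CONFLICT upsert
            return value

    def _get(self, url: str) -> str | None:
        # Shared, unlocked helper, mirroring _get_article.
        return self._cache.get(url)

    async def _download(self, url: str) -> str:
        await asyncio.sleep(0)                  # simulate network I/O
        return f"processed:{url}"

async def demo() -> None:
    repo = CacheOrFetchRepo()
    print(await repo.get("https://example.com/a"))    # None: nothing cached yet
    print(await repo.fetch("https://example.com/a"))  # downloads and caches
    print(await repo.get("https://example.com/a"))    # now served from the cache

asyncio.run(demo())

One consequence of the new layout: fetch_article now holds the lock across the Playwright download as well as the upsert, so concurrent fetches are serialized, whereas the removed version released the lock while the page was being fetched.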
@@ -190,6 +192,13 @@ class ArticleRepository:
                 return False
         return True
 
+    async def get_latest_articles(self, count):
+        async with self._lock:
+            cur = self._conn.cursor()
+            row = cur.execute(f"SELECT id, url, title, processed_html FROM articles ORDER BY id DESC LIMIT {self.cursor_type}", (count,))
+            return row.fetchall()
+
     async def set_paragraphs(self, url, paragraphs, summary, summary_ratings, topics, topic_ratings):
         async with self._lock:
             article_id = self._row_for_url(url)[0]
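The new get_latest_articles binds count through the driver placeholder (self.cursor_type appears to be the repository's parameter marker, e.g. ? for sqlite3) instead of formatting the number into the SQL string. A minimal sqlite3 illustration of the same parameterized ORDER BY ... LIMIT query against a throwaway in-memory table:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE articles (id INTEGER PRIMARY KEY, url TEXT, title TEXT, processed_html TEXT)"
)
conn.executemany(
    "INSERT INTO articles (url, title, processed_html) VALUES (?, ?, ?)",
    [(f"https://example.com/{i}", f"title {i}", f"<p>body {i}</p>") for i in range(5)],
)

count = 3
rows = conn.execute(
    "SELECT id, url, title, processed_html FROM articles ORDER BY id DESC LIMIT ?",
    (count,),
).fetchall()
print(rows)   # the three highest ids, i.e. the most recently inserted articles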

View File

@ -1,36 +1,35 @@
from flask import Flask, request, jsonify, abort from quart import Quart, request, jsonify, abort
from pathlib import Path from pathlib import Path
import logging
# Import the repository class from the existing code base. # Import the repository class from the existing code base.
# Adjust the relative import path if pool.py lives in a package. # Adjust the relative import path if pool.py lives in a package.
from pool import ArticleRepository from pool import ArticleRepository
app = Flask(__name__) app = Quart(__name__)
article_repository = ArticleRepository() article_repository = ArticleRepository()
LOGGER = logging.getLogger("server")
@app.route("/health")
async def health():
return {"status": "ok"}
@app.route("/articles/<article_url>", methods=["GET"]) @app.route("/articles/<article_url>", methods=["GET"])
def get_article(article_url: str): async def get_article(article_url: str):
""" article = await article_repository.get_article(article_url)
Fetch one article by its numeric primary key.
Responds with the whole row in JSON or 404 if not present.
"""
article = article_repository.get_article(article_url)
if article is None: if article is None:
abort(404, description="Article not found") abort(404, description="Article not found")
return jsonify(article) return jsonify(article)
@app.route("/article-by-url", methods=["GET"]) @app.route("/article-by-url", methods=["GET"])
def get_article_by_url(): async def get_article_by_url():
"""
Same as above but lets a client specify the canonical URL instead of the ID:
GET /article-by-url?url=https://example.com/foo
"""
url = request.args.get("url") url = request.args.get("url")
if not url: if not url:
abort(400, description="`url` query parameter is required") abort(400, description="`url` query parameter is required")
LOGGER.info(f"Fetching article by URL: {url}")
article = await article_repository.get_article(url) article = await article_repository.get_article(url)
if article is None: if article is None:
abort(404, description="Article not found") abort(404, description="Article not found")
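With the route handlers converted to Quart coroutines, the HTTP interface is unchanged apart from the new /health endpoint. A quick way to poke the running server from a client script (host and port taken from the run_task call in the diff; the article URL is only an example):

import json
import urllib.error
import urllib.parse
import urllib.request

BASE = "http://localhost:8000"

# New health-check route added in this change.
with urllib.request.urlopen(f"{BASE}/health") as resp:
    print(json.load(resp))              # expected: {"status": "ok"}

# Look an article up by its canonical URL.
params = urllib.parse.urlencode({"url": "https://example.com/some-article"})
try:
    with urllib.request.urlopen(f"{BASE}/article-by-url?{params}") as resp:
        print(json.load(resp))
except urllib.error.HTTPError as err:
    print(err.code)                     # 404 if the article has not been cached yet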

View File

@@ -10,6 +10,7 @@ in pkgs.mkShell {
     trafilatura
     playwright
     flask
+    quart
   ]))
 ];
 propagatedBuildInputs = with pkgs; [