# reddit_MCP / app.py
from __future__ import annotations
import os
import re
import html
from typing import List, Optional, Literal
import httpx
import feedparser
from pydantic import BaseModel, Field, HttpUrl
from fastmcp import FastMCP
from mistralai import Mistral
mcp = FastMCP(
name="reddit-painpoints",
host="0.0.0.0",
port=7860,
)
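# Server settings: listen on 0.0.0.0:7860; the HTTP transport is selected in the __main__ block below.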
MISTRAL_MODEL = "mistral-medium-2508"
class PainPoint(BaseModel):
"""Structured representation of a user pain point extracted from a Reddit post."""
title: str = Field(..., description="Short title of the pain point")
summary: str = Field(..., description="One-sentence summary of the problem")
url: HttpUrl = Field(..., description="URL to the original Reddit post")
score: int = Field(..., description="Reddit score (upvotes minus downvotes)")
created_utc: float = Field(..., description="Post creation time (Unix seconds)")
post_id: str = Field(..., description="Reddit post ID")
flair: Optional[str] = Field(None, description="Post flair, if present")
class PainPointDecision(BaseModel):
decision: Literal["YES", "NO"]
reason: Optional[str] = None
class PainPointGenerated(BaseModel):
title: str
summary: str
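# Illustrative PainPoint instance (hypothetical values, shown only to document the schema):
#   PainPoint(
#       title="Missing Le Chat migration guide",
#       summary="User cannot find documentation explaining how to move a ChatGPT workflow to Le Chat.",
#       url="https://www.reddit.com/r/MistralAI/comments/abc123/example/",
#       score=12,
#       created_utc=1719850000.0,
#       post_id="abc123",
#       flair=None,
#   )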
def _fetch_subreddit_new(subreddit: str, limit: int) -> list[dict]:
    """Fetch 'new' posts for a subreddit; fall back to api.reddit.com, then to the RSS feed, if the JSON endpoint is blocked."""
json_url = f"https://www.reddit.com/r/{subreddit}/new.json?limit={limit}"
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) FastMCP-RedditPainPoints/1.0 (+https://example.com)",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "en-US,en;q=0.9",
}
try:
with httpx.Client(timeout=httpx.Timeout(15.0), headers=headers) as client:
response = client.get(json_url, follow_redirects=True)
response.raise_for_status()
payload = response.json()
children = payload.get("data", {}).get("children", [])
print(f"Reddit fetch source: JSON API ({len(children)} items)")
return [child.get("data", {}) for child in children]
except Exception as e:
print(f"Reddit JSON fetch failed: {e}; trying api.reddit.com")
try:
api_url = f"https://api.reddit.com/r/{subreddit}/new?limit={limit}"
with httpx.Client(timeout=httpx.Timeout(15.0), headers=headers) as client:
response = client.get(api_url, follow_redirects=True)
response.raise_for_status()
payload = response.json()
children = payload.get("data", {}).get("children", [])
print(f"Reddit fetch source: API domain ({len(children)} items)")
return [child.get("data", {}) for child in children]
except Exception as e2:
# RSS fallback
print(f"Reddit API fetch failed: {e2}; switching to RSS fallback")
feed_url = f"https://www.reddit.com/r/{subreddit}/new/.rss"
feed = feedparser.parse(feed_url)
posts: list[dict] = []
for entry in feed.entries[:limit]:
link = entry.get("link") or ""
title = entry.get("title") or ""
created_utc = 0.0
if getattr(entry, "published_parsed", None):
try:
import calendar
created_utc = float(calendar.timegm(entry.published_parsed))
except Exception:
created_utc = 0.0
# Extract a crude text body from RSS summary/content for better AI signal
raw_summary = getattr(entry, "summary", "") or getattr(entry, "description", "") or ""
if raw_summary:
text = html.unescape(re.sub(r"<[^>]+>", " ", raw_summary)).strip()
else:
text = ""
posts.append(
{
"title": title,
"selftext": text,
"score": None,
"created_utc": created_utc,
"id": entry.get("id") or "",
"permalink": "",
"url": link,
"link_flair_text": None,
}
)
print(f"Reddit fetch source: RSS fallback ({len(posts)} items)")
return posts
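# Each dict returned above mirrors Reddit's post "data" object (JSON/API paths) or the normalized
# RSS shape built in the fallback: title, selftext, score, created_utc, id, permalink, url,
# link_flair_text. A quick local check might look like this (hypothetical output):
#   posts = _fetch_subreddit_new("MistralAI", 5)
#   print(posts[0].get("title"), posts[0].get("permalink"))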
def _get_mistral_client() -> Mistral:
api_key = os.environ.get("MISTRAL_API_KEY")
if not api_key:
raise RuntimeError("MISTRAL_API_KEY environment variable is required for AI-based extraction")
return Mistral(api_key=api_key)
def _ai_should_extract_painpoint(client: Mistral, title: str, selftext: str) -> bool:
"""Use Mistral structured output to decide if the post is a pain point."""
    content = (
        "You are a strict classifier deciding whether a Reddit post describes a concrete pain point "
        "about Mistral AI documentation, mainly whether the documentation is missing information on how to achieve a specific goal.\n\n"
        "Return JSON with a YES/NO decision and a brief reason. Return YES only for these two cases: "
        "someone wishing to switch from ChatGPT to Le Chat, or uncertainty about Mistral's compatibility "
        "and user experience with the OpenCode CLI for Python programming."
    )
    user_text = (
        f"Title: {title}\n\n"
        f"Body: {selftext or '(none)'}\n\n"
        "Does this describe a problem that my team (Developer Relations, in charge of documentation) should tackle?"
    )
resp = client.chat.parse(
model=MISTRAL_MODEL,
messages=[
{"role": "system", "content": content},
{"role": "user", "content": user_text},
],
response_format=PainPointDecision,
temperature=0,
max_tokens=64,
)
parsed: PainPointDecision = resp.choices[0].message.parsed # type: ignore[attr-defined]
print(f"AI classify: {parsed.decision}")
return parsed.decision == "YES"
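# Example of a parsed decision (illustrative only):
#   PainPointDecision(decision="NO", reason="Post is a model release announcement, not a documentation gap")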
def _ai_generate_title_summary(client: Mistral, title: str, selftext: str) -> PainPointGenerated:
"""Use Mistral structured output to produce a concise title and summary."""
content = (
"You generate a clear, concise pain point title and a one-sentence summary that captures the core issue.\n"
"Do not add links or metadata. Keep the summary <= 240 characters."
)
user_text = (
f"Original Title: {title}\n\n"
f"Body: {selftext or '(none)'}\n\n"
"If insufficient information, infer a short neutral title and a crisp summary."
)
resp = client.chat.parse(
model=MISTRAL_MODEL,
messages=[
{"role": "system", "content": content},
{"role": "user", "content": user_text},
],
response_format=PainPointGenerated,
temperature=0,
max_tokens=128,
)
return resp.choices[0].message.parsed # type: ignore[return-value, attr-defined]
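# Example of a parsed generation (illustrative only):
#   PainPointGenerated(title="Unclear OpenCode CLI support",
#                      summary="User is unsure whether Mistral models work with the OpenCode CLI for Python.")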
@mcp.tool(description="Scan r/MistralAI for problem-like posts using AI and return extracted pain points.")
def scan_mistralai_pain_points(limit: int = 50, min_score: int = 0) -> List[PainPoint]:
"""
Fetch recent posts from r/MistralAI and extract a list of pain points using a two-step AI flow:
1) Classify each post as a pain point (YES/NO)
2) If YES, generate a concise title and summary via structured outputs
- limit: Maximum posts to scan (<=100)
- min_score: Minimum Reddit score to include
"""
raw_posts = _fetch_subreddit_new("MistralAI", max(1, min(limit, 100)))
client = _get_mistral_client()
pain_points: List[PainPoint] = []
for post in raw_posts:
title = post.get("title", "").strip()
selftext = post.get("selftext", "") or ""
raw_score = post.get("score")
score = int(raw_score) if raw_score is not None else 0
# Only filter by score when a real score is available
if raw_score is not None and score < min_score:
print(f"Skip by score: '{title[:80]}' score={score} < min_score={min_score}")
continue
try:
should = _ai_should_extract_painpoint(client, title, selftext)
except Exception:
# On AI failure, skip the post to avoid false positives
print("AI classify failed; skipping post")
continue
if not should:
print(f"Classifier NO: '{title[:80]}'")
continue
try:
gen = _ai_generate_title_summary(client, title, selftext)
ai_title = gen.title.strip()
ai_summary = gen.summary.strip()
except Exception:
# If generation fails, fall back to minimal safe defaults
print("AI generation failed; using fallback title/summary")
ai_title = title
ai_summary = (selftext or title)[:240]
        permalink = post.get("permalink") or ""
        full_url = f"https://www.reddit.com{permalink}" if permalink else (post.get("url_overridden_by_dest") or post.get("url") or "")
        if not full_url:
            print(f"Skip (no URL): '{title[:80]}'")  # an empty URL would fail HttpUrl validation
            continue
        pain_points.append(
PainPoint(
title=ai_title or title,
summary=ai_summary,
url=full_url,
score=score,
created_utc=float(post.get("created_utc", 0.0) or 0.0),
post_id=str(post.get("id", "")),
flair=post.get("link_flair_text"),
)
)
print(f"Added: '{ai_title[:80]}'")
print(f"Extraction complete: {len(pain_points)} pain points")
return pain_points
if __name__ == "__main__":
mcp.run(transport="http")
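# Rough client-side sketch (assumes the fastmcp Client API and the default /mcp endpoint path;
# adjust the URL to wherever this server is deployed):
#
#   import asyncio
#   from fastmcp import Client
#
#   async def main():
#       async with Client("http://localhost:7860/mcp") as client:
#           points = await client.call_tool("scan_mistralai_pain_points", {"limit": 25, "min_score": 0})
#           print(points)
#
#   asyncio.run(main())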