Updated OpenAI API syntax

my-fix-branch
Shane 7 months ago
parent c292b94da7
commit d7fd57af81
  1. 9
      foodie_automator_google.py
  2. 12
      foodie_automator_reddit.py
  3. 9
      foodie_automator_rss.py
  4. 25
      foodie_engagement_tweet.py
  5. 16
      foodie_utils.py
  6. 13
      foodie_weekly_thread.py
  7. 2
      requirements.txt

@@ -226,10 +226,11 @@ def curate_from_google_trends():
# Summarize the trend
num_paragraphs = determine_paragraph_count(interest_score)
extra_prompt = (
f"Generate exactly {num_paragraphs} paragraphs. "
f"FOCUS: Summarize ONLY the provided content, explicitly mentioning '{title}' and sticking to its specific topic and details. "
f"Do NOT introduce unrelated concepts. Expand on the core idea with relevant context about its appeal or significance in food trends."
"Do not include emojis in the summary."
f"Generate exactly {num_paragraphs} paragraphs.\n"
f"FOCUS: Summarize ONLY the provided content, explicitly mentioning '{title}' and sticking to its specific topic and details.\n"
f"Do NOT introduce unrelated concepts.\n"
f"Expand on the core idea with relevant context about its appeal or significance in food trends.\n"
f"Do not include emojis in the summary."
)
content_to_summarize = scoring_content
final_summary = summarize_with_gpt4o(

@@ -264,12 +264,12 @@ def curate_from_reddit():
num_paragraphs = determine_paragraph_count(interest_score)
extra_prompt = (
f"Generate exactly {num_paragraphs} paragraphs. "
f"FOCUS: Summarize ONLY the provided content, explicitly mentioning '{title}' and sticking to its specific topic and details. "
"Incorporate relevant insights from these top comments if available: {', '.join(top_comments) if top_comments else 'None'}. "
"Do NOT introduce unrelated concepts unless in the content or comments. "
"If brief, expand on the core idea with relevant context about its appeal or significance. "
"Do not include emojis in the summary."
f"Generate exactly {num_paragraphs} paragraphs.\n"
f"FOCUS: Summarize ONLY the provided content, explicitly mentioning '{title}' and sticking to its specific topic and details.\n"
f"Incorporate relevant insights from these top comments if available: {', '.join(top_comments) if top_comments else 'None'}.\n"
f"Do NOT introduce unrelated concepts unless in the content or comments.\n"
f"If brief, expand on the core idea with relevant context about its appeal or significance.\n"
f"Do not include emojis in the summary."
)
content_to_summarize = f"{title}\n\n{summary}"
if top_comments:

@@ -266,10 +266,11 @@ def curate_from_rss():
num_paragraphs = determine_paragraph_count(interest_score)
extra_prompt = (
f"Generate exactly {num_paragraphs} paragraphs. "
f"FOCUS: Summarize ONLY the provided content, explicitly mentioning '{title}' and sticking to its specific topic and details. "
f"Do NOT introduce unrelated concepts. Expand on the core idea with relevant context about its appeal or significance."
"Do not include emojis in the summary."
f"Generate exactly {num_paragraphs} paragraphs.\n"
f"FOCUS: Summarize ONLY the provided content, explicitly mentioning '{title}' and sticking to its specific topic and details.\n"
f"Do NOT introduce unrelated concepts.\n"
f"Expand on the core idea with relevant context about its appeal or significance.\n"
f"Do not include emojis in the summary."
)
content_to_summarize = scoring_content
final_summary = summarize_with_gpt4o(

@@ -1,14 +1,21 @@
import random
import logging
from datetime import datetime, timedelta
import openai
from datetime import datetime, timedelta, timezone
from openai import OpenAI # Add this import
from foodie_utils import post_tweet, AUTHORS, SUMMARY_MODEL
from dotenv import load_dotenv # Add this import
# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Load environment variables
load_dotenv()
# Initialize OpenAI client
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
def generate_engagement_tweet(author):
author_handle = author["handle"]
author_handle = author["x_username"] # Updated to use x_username from X_API_CREDENTIALS
prompt = (
f"Generate a concise tweet (under 280 characters) for {author_handle}. "
f"Create an engaging food-related question or statement to spark interaction. "
@@ -18,7 +25,7 @@ def generate_engagement_tweet(author):
)
try:
response = openai.ChatCompletion.create(
response = client.chat.completions.create(
model=SUMMARY_MODEL,
messages=[
{"role": "system", "content": "You are a social media expert crafting engaging tweets."},
@@ -35,13 +42,13 @@ def generate_engagement_tweet(author):
logging.warning(f"Failed to generate engagement tweet for {author['username']}: {e}")
# Fallback templates
engagement_templates = [
"Whats the most mouthwatering dish youve seen this week Share below and follow {handle} for more foodie ideas on InsiderFoodie.com Link: https://insiderfoodie.com",
"Food lovers unite Whats your go to comfort food Tell us and like this tweet for more tasty ideas from {handle} on InsiderFoodie.com Link: https://insiderfoodie.com",
"Ever tried a dish that looked too good to eat Share your favorites and follow {handle} for more culinary trends on InsiderFoodie.com Link: https://insiderfoodie.com",
"What food trend are you loving right now Let us know and like this tweet to keep up with {handle} on InsiderFoodie.com Link: https://insiderfoodie.com"
f"Whats the most mouthwatering dish youve seen this week Share below and follow {author_handle} for more foodie ideas on InsiderFoodie.com Link: https://insiderfoodie.com",
f"Food lovers unite Whats your go to comfort food Tell us and like this tweet for more tasty ideas from {author_handle} on InsiderFoodie.com Link: https://insiderfoodie.com",
f"Ever tried a dish that looked too good to eat Share your favorites and follow {author_handle} for more culinary trends on InsiderFoodie.com Link: https://insiderfoodie.com",
f"What food trend are you loving right now Let us know and like this tweet to keep up with {author_handle} on InsiderFoodie.com Link: https://insiderfoodie.com"
]
template = random.choice(engagement_templates)
return template.format(handle=author_handle)
return template
def post_engagement_tweet():
# Reference date for calculating the 2-day interval

@@ -127,7 +127,6 @@ def generate_article_tweet(author, post, persona):
url = post["url"]
author_handle = f"@{author['username']}"
# Base tweet content
prompt = (
f"Generate a concise tweet (under 280 characters) for {author_handle} using the persona '{persona}'. "
f"Summarize the article '{title}' and include the link '{url}'. "
@@ -137,8 +136,8 @@ def generate_article_tweet(author, post, persona):
f"Do not include hashtags or emojis."
)
response = openai.ChatCompletion.create(
model=SUMMARY_MODEL,
response = openai.chat.completions.create(
model="gpt-4o",
messages=[
{"role": "system", "content": "You are a social media expert crafting engaging tweets."},
{"role": "user", "content": prompt}
@@ -307,7 +306,7 @@ def smart_image_and_filter(title, summary):
"for an image search about food industry trends or viral content. Prioritize specific terms if present, "
"otherwise focus on the main theme. "
"Return 'SKIP' if the article is about home appliances, recipes, promotions, or contains 'homemade', else 'KEEP'. "
"Return as JSON: {'image_query': 'specific term', 'relevance': ['keyword1', 'keyword2'], 'action': 'KEEP' or 'SKIP'}"
"Return as JSON with double quotes for all property names and string values (e.g., {\"image_query\": \"specific term\", \"relevance\": [\"keyword1\", \"keyword2\"], \"action\": \"KEEP\" or \"SKIP\"})."
)
response = client.chat.completions.create(
@@ -321,11 +320,15 @@ def smart_image_and_filter(title, summary):
raw_result = response.choices[0].message.content.strip()
logging.info(f"Raw GPT smart image/filter response: '{raw_result}'")
# Remove ```json markers and fix single quotes in JSON structure
cleaned_result = re.sub(r'```json\s*|\s*```', '', raw_result).strip()
# Replace single quotes with double quotes, but preserve single quotes within string values
fixed_result = re.sub(r"(?<!\\)'(?=\s*[\w\s]*\])|(?<=\[|\{|\s)'|'(?=\s*[\]\},:])|(?<=\w)'(?=\s*:)", '"', cleaned_result)
try:
result = json.loads(cleaned_result)
result = json.loads(fixed_result)
except json.JSONDecodeError as e:
logging.warning(f"JSON parsing failed: {e}, raw: '{cleaned_result}'. Using fallback.")
logging.warning(f"JSON parsing failed: {e}, raw: '{fixed_result}'. Using fallback.")
return "food trends", ["cuisine", "dining"], False
if not isinstance(result, dict) or "image_query" not in result or "relevance" not in result or "action" not in result:
@@ -468,6 +471,7 @@ def summarize_with_gpt4o(content, source_name, link, interest_score=0, extra_pro
full_prompt = (
f"{prompt}\n\n"
f"{extra_prompt}\n\n"
f"Avoid using the word 'elevate'—use more humanized language like 'level up' or 'bring to life'.\n"
f"Content to summarize:\n{content}\n\n"
f"Source: {source_name}\n"
f"Link: {link}"

@@ -2,12 +2,15 @@ import json
from datetime import datetime, timedelta
import logging
import random
import openai
from openai import OpenAI # Add this import
from foodie_utils import post_tweet, AUTHORS, SUMMARY_MODEL
# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Initialize OpenAI client
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
RECENT_POSTS_FILE = "/home/shane/foodie_automator/recent_posts.json"
def load_recent_posts():
@@ -45,7 +48,7 @@ def generate_intro_tweet(author):
)
try:
response = openai.ChatCompletion.create(
response = client.chat.completions.create(
model=SUMMARY_MODEL,
messages=[
{"role": "system", "content": "You are a social media expert crafting engaging tweets."},
@@ -82,7 +85,7 @@ def post_weekly_thread():
# Group posts by author
posts_by_author = {}
for post in weekly_posts:
author = post["author"]
author = post["author_username"] # Updated to match the key in recent_posts.json
if author not in posts_by_author:
posts_by_author[author] = []
posts_by_author[author].append(post)
@@ -94,8 +97,8 @@ def post_weekly_thread():
logging.info(f"No posts found for {author['username']} this week")
continue
# Sort by interest score and take top 10
author_posts.sort(key=lambda x: x.get("interest_score", 0), reverse=True)
# Sort by timestamp (as a proxy for interest_score) and take top 10
author_posts.sort(key=lambda x: x.get("timestamp", ""), reverse=True)
top_posts = author_posts[:10]
if not top_posts:

@ -1,7 +1,7 @@
requests==2.32.3
selenium==4.29.0
duckduckgo_search==7.5.4
openai==1.35.3
openai==1.75.0
praw==7.8.1
beautifulsoup4==4.13.3
Pillow==11.1.0

Loading…
Cancel
Save