update realtime rate limit for X

main
Shane 7 months ago
parent 3405572ab0
commit 167506ef30
  1. foodie_automator_google.py (70 lines changed)
  2. foodie_automator_reddit.py (131 lines changed)
  3. foodie_automator_rss.py (2 lines changed)
  4. foodie_engagement_tweet.py (28 lines changed)
  5. foodie_utils.py (89 lines changed)
  6. foodie_weekly_thread.py (25 lines changed)
  7. foodie_x_poster.py (22 lines changed)
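The commit replaces the local daily (15) and monthly (500) tweet counters with a live rate-limit check against the X API before every post. A minimal sketch of the gate pattern the diff applies in each posting script; check_author_rate_limit and AUTHORS are the names used in the diff, while the wrapper itself is only an illustration, not the committed code:

# Sketch of the pre-posting gate this commit adds across the automator scripts.
# check_author_rate_limit and AUTHORS come from foodie_utils as shown below;
# post_fn is a placeholder for the per-author tweet/thread logic.
import time
import logging
from foodie_utils import AUTHORS, check_author_rate_limit

def post_for_all_authors(post_fn):
    for author in AUTHORS:
        can_post, remaining, reset = check_author_rate_limit(author)
        if not can_post:
            reset_time = (time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(reset))
                          if reset else "Unknown")
            logging.info(f"Skipping {author['username']}: rate-limited, reset at {reset_time}")
            continue
        post_fn(author)  # generate and post for this author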

foodie_automator_google.py
@@ -24,10 +24,12 @@ from foodie_config (
)
from foodie_utils import (
load_json_file, save_json_file, get_image, generate_image_query,
upload_image_to_wp, select_best_persona, determine_paragraph_count,
upload_image_to_wp, determine_paragraph_count, insert_link_naturally,
is_interesting, generate_title_from_summary, summarize_with_gpt4o,
generate_category_from_summary, post_to_wp, prepare_post_data,
smart_image_and_filter, insert_link_naturally, get_flickr_image
select_best_author, smart_image_and_filter, get_flickr_image,
get_next_author_round_robin, fetch_duckduckgo_news_context,
check_author_rate_limit
)
from foodie_hooks import get_dynamic_hook, get_viral_share_prompt
from dotenv import load_dotenv
@@ -246,47 +248,61 @@ def fetch_duckduckgo_news_context(trend_title, hours=24):
logging.error(f"Failed to fetch DuckDuckGo News context for '{trend_title}' after {MAX_RETRIES} attempts")
return trend_title
def curate_from_google_trends(geo_list=['US']):
def curate_from_google_trends():
try:
all_trends = []
for geo in geo_list:
trends = scrape_google_trends(geo=geo)
if trends:
all_trends.extend(trends)
if not all_trends:
global posted_titles_data, posted_titles, used_images
posted_titles_data = load_json_file(POSTED_TITLES_FILE, EXPIRATION_HOURS)
posted_titles = set(entry["title"] for entry in posted_titles_data)
used_images = set(entry["title"] for entry in load_json_file(USED_IMAGES_FILE, IMAGE_EXPIRATION_DAYS) if "title" in entry)
logging.debug(f"Loaded {len(posted_titles)} posted titles and {len(used_images)} used images")
trends = fetch_google_trends()
if not trends:
print("No Google Trends data available")
logging.info("No Google Trends data available")
return None, None, False
attempts = 0
max_attempts = 10
while attempts < max_attempts and all_trends:
trend = all_trends.pop(0)
while attempts < max_attempts and trends:
trend = trends.pop(0)
title = trend["title"]
link = trend.get("link", "https://trends.google.com/")
link = trend.get("link", "")
summary = trend.get("summary", "")
source_name = "Google Trends"
source_name = trend.get("source", "Google Trends")
original_source = f'<a href="{link}">{source_name}</a>'
if title in posted_titles:
print(f"Skipping already posted trend: {title}")
logging.info(f"Skipping already posted trend: {title}")
attempts += 1
continue
print(f"Trying Google Trend: {title} from {source_name}")
logging.info(f"Trying Google Trend: {title} from {source_name}")
image_query, relevance_keywords, main_topic, skip = smart_image_and_filter(title, summary)
try:
image_query, relevance_keywords, main_topic, skip = smart_image_and_filter(title, summary)
except Exception as e:
print(f"Smart image/filter error for '{title}': {e}")
logging.warning(f"Failed to process smart_image_and_filter for '{title}': {e}")
attempts += 1
continue
if skip:
logging.info(f"Skipping filtered Google Trend: {title}")
print(f"Skipping filtered trend: {title}")
logging.info(f"Skipping filtered trend: {title}")
attempts += 1
continue
ddg_context = fetch_duckduckgo_news_context(title)
scoring_content = f"{title}\n\n{summary}\n\nAdditional Context: {ddg_context}"
interest_score = is_interesting(scoring_content)
print(f"Interest Score for '{title[:50]}...': {interest_score}")
logging.info(f"Interest score for '{title}': {interest_score}")
if interest_score < 6:
logging.info(f"Google Trends Interest Too Low: {interest_score}")
print(f"Trend Interest Too Low: {interest_score}")
logging.info(f"Trend Interest Too Low: {interest_score}")
attempts += 1
continue
@@ -308,6 +324,7 @@ def curate_from_google_trends(geo_list=['US']):
extra_prompt=extra_prompt
)
if not final_summary:
print(f"Summary failed for '{title}'")
logging.info(f"Summary failed for '{title}'")
attempts += 1
continue
@@ -329,15 +346,17 @@ def curate_from_google_trends(geo_list=['US']):
category = post_data["categories"][0]
image_url, image_source, uploader, page_url = get_flickr_image(image_query, relevance_keywords, main_topic)
if not image_url:
print(f"Flickr image fetch failed for '{image_query}', trying fallback")
logging.warning(f"Flickr image fetch failed for '{image_query}', trying fallback")
image_url, image_source, uploader, page_url = get_image(image_query)
if not image_url:
print(f"All image uploads failed for '{title}' - posting without image")
logging.warning(f"All image uploads failed for '{title}' - posting without image")
image_source = None
uploader = None
page_url = None
hook = get_dynamic_hook(post_data["title"]).strip()
share_prompt = get_viral_share_prompt(post_data["title"], final_summary)
share_links_template = (
f'<p>{share_prompt} '
@@ -362,7 +381,13 @@ def curate_from_google_trends(geo_list=['US']):
interest_score=interest_score,
should_post_tweet=True
)
if not post_id:
print(f"Failed to post to WordPress for '{title}'")
logging.warning(f"Failed to post to WordPress for '{title}'")
attempts += 1
continue
except Exception as e:
print(f"WordPress posting error for '{title}': {e}")
logging.error(f"Failed to post to WordPress for '{title}': {e}", exc_info=True)
attempts += 1
continue
@@ -392,6 +417,7 @@ def curate_from_google_trends(geo_list=['US']):
should_post_tweet=False
)
except Exception as e:
print(f"Failed to update WordPress post '{title}' with share links: {e}")
logging.error(f"Failed to update WordPress post '{title}' with share links: {e}", exc_info=True)
finally:
is_posting = False
@@ -399,23 +425,29 @@ def curate_from_google_trends(geo_list=['US']):
timestamp = datetime.now(timezone.utc).isoformat()
save_json_file(POSTED_TITLES_FILE, title, timestamp)
posted_titles.add(title)
print(f"Successfully saved '{title}' to {POSTED_TITLES_FILE}")
logging.info(f"Successfully saved '{title}' to {POSTED_TITLES_FILE}")
if image_url:
save_json_file(USED_IMAGES_FILE, image_url, timestamp)
used_images.add(image_url)
print(f"Saved image '{image_url}' to {USED_IMAGES_FILE}")
logging.info(f"Saved image '{image_url}' to {USED_IMAGES_FILE}")
print(f"***** SUCCESS: Posted '{post_data['title']}' (ID: {post_id}) from Google Trends *****")
logging.info(f"***** SUCCESS: Posted '{post_data['title']}' (ID: {post_id}) from Google Trends *****")
return post_data, category, True
attempts += 1
print(f"WP posting failed for '{post_data['title']}'")
logging.info(f"WP posting failed for '{post_data['title']}'")
print("No interesting Google Trend found after attempts")
logging.info("No interesting Google Trend found after attempts")
return None, None, False
except Exception as e:
logging.error(f"Unexpected error in curate_from_google_trends: {e}", exc_info=True)
print(f"Unexpected error in curate_from_google_trends: {e}")
return None, None, False
def run_google_trends_automator():
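Both curators refresh their dedup state at the start of each run: load_json_file(POSTED_TITLES_FILE, EXPIRATION_HOURS) and load_json_file(USED_IMAGES_FILE, IMAGE_EXPIRATION_DAYS) feed the posted_titles and used_images sets. The helper itself is not part of this commit; a hypothetical sketch of the expiry filtering it implies, assuming entries shaped like {"title": ..., "timestamp": ISO-8601} and an hour-based window (one call passes a day-based constant, so the real helper presumably normalizes the unit):

# Hypothetical sketch only: the real load_json_file lives in foodie_utils.py and is not in this diff.
import json, os
from datetime import datetime, timedelta, timezone

def load_json_file(path, max_age_hours):
    if not os.path.exists(path):
        return []
    with open(path) as f:
        entries = json.load(f)
    cutoff = datetime.now(timezone.utc) - timedelta(hours=max_age_hours)
    # Drop expired entries so old titles and images become eligible again.
    return [e for e in entries if datetime.fromisoformat(e["timestamp"]) >= cutoff]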

foodie_automator_reddit.py
@@ -25,9 +25,11 @@ from foodie_config (
from foodie_utils import (
load_json_file, save_json_file, get_image, generate_image_query,
upload_image_to_wp, determine_paragraph_count, insert_link_naturally,
summarize_with_gpt4o, generate_category_from_summary, post_to_wp,
prepare_post_data, select_best_author, smart_image_and_filter,
get_flickr_image
is_interesting, generate_title_from_summary, summarize_with_gpt4o,
generate_category_from_summary, post_to_wp, prepare_post_data,
select_best_author, smart_image_and_filter, get_flickr_image,
get_next_author_round_robin, fetch_duckduckgo_news_context,
check_author_rate_limit
)
from foodie_hooks import get_dynamic_hook, get_viral_share_prompt
import fcntl
@@ -268,70 +270,72 @@ def fetch_reddit_posts():
def curate_from_reddit():
try:
articles = fetch_reddit_posts()
if not articles:
global posted_titles_data, posted_titles, used_images
posted_titles_data = load_json_file(POSTED_TITLES_FILE, EXPIRATION_HOURS)
posted_titles = set(entry["title"] for entry in posted_titles_data)
used_images = set(entry["title"] for entry in load_json_file(USED_IMAGES_FILE, IMAGE_EXPIRATION_DAYS) if "title" in entry)
logging.debug(f"Loaded {len(posted_titles)} posted titles and {len(used_images)} used images")
posts = fetch_reddit_posts()
if not posts:
print("No Reddit posts available")
logging.info("No Reddit posts available")
return None, None, False
articles.sort(key=lambda x: x["upvotes"], reverse=True)
reddit = praw.Reddit(
client_id=REDDIT_CLIENT_ID,
client_secret=REDDIT_CLIENT_SECRET,
user_agent=REDDIT_USER_AGENT
)
attempts = 0
max_attempts = 10
while attempts < max_attempts and articles:
article = articles.pop(0)
title = article["title"]
raw_title = article["raw_title"]
link = article["link"]
summary = article["summary"]
source_name = "Reddit"
original_source = '<a href="https://www.reddit.com/">Reddit</a>'
if raw_title in posted_titles:
logging.info(f"Skipping already posted post: {raw_title}")
while attempts < max_attempts and posts:
post = posts.pop(0)
title = post["title"]
link = post.get("link", "")
summary = post.get("summary", "")
source_name = post.get("source", "Reddit")
original_source = f'<a href="{link}">{source_name}</a>'
if title in posted_titles:
print(f"Skipping already posted Reddit post: {title}")
logging.info(f"Skipping already posted Reddit post: {title}")
attempts += 1
continue
print(f"Trying Reddit Post: {title} from {source_name}")
logging.info(f"Trying Reddit Post: {title} from {source_name}")
image_query, relevance_keywords, main_topic, skip = smart_image_and_filter(title, summary)
if skip or any(keyword in title.lower() or keyword in raw_title.lower() for keyword in RECIPE_KEYWORDS + ["homemade"]):
try:
image_query, relevance_keywords, main_topic, skip = smart_image_and_filter(title, summary)
except Exception as e:
print(f"Smart image/filter error for '{title}': {e}")
logging.warning(f"Failed to process smart_image_and_filter for '{title}': {e}")
attempts += 1
continue
if skip:
print(f"Skipping filtered Reddit post: {title}")
logging.info(f"Skipping filtered Reddit post: {title}")
attempts += 1
continue
top_comments = get_top_comments(link, reddit, limit=3)
ddg_context = fetch_duckduckgo_news_context(title)
content_to_summarize = f"{title}\n\n{summary}\n\nTop Comments:\n{'\n'.join(top_comments) if top_comments else 'None'}\n\nAdditional Context: {ddg_context}"
interest_score = is_interesting_reddit(
title,
summary,
article["upvotes"],
article["comment_count"],
top_comments
)
logging.info(f"Interest Score: {interest_score} for '{title}'")
scoring_content = f"{title}\n\n{summary}\n\nAdditional Context: {ddg_context}"
interest_score = is_interesting(scoring_content)
print(f"Interest Score for '{title[:50]}...': {interest_score}")
logging.info(f"Interest score for '{title}': {interest_score}")
if interest_score < 6:
print(f"Reddit Interest Too Low: {interest_score}")
logging.info(f"Reddit Interest Too Low: {interest_score}")
attempts += 1
continue
num_paragraphs = determine_paragraph_count(interest_score)
extra_prompt = (
f"Generate exactly {num_paragraphs} paragraphs.\n"
f"FOCUS: Summarize ONLY the provided content, focusing on its specific topic and details without mentioning the original title.\n"
f"Incorporate relevant insights from these top comments if available: {', '.join(top_comments) if top_comments else 'None'}.\n"
f"Incorporate relevant insights from this additional context if available: {ddg_context}.\n"
f"Do NOT introduce unrelated concepts unless in the content, comments, or additional context.\n"
f"If brief, expand on the core idea with relevant context about its appeal or significance.\n"
f"Do NOT introduce unrelated concepts unless in the content or additional context.\n"
f"Expand on the core idea with relevant context about its appeal or significance in food trends.\n"
f"Do not include emojis in the summary."
)
content_to_summarize = scoring_content
final_summary = summarize_with_gpt4o(
content_to_summarize,
source_name,
@@ -340,12 +344,13 @@ def curate_from_reddit():
extra_prompt=extra_prompt
)
if not final_summary:
print(f"Summary failed for '{title}'")
logging.info(f"Summary failed for '{title}'")
attempts += 1
continue
final_summary = insert_link_naturally(final_summary, source_name, link)
# Use round-robin author selection
author = get_next_author_round_robin()
author_username = author["username"]
@@ -361,15 +366,17 @@ def curate_from_reddit():
category = post_data["categories"][0]
image_url, image_source, uploader, page_url = get_flickr_image(image_query, relevance_keywords, main_topic)
if not image_url:
print(f"Flickr image fetch failed for '{image_query}', trying fallback")
logging.warning(f"Flickr image fetch failed for '{image_query}', trying fallback")
image_url, image_source, uploader, page_url = get_image(image_query)
if not image_url:
print(f"All image uploads failed for '{title}' - posting without image")
logging.warning(f"All image uploads failed for '{title}' - posting without image")
image_source = None
uploader = None
page_url = None
hook = get_dynamic_hook(post_data["title"]).strip()
share_prompt = get_viral_share_prompt(post_data["title"], final_summary)
share_links_template = (
f'<p>{share_prompt} '
@@ -377,7 +384,7 @@ def curate_from_reddit():
f'<a href="https://www.facebook.com/sharer/sharer.php?u={{post_url}}" target="_blank"><i class="tsi tsi-facebook"></i></a></p>'
)
post_data["content"] = f"{final_summary}\n\n{share_links_template}"
global is_posting
is_posting = True
try:
@@ -394,7 +401,13 @@ def curate_from_reddit():
interest_score=interest_score,
should_post_tweet=True
)
if not post_id:
print(f"Failed to post to WordPress for '{title}'")
logging.warning(f"Failed to post to WordPress for '{title}'")
attempts += 1
continue
except Exception as e:
print(f"WordPress posting error for '{title}': {e}")
logging.error(f"Failed to post to WordPress for '{title}': {e}", exc_info=True)
attempts += 1
continue
@@ -424,29 +437,37 @@ def curate_from_reddit():
should_post_tweet=False
)
except Exception as e:
print(f"Failed to update WordPress post '{title}' with share links: {e}")
logging.error(f"Failed to update WordPress post '{title}' with share links: {e}", exc_info=True)
finally:
is_posting = False
timestamp = datetime.now(timezone.utc).isoformat()
save_json_file(POSTED_TITLES_FILE, raw_title, timestamp)
posted_titles.add(raw_title)
logging.info(f"Successfully saved '{raw_title}' to {POSTED_TITLES_FILE}")
save_json_file(POSTED_TITLES_FILE, title, timestamp)
posted_titles.add(title)
print(f"Successfully saved '{title}' to {POSTED_TITLES_FILE}")
logging.info(f"Successfully saved '{title}' to {POSTED_TITLES_FILE}")
if image_url:
save_json_file(USED_IMAGES_FILE, image_url, timestamp)
used_images.add(image_url)
print(f"Saved image '{image_url}' to {USED_IMAGES_FILE}")
logging.info(f"Saved image '{image_url}' to {USED_IMAGES_FILE}")
print(f"***** SUCCESS: Posted '{post_data['title']}' (ID: {post_id}) from Reddit *****")
logging.info(f"***** SUCCESS: Posted '{post_data['title']}' (ID: {post_id}) from Reddit *****")
return post_data, category, True
attempts += 1
print(f"WP posting failed for '{post_data['title']}'")
logging.info(f"WP posting failed for '{post_data['title']}'")
print("No interesting Reddit post found after attempts")
logging.info("No interesting Reddit post found after attempts")
return None, None, False
except Exception as e:
logging.error(f"Unexpected error in curate_from_reddit: {e}", exc_info=True)
print(f"Unexpected error in curate_from_reddit: {e}")
return None, None, False
def run_reddit_automator():
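Both curators gate on is_interesting(scoring_content), skipping anything scoring below 6, and then size the article with determine_paragraph_count(interest_score). That helper is not shown in this commit; a hypothetical mapping purely to illustrate the shape of the call:

# Hypothetical: determine_paragraph_count lives in foodie_utils.py and is not part of this diff.
# Only illustrates an interest score mapping to a paragraph count.
def determine_paragraph_count(interest_score):
    if interest_score >= 9:
        return 5
    if interest_score >= 7:
        return 4
    return 3  # scores below 6 are filtered out before this is called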

foodie_automator_rss.py
@@ -28,7 +28,7 @@ from foodie_utils (
is_interesting, generate_title_from_summary, summarize_with_gpt4o,
generate_category_from_summary, post_to_wp, prepare_post_data,
select_best_author, smart_image_and_filter, get_flickr_image,
get_next_author_round_robin # Add this line
get_next_author_round_robin, check_author_rate_limit
)
from foodie_hooks import get_dynamic_hook, get_viral_share_prompt
from dotenv import load_dotenv

foodie_engagement_tweet.py
@@ -8,7 +8,7 @@ import fcntl
import os
from datetime import datetime, timedelta, timezone
from openai import OpenAI
from foodie_utils import post_tweet, AUTHORS, SUMMARY_MODEL, load_post_counts, save_post_counts
from foodie_utils import post_tweet, AUTHORS, SUMMARY_MODEL, check_author_rate_limit
from foodie_config import X_API_CREDENTIALS, AUTHOR_BACKGROUNDS_FILE
from dotenv import load_dotenv
@@ -161,23 +161,15 @@ def post_engagement_tweet():
logging.info("Starting foodie_engagement_tweet.py")
print("Starting foodie_engagement_tweet.py")
# Load post counts to check limits
post_counts = load_post_counts()
for author in AUTHORS:
# Check if the author can post before generating the tweet
can_post, remaining, reset = check_author_rate_limit(author)
if not can_post:
reset_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(reset)) if reset else "Unknown"
logging.info(f"Skipping engagement tweet for {author['username']} due to rate limit. Remaining: {remaining}, Reset at: {reset_time}")
continue
try:
# Check post limits
author_count = next((entry for entry in post_counts if entry["username"] == author["username"]), None)
if not author_count:
logging.error(f"No post count entry for {author['username']}, skipping")
continue
if author_count["monthly_count"] >= 500:
logging.warning(f"Monthly post limit (500) reached for {author['username']}, skipping")
continue
if author_count["daily_count"] >= 15:
logging.warning(f"Daily post limit (15) reached for {author['username']}, skipping")
continue
tweet = generate_engagement_tweet(author)
if not tweet:
logging.error(f"Failed to generate engagement tweet for {author['username']}, skipping")
@@ -187,10 +179,6 @@ def post_engagement_tweet():
print(f"Posting engagement tweet for {author['username']}: {tweet}")
if post_tweet(author, tweet):
logging.info(f"Successfully posted engagement tweet for {author['username']}")
# Update post counts
author_count["monthly_count"] += 1
author_count["daily_count"] += 1
save_post_counts(post_counts)
else:
logging.warning(f"Failed to post engagement tweet for {author['username']}")
except Exception as e:

foodie_utils.py
@@ -199,7 +199,6 @@ def post_tweet(author, tweet, reply_to_id=None):
from foodie_config import X_API_CREDENTIALS
import logging
import tweepy
from datetime import datetime, timezone
credentials = X_API_CREDENTIALS.get(author["username"])
if not credentials:
@@ -212,15 +211,6 @@ def post_tweet(author, tweet, reply_to_id=None):
if reply_to_id:
logging.debug(f"Replying to tweet ID: {reply_to_id}")
post_counts = load_post_counts()
author_count = next((entry for entry in post_counts if entry["username"] == author["username"]), None)
if author_count["monthly_count"] >= 500:
logging.warning(f"Monthly post limit (500) reached for {author['username']}")
return False
if author_count["daily_count"] >= 15: # Updated daily limit
logging.warning(f"Daily post limit (15) reached for {author['username']}")
return False
try:
client = tweepy.Client(
consumer_key=credentials["api_key"],
@@ -232,9 +222,6 @@ def post_tweet(author, tweet, reply_to_id=None):
text=tweet,
in_reply_to_tweet_id=reply_to_id
)
author_count["monthly_count"] += 1
author_count["daily_count"] += 1
save_post_counts(post_counts)
logging.info(f"Posted tweet for {author['username']} (handle: {credentials['x_username']}): {tweet}")
logging.debug(f"Tweet ID: {response.data['id']}")
return {"id": response.data["id"]}
@@ -1170,11 +1157,61 @@ def select_best_author(content, interest_score):
logging.error(f"Error in select_best_author: {e}")
return random.choice(list(PERSONA_CONFIGS.keys()))
def check_rate_limit(response):
"""Extract rate limit information from Twitter API response headers."""
try:
remaining = int(response.get('x-rate-limit-remaining', 0))
reset = int(response.get('x-rate-limit-reset', 0))
return remaining, reset
except (ValueError, TypeError) as e:
logging.warning(f"Failed to parse rate limit headers: {e}")
return None, None
def check_author_rate_limit(author):
"""Check the rate limit for a specific author by making a lightweight API call."""
credentials = X_API_CREDENTIALS.get(author["username"])
if not credentials:
logging.error(f"No X credentials found for {author['username']}")
return False, None, None
try:
client = tweepy.Client(
consumer_key=credentials["api_key"],
consumer_secret=credentials["api_secret"],
access_token=credentials["access_token"],
access_token_secret=credentials["access_token_secret"],
return_type=dict
)
# Use a lightweight endpoint to check rate limits (e.g., /users/me)
response = client.get_me()
remaining, reset = check_rate_limit(response)
if remaining is None or reset is None:
logging.warning(f"Could not determine rate limit for {author['username']}. Assuming rate limit is not hit.")
return True, None, None
if remaining <= 0:
reset_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(reset))
logging.info(f"Author {author['username']} is rate-limited. Remaining: {remaining}, Reset at: {reset_time}")
return False, remaining, reset
logging.debug(f"Author {author['username']} can post. Remaining: {remaining}, Reset at: {time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(reset))}")
return True, remaining, reset
except tweepy.TweepyException as e:
logging.error(f"Failed to check rate limit for {author['username']}: {e}")
if e.response and e.response.status_code == 429:
remaining, reset = check_rate_limit(e.response)
if remaining is not None and reset is not None:
reset_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(reset))
logging.info(f"Author {author['username']} is rate-limited. Remaining: {remaining}, Reset at: {reset_time}")
return False, remaining, reset
logging.warning(f"Assuming {author['username']} is rate-limited due to error.")
return False, None, None
except Exception as e:
logging.error(f"Unexpected error checking rate limit for {author['username']}: {e}", exc_info=True)
return False, None, None
def get_next_author_round_robin():
"""Select the next author in a round-robin fashion, respecting daily tweet limits."""
"""Select the next author in a round-robin fashion, ensuring they are not rate-limited."""
last_author_file = "/home/shane/foodie_automator/last_author.json"
authors = [author["username"] for author in AUTHORS]
post_counts = load_post_counts()
# Load the last used author
try:
@@ -1188,20 +1225,17 @@ def get_next_author_round_robin():
logging.warning(f"Failed to load last author from {last_author_file}: {e}. Starting from first author.")
last_index = -1
# Find the next author who hasn't reached the daily limit
# Find the next author who is not rate-limited
start_index = (last_index + 1) % len(authors)
for i in range(len(authors)):
current_index = (start_index + i) % len(authors)
username = authors[current_index]
author_count = next((entry for entry in post_counts if entry["username"] == username), None)
if not author_count:
logging.error(f"No post count entry for {username}, skipping")
continue
if author_count["daily_count"] >= 15: # Updated daily limit
logging.info(f"Author {username} has reached daily limit ({author_count['daily_count']}/15), skipping")
continue
if author_count["monthly_count"] >= 500:
logging.info(f"Author {username} has reached monthly limit ({author_count['monthly_count']}/500), skipping")
author = next(author for author in AUTHORS if author["username"] == username)
# Check if the author can post based on rate limits
can_post, remaining, reset = check_author_rate_limit(author)
if not can_post:
logging.info(f"Skipping author {username} due to rate limit.")
continue
# Save the current index as the last used author
@@ -1212,10 +1246,9 @@ def get_next_author_round_robin():
except Exception as e:
logging.warning(f"Failed to save last author to {last_author_file}: {e}")
# Return the selected author
return next(author for author in AUTHORS if author["username"] == username)
return author
logging.warning("No authors available within daily/monthly limits. Selecting a random author as fallback.")
logging.warning("No authors available due to rate limits. Selecting a random author as fallback.")
return random.choice(AUTHORS)
def prepare_post_data(summary, title, main_topic=None):
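check_rate_limit above reads X's standard rate-limit headers, x-rate-limit-remaining and x-rate-limit-reset (a Unix epoch). A small standalone sketch of the same parsing plus a wait-time calculation, assuming a plain dict of headers; the tweepy response object used in the committed code may expose these values differently:

import time

def seconds_until_reset(headers):
    # headers: assumed dict-like mapping of the X API rate-limit response headers
    try:
        remaining = int(headers.get('x-rate-limit-remaining', 0))
        reset_epoch = int(headers.get('x-rate-limit-reset', 0))
    except (ValueError, TypeError):
        return None, None
    return remaining, max(0, reset_epoch - int(time.time()))

# Example with made-up values: no calls left, window resets in ~15 minutes
remaining, wait = seconds_until_reset(
    {'x-rate-limit-remaining': '0', 'x-rate-limit-reset': str(int(time.time()) + 900)})
print(f"remaining={remaining}, retry in ~{wait}s")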

foodie_weekly_thread.py
@@ -10,7 +10,7 @@ import time
from datetime import datetime, timedelta, timezone
import tweepy
from openai import OpenAI
from foodie_utils import post_tweet, AUTHORS, SUMMARY_MODEL, load_json_file
from foodie_utils import post_tweet, AUTHORS, SUMMARY_MODEL, load_json_file, check_author_rate_limit
from foodie_config import X_API_CREDENTIALS, RECENT_POSTS_FILE
from dotenv import load_dotenv
@@ -310,9 +310,6 @@ def post_weekly_thread():
if username in posts_by_author:
posts_by_author[username].append(post)
# Load post counts to check limits
post_counts = load_post_counts()
# Post threads for each author
for author in AUTHORS:
username = author["username"]
@@ -321,21 +318,13 @@ def post_weekly_thread():
logging.info(f"No posts found for {username}, skipping")
continue
# Check daily limit (each thread will use 3 tweets: lead + 2 thread tweets)
author_count = next((entry for entry in post_counts if entry["username"] == username), None)
if not author_count:
logging.error(f"No post count entry for {username}, skipping")
continue
if author_count["daily_count"] >= 15:
logging.warning(f"Daily post limit (15) reached for {username}, skipping")
continue
if author_count["daily_count"] + 3 > 15:
logging.warning(f"Posting thread for {username} would exceed daily limit (current: {author_count['daily_count']}, needed: 3), skipping")
# Check if the author can post before generating the thread
can_post, remaining, reset = check_author_rate_limit(author)
if not can_post:
reset_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(reset)) if reset else "Unknown"
logging.info(f"Skipping weekly thread for {username} due to rate limit. Remaining: {remaining}, Reset at: {reset_time}")
continue
if author_count["monthly_count"] >= 500:
logging.warning(f"Monthly post limit (500) reached for {username}, skipping")
continue
# Select top 2 posts (to fit within 3-tweet limit: lead + 2 posts)
author_posts = sorted(author_posts, key=lambda x: datetime.fromisoformat(x["timestamp"]), reverse=True)[:2]
logging.info(f"Selected {len(author_posts)} posts for {username}")

foodie_x_poster.py
@@ -9,7 +9,7 @@ import os
from datetime import datetime, timezone, timedelta
from openai import OpenAI
from foodie_config import OPENAI_API_KEY, AUTHORS, LIGHT_TASK_MODEL, PERSONA_CONFIGS, AUTHOR_BACKGROUNDS_FILE
from foodie_utils import load_json_file, post_tweet
from foodie_utils import load_json_file, post_tweet, check_author_rate_limit
from dotenv import load_dotenv
load_dotenv()
@@ -99,10 +99,24 @@ def main():
global is_posting
logging.info("***** X Poster Launched *****")
for author in AUTHORS:
# Check if the author can post before generating the tweet
can_post, remaining, reset = check_author_rate_limit(author)
if not can_post:
reset_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(reset)) if reset else "Unknown"
logging.info(f"Skipping engagement tweet for {author['username']} due to rate limit. Remaining: {remaining}, Reset at: {reset_time}")
continue
is_posting = True
tweet = generate_engagement_tweet(author, author["persona"])
post_tweet(author, tweet)
is_posting = False
try:
tweet = generate_engagement_tweet(author, author["persona"])
if post_tweet(author, tweet):
logging.info(f"Successfully posted engagement tweet for {author['username']}")
else:
logging.warning(f"Failed to post engagement tweet for {author['username']}")
except Exception as e:
logging.error(f"Error posting engagement tweet for {author['username']}: {e}", exc_info=True)
finally:
is_posting = False
time.sleep(random.uniform(3600, 7200))
logging.info("X posting completed")
