Fix missing-tweet issue: rename the `post_tweet` keyword parameter of `post_to_wp` to `should_post_tweet` so it no longer shadows the `post_tweet()` function, which prevented the article tweet from ever being sent; update all callers accordingly.

my-fix-branch
Shane 7 months ago
parent 8323744953
commit 5963712139
  1. 232
      foodie_automator_google.py
  2. 4
      foodie_automator_reddit.py
  3. 4
      foodie_automator_rss.py
  4. 8
      foodie_utils.py

@ -178,85 +178,117 @@ def fetch_duckduckgo_news_context(trend_title, hours=24):
logging.warning(f"DuckDuckGo News context fetch failed for '{trend_title}': {e}") logging.warning(f"DuckDuckGo News context fetch failed for '{trend_title}': {e}")
return trend_title return trend_title
def curate_from_google_trends():
    """Pick one interesting Google Trends item, summarize it, and publish it.

    Pops trends from fetch_google_trends() one at a time (up to 10 attempts),
    skipping items that were already posted, fail the content filter, or score
    below the interest threshold. A surviving trend is summarized with GPT-4o,
    decorated with an image and CTA, posted to WordPress (tweeting on the first
    call only), then re-posted once with a post-URL-aware CTA (no tweet).

    Returns:
        (post_data, category, delay_seconds) on success, or
        (None, None, delay_seconds) when nothing was posted; delay is None
        only when no trends data was available at all.
    """
    # Fetch Google Trends data
    trends = fetch_google_trends()
    if not trends:
        print("No Google Trends data available")
        logging.info("No Google Trends data available")
        return None, None, None

    attempts = 0
    max_attempts = 10
    while attempts < max_attempts and trends:
        trend = trends.pop(0)
        title = trend["title"]
        link = trend.get("link", "https://trends.google.com/")
        summary = trend.get("summary", "")
        source_name = "Google Trends"
        original_source = f'<a href="{link}">{source_name}</a>'

        # Skip anything we have already published.
        if title in posted_titles:
            print(f"Skipping already posted trend: {title}")
            logging.info(f"Skipping already posted trend: {title}")
            attempts += 1
            continue

        print(f"Trying Google Trend: {title} from {source_name}")
        logging.info(f"Trying Google Trend: {title} from {source_name}")

        # Check if the trend should be filtered out
        image_query, relevance_keywords, skip = smart_image_and_filter(title, summary)
        if skip:
            print(f"Skipping filtered Google Trend: {title}")
            logging.info(f"Skipping filtered Google Trend: {title}")
            attempts += 1
            continue

        # Calculate interest score; anything below 6 is not worth posting.
        scoring_content = f"{title}\n\n{summary}"
        interest_score = is_interesting(scoring_content)
        logging.info(f"Interest score for '{title}': {interest_score}")
        if interest_score < 6:
            print(f"Google Trends Interest Too Low: {interest_score}")
            logging.info(f"Google Trends Interest Too Low: {interest_score}")
            attempts += 1
            continue

        # Summarize the trend; paragraph count scales with the interest score.
        num_paragraphs = determine_paragraph_count(interest_score)
        extra_prompt = (
            f"Generate exactly {num_paragraphs} paragraphs. "
            f"FOCUS: Summarize ONLY the provided content, explicitly mentioning '{title}' and sticking to its specific topic and details. "
            f"Do NOT introduce unrelated concepts. Expand on the core idea with relevant context about its appeal or significance in food trends."
            "Do not include emojis in the summary."
        )
        content_to_summarize = scoring_content
        final_summary = summarize_with_gpt4o(
            content_to_summarize,
            source_name,
            link,
            interest_score=interest_score,
            extra_prompt=extra_prompt
        )
        if not final_summary:
            logging.info(f"Summary failed for '{title}'")
            attempts += 1
            continue

        final_summary = insert_link_naturally(final_summary, source_name, link)

        # Prepare post data
        post_data, author, category, image_url, image_source, uploader, pixabay_url = prepare_post_data(final_summary, title)
        if not post_data:
            attempts += 1
            continue

        # Fetch image: prefer Flickr via DuckDuckGo, fall back to generic lookup.
        image_url, image_source, uploader, page_url = get_flickr_image_via_ddg(image_query, relevance_keywords)
        if not image_url:
            image_url, image_source, uploader, page_url = get_image(image_query)

        # Generate hooks and initial CTA (post_url unknown until after first post).
        hook = get_dynamic_hook(post_data["title"]).strip()
        cta = select_best_cta(post_data["title"], final_summary, post_url=None)
        post_data["content"] = f"{final_summary}\n\n{cta}"

        # Post to WordPress and tweet
        global is_posting
        is_posting = True
        try:
            post_id, post_url = post_to_wp(
                post_data=post_data,
                category=category,
                link=link,
                author=author,
                image_url=image_url,
                original_source=original_source,
                image_source=image_source,
                uploader=uploader,
                pixabay_url=pixabay_url,
                interest_score=interest_score,
                should_post_tweet=True  # Post the X tweet on the first call
            )
        finally:
            is_posting = False

        if post_id:
            # Re-post with a CTA that can reference the now-known post URL.
            cta = select_best_cta(post_data["title"], final_summary, post_url=post_url)
            post_data["content"] = f"{final_summary}\n\n{cta}"
            is_posting = True
            try:
                post_to_wp(
                    post_data=post_data,
                    category=category,
                    link=link,
                    author=author,
                    image_url=image_url,
                    original_source=original_source,
                    image_source=image_source,
                    uploader=uploader,
                    pixabay_url=pixabay_url,
                    interest_score=interest_score,
                    post_id=post_id,
                    should_post_tweet=False  # Skip X tweet on the update call
                )
            finally:
                is_posting = False

            # Record the title (and image, if any) so they are never reused.
            timestamp = datetime.now(timezone.utc).isoformat()
            save_json_file(POSTED_TITLES_FILE, title, timestamp)
            posted_titles.add(title)
            logging.info(f"Successfully saved '{title}' to {POSTED_TITLES_FILE}")

            if image_url:
                save_json_file(USED_IMAGES_FILE, image_url, timestamp)
                used_images.add(image_url)
                logging.info(f"Saved image '{image_url}' to {USED_IMAGES_FILE}")

            print(f"***** SUCCESS: Posted '{post_data['title']}' (ID: {post_id}) from Google Trends *****")
            logging.info(f"***** SUCCESS: Posted '{post_data['title']}' (ID: {post_id}) from Google Trends *****")
            return post_data, category, random.randint(0, 1800)

        attempts += 1
        logging.info(f"WP posting failed for '{post_data['title']}'")

    print("No interesting Google Trend found after attempts")
    logging.info("No interesting Google Trend found after attempts")
    return None, None, random.randint(600, 1800)
def run_google_trends_automator(): def run_google_trends_automator():
logging.info("***** Google Trends Automator Launched *****") logging.info("***** Google Trends Automator Launched *****")

@ -316,7 +316,7 @@ def curate_from_reddit():
uploader=uploader, uploader=uploader,
pixabay_url=pixabay_url, pixabay_url=pixabay_url,
interest_score=interest_score, interest_score=interest_score,
post_tweet=True # Post the X tweet on the first call should_post_tweet=True # Post the X tweet on the first call
) )
finally: finally:
is_posting = False is_posting = False
@ -338,7 +338,7 @@ def curate_from_reddit():
pixabay_url=pixabay_url, pixabay_url=pixabay_url,
interest_score=interest_score, interest_score=interest_score,
post_id=post_id, post_id=post_id,
post_tweet=False # Skip X tweet on the update call should_post_tweet=False # Skip X tweet on the update call
) )
finally: finally:
is_posting = False is_posting = False

@ -308,7 +308,7 @@ def curate_from_rss():
uploader=uploader, uploader=uploader,
pixabay_url=pixabay_url, pixabay_url=pixabay_url,
interest_score=interest_score, interest_score=interest_score,
post_tweet=True # Post the X tweet on the first call should_post_tweet=True # Post the X tweet on the first call
) )
finally: finally:
is_posting = False is_posting = False
@ -330,7 +330,7 @@ def curate_from_rss():
pixabay_url=pixabay_url, pixabay_url=pixabay_url,
interest_score=interest_score, interest_score=interest_score,
post_id=post_id, post_id=post_id,
post_tweet=False # Skip X tweet on the update call should_post_tweet=False # Skip X tweet on the update call
) )
finally: finally:
is_posting = False is_posting = False

@ -620,7 +620,7 @@ def get_wp_tag_id(tag_name, wp_base_url, wp_username, wp_password):
logging.error(f"Failed to get WP tag ID for '{tag_name}': {e}") logging.error(f"Failed to get WP tag ID for '{tag_name}': {e}")
return None return None
def post_to_wp(post_data, category, link, author, image_url, original_source, image_source="Pixabay", uploader=None, pixabay_url=None, interest_score=4, post_id=None, post_tweet=True): def post_to_wp(post_data, category, link, author, image_url, original_source, image_source="Pixabay", uploader=None, pixabay_url=None, interest_score=4, post_id=None, should_post_tweet=True):
wp_base_url = "https://insiderfoodie.com/wp-json/wp/v2" wp_base_url = "https://insiderfoodie.com/wp-json/wp/v2"
logging.info(f"Starting post_to_wp for '{post_data['title']}', image_source: {image_source}") logging.info(f"Starting post_to_wp for '{post_data['title']}', image_source: {image_source}")
@ -725,12 +725,12 @@ def post_to_wp(post_data, category, link, author, image_url, original_source, im
timestamp = datetime.now(timezone.utc).isoformat() timestamp = datetime.now(timezone.utc).isoformat()
save_post_to_recent(post_data["title"], post_url, author["username"], timestamp) save_post_to_recent(post_data["title"], post_url, author["username"], timestamp)
# Post article tweet to X only if post_tweet is True # Post article tweet to X only if should_post_tweet is True
if post_tweet: if should_post_tweet:
try: try:
post = {"title": post_data["title"], "url": post_url} post = {"title": post_data["title"], "url": post_url}
tweet = generate_article_tweet(author, post, author["persona"]) tweet = generate_article_tweet(author, post, author["persona"])
if post_tweet(author, tweet): if post_tweet(author, tweet): # Use the actual post_tweet function
logging.info(f"Successfully posted article tweet for {author['username']} on X") logging.info(f"Successfully posted article tweet for {author['username']} on X")
else: else:
logging.warning(f"Failed to post article tweet for {author['username']} on X") logging.warning(f"Failed to post article tweet for {author['username']} on X")

Loading…
Cancel
Save