diff --git a/foodie_automator_google.py b/foodie_automator_google.py
index 9ee5d8a..8411ae7 100644
--- a/foodie_automator_google.py
+++ b/foodie_automator_google.py
@@ -178,85 +178,117 @@ def fetch_duckduckgo_news_context(trend_title, hours=24):
logging.warning(f"DuckDuckGo News context fetch failed for '{trend_title}': {e}")
return trend_title
-def curate_from_google_trends(geo_list=['US']):
- original_source = 'Google Trends'
- for geo in geo_list:
- trends = scrape_google_trends(geo=geo)
- if not trends:
- print(f"No trends available for geo={geo}")
- logging.info(f"No trends available for geo={geo}")
+def curate_from_google_trends():
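+    """Curate one post from Google Trends: pick an unposted trend, score it, summarize it, and publish."""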
+ # Fetch Google Trends data
+ trends = fetch_google_trends()
+ if not trends:
+ print("No Google Trends data available")
+ logging.info("No Google Trends data available")
+        return None, None, random.randint(600, 1800)  # Return a retry delay, consistent with the fall-through return below
+
+ attempts = 0
+ max_attempts = 10
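+    # Work through up to max_attempts trends, skipping duplicates, filtered topics, and low-interest items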
+ while attempts < max_attempts and trends:
+ trend = trends.pop(0)
+ title = trend["title"]
+ link = trend.get("link", "https://trends.google.com/")
+ summary = trend.get("summary", "")
+ source_name = "Google Trends"
+        original_source = source_name
+
+ if title in posted_titles:
+ print(f"Skipping already posted trend: {title}")
+ logging.info(f"Skipping already posted trend: {title}")
+ attempts += 1
continue
- attempts = 0
- max_attempts = 10
- while attempts < max_attempts and trends:
- trend = trends.pop(0)
- title = trend["title"]
- link = trend["link"]
- search_volume = trend["search_volume"]
- print(f"Trying Trend: {title} with search volume: {search_volume} for geo={geo}")
- logging.info(f"Trying Trend: {title} with search volume: {search_volume} for geo={geo}")
-
- if title in posted_titles:
- print(f"Skipping already posted trend: {title}")
- logging.info(f"Skipping already posted trend: {title}")
- attempts += 1
- continue
+ print(f"Trying Google Trend: {title} from {source_name}")
+ logging.info(f"Trying Google Trend: {title} from {source_name}")
- image_query, relevance_keywords, skip = smart_image_and_filter(title, "")
- if skip:
- print(f"Skipping unwanted trend: {title}")
- logging.info(f"Skipping unwanted trend: {title}")
- attempts += 1
- continue
+ # Check if the trend should be filtered out
+ image_query, relevance_keywords, skip = smart_image_and_filter(title, summary)
+ if skip:
+ print(f"Skipping filtered Google Trend: {title}")
+ logging.info(f"Skipping filtered Google Trend: {title}")
+ attempts += 1
+ continue
- context = fetch_duckduckgo_news_context(title)
- scoring_content = f"{title}\n\n{context}"
- interest_score = is_interesting(scoring_content)
- logging.info(f"Interest score for '{title}' in geo={geo}: {interest_score}")
- if interest_score < 6:
- print(f"Trend Interest Too Low: {interest_score}")
- logging.info(f"Trend Interest Too Low: {interest_score}")
- attempts += 1
- continue
+        # Calculate interest score from the trend's own title and summary (the DuckDuckGo context fetch is no longer used here)
+ scoring_content = f"{title}\n\n{summary}"
+ interest_score = is_interesting(scoring_content)
+ logging.info(f"Interest score for '{title}': {interest_score}")
+ if interest_score < 6:
+ print(f"Google Trends Interest Too Low: {interest_score}")
+ logging.info(f"Google Trends Interest Too Low: {interest_score}")
+ attempts += 1
+ continue
- num_paragraphs = determine_paragraph_count(interest_score)
- extra_prompt = (
- f"Generate exactly {num_paragraphs} paragraphs. "
- f"Do not mention Google Trends, Google, or include any links. "
- f"Summarize as a standalone food industry trend, focusing on '{title}' and its context."
- "Do not include emojis in the summary."
- )
- final_summary = summarize_with_gpt4o(
- scoring_content,
- source_name="Google Trends",
- source_url=link,
- interest_score=interest_score,
- extra_prompt=extra_prompt
- )
- if not final_summary:
- logging.info(f"Summary failed for '{title}'")
- attempts += 1
- continue
+ # Summarize the trend
+ num_paragraphs = determine_paragraph_count(interest_score)
+ extra_prompt = (
+ f"Generate exactly {num_paragraphs} paragraphs. "
+ f"FOCUS: Summarize ONLY the provided content, explicitly mentioning '{title}' and sticking to its specific topic and details. "
+            f"Do NOT introduce unrelated concepts. Expand on the core idea with relevant context about its appeal or significance in food trends. "
+ "Do not include emojis in the summary."
+ )
+        final_summary = summarize_with_gpt4o(
+            scoring_content,
+            source_name=source_name,
+            source_url=link,
+            interest_score=interest_score,
+            extra_prompt=extra_prompt
+        )
+ if not final_summary:
+ logging.info(f"Summary failed for '{title}'")
+ attempts += 1
+ continue
- final_summary = insert_link_naturally(final_summary, "Google Trends", link)
- post_data, author, category, image_url, image_source, uploader, pixabay_url = prepare_post_data(final_summary, title)
- if not post_data:
- attempts += 1
- continue
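+        # Weave the source attribution link into the summary text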
+ final_summary = insert_link_naturally(final_summary, source_name, link)
- image_url, image_source, uploader, page_url = get_flickr_image_via_ddg(image_query, relevance_keywords)
- if not image_url:
- image_url, image_source, uploader, page_url = get_image(image_query)
+ # Prepare post data
+ post_data, author, category, image_url, image_source, uploader, pixabay_url = prepare_post_data(final_summary, title)
+ if not post_data:
+ attempts += 1
+ continue
- hook = get_dynamic_hook(post_data["title"]).strip()
- cta = select_best_cta(post_data["title"], final_summary, post_url=None)
- post_data["content"] = f"{final_summary}\n\n{cta}"
+        # Fetch an image: try Flickr via DuckDuckGo first, then fall back to the generic image source
+ image_url, image_source, uploader, page_url = get_flickr_image_via_ddg(image_query, relevance_keywords)
+ if not image_url:
+ image_url, image_source, uploader, page_url = get_image(image_query)
+
+        # Generate the hook and an initial CTA (post URL not yet known)
+ hook = get_dynamic_hook(post_data["title"]).strip()
+ cta = select_best_cta(post_data["title"], final_summary, post_url=None)
+
+ post_data["content"] = f"{final_summary}\n\n{cta}"
+
+ # Post to WordPress and tweet
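+        # is_posting flags an in-flight publish (assumption: checked elsewhere, e.g. by a shutdown handler)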
+ global is_posting
+ is_posting = True
+ try:
+ post_id, post_url = post_to_wp(
+ post_data=post_data,
+ category=category,
+ link=link,
+ author=author,
+ image_url=image_url,
+ original_source=original_source,
+ image_source=image_source,
+ uploader=uploader,
+ pixabay_url=pixabay_url,
+ interest_score=interest_score,
+ should_post_tweet=True # Post the X tweet on the first call
+ )
+ finally:
+ is_posting = False
- global is_posting
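+        # Second pass: now that the post URL is known, rebuild the CTA around it and update the post without re-tweeting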
+ if post_id:
+ cta = select_best_cta(post_data["title"], final_summary, post_url=post_url)
+ post_data["content"] = f"{final_summary}\n\n{cta}"
is_posting = True
try:
- post_id, post_url = post_to_wp(
+ post_to_wp(
post_data=post_data,
category=category,
link=link,
@@ -266,51 +298,33 @@ def curate_from_google_trends(geo_list=['US']):
image_source=image_source,
uploader=uploader,
pixabay_url=pixabay_url,
- interest_score=interest_score
+ interest_score=interest_score,
+ post_id=post_id,
+ should_post_tweet=False # Skip X tweet on the update call
)
finally:
is_posting = False
- if post_id:
- cta = select_best_cta(post_data["title"], final_summary, post_url=post_url)
- post_data["content"] = f"{final_summary}\n\n{cta}"
- is_posting = True
- try:
- post_to_wp(
- post_data=post_data,
- category=category,
- link=link,
- author=author,
- image_url=image_url,
- original_source=original_source,
- image_source=image_source,
- uploader=uploader,
- pixabay_url=pixabay_url,
- interest_score=interest_score,
- post_id=post_id
- )
- finally:
- is_posting = False
-
- timestamp = datetime.now(timezone.utc).isoformat()
- save_json_file(POSTED_TITLES_FILE, title, timestamp)
- posted_titles.add(title)
- logging.info(f"Successfully saved '{title}' to {POSTED_TITLES_FILE}")
-
- if image_url:
- save_json_file(USED_IMAGES_FILE, image_url, timestamp)
- logging.info(f"Saved image '{image_url}' to {USED_IMAGES_FILE}")
-
- print(f"***** SUCCESS: Posted '{post_data['title']}' (ID: {post_id}) from trend for geo={geo} *****")
- logging.info(f"***** SUCCESS: Posted '{post_data['title']}' (ID: {post_id}) from trend for geo={geo} *****")
- return post_data, category, random.randint(0, 1800)
-
- print(f"No interesting trend found for geo={geo}")
- logging.info(f"No interesting trend found for geo={geo}")
-
- print(f"No interesting trend found across regions {geo_list}")
- logging.info(f"No interesting trend found across regions {geo_list}")
- return None, None, random.randint(600, 1200)
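+            # Persist the title and image so later runs skip them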
+ timestamp = datetime.now(timezone.utc).isoformat()
+ save_json_file(POSTED_TITLES_FILE, title, timestamp)
+ posted_titles.add(title)
+ logging.info(f"Successfully saved '{title}' to {POSTED_TITLES_FILE}")
+
+ if image_url:
+ save_json_file(USED_IMAGES_FILE, image_url, timestamp)
+ used_images.add(image_url)
+ logging.info(f"Saved image '{image_url}' to {USED_IMAGES_FILE}")
+
+ print(f"***** SUCCESS: Posted '{post_data['title']}' (ID: {post_id}) from Google Trends *****")
+ logging.info(f"***** SUCCESS: Posted '{post_data['title']}' (ID: {post_id}) from Google Trends *****")
+ return post_data, category, random.randint(0, 1800)
+
+ attempts += 1
+ logging.info(f"WP posting failed for '{post_data['title']}'")
+
+ print("No interesting Google Trend found after attempts")
+ logging.info("No interesting Google Trend found after attempts")
+ return None, None, random.randint(600, 1800)
def run_google_trends_automator():
logging.info("***** Google Trends Automator Launched *****")
diff --git a/foodie_automator_reddit.py b/foodie_automator_reddit.py
index 4f33b79..ab34ba0 100644
--- a/foodie_automator_reddit.py
+++ b/foodie_automator_reddit.py
@@ -316,7 +316,7 @@ def curate_from_reddit():
uploader=uploader,
pixabay_url=pixabay_url,
interest_score=interest_score,
- post_tweet=True # Post the X tweet on the first call
+ should_post_tweet=True # Post the X tweet on the first call
)
finally:
is_posting = False
@@ -338,7 +338,7 @@ def curate_from_reddit():
pixabay_url=pixabay_url,
interest_score=interest_score,
post_id=post_id,
- post_tweet=False # Skip X tweet on the update call
+ should_post_tweet=False # Skip X tweet on the update call
)
finally:
is_posting = False
diff --git a/foodie_automator_rss.py b/foodie_automator_rss.py
index f247366..a200c5e 100644
--- a/foodie_automator_rss.py
+++ b/foodie_automator_rss.py
@@ -308,7 +308,7 @@ def curate_from_rss():
uploader=uploader,
pixabay_url=pixabay_url,
interest_score=interest_score,
- post_tweet=True # Post the X tweet on the first call
+ should_post_tweet=True # Post the X tweet on the first call
)
finally:
is_posting = False
@@ -330,7 +330,7 @@ def curate_from_rss():
pixabay_url=pixabay_url,
interest_score=interest_score,
post_id=post_id,
- post_tweet=False # Skip X tweet on the update call
+ should_post_tweet=False # Skip X tweet on the update call
)
finally:
is_posting = False
diff --git a/foodie_utils.py b/foodie_utils.py
index 3dcd3fb..8abec9e 100644
--- a/foodie_utils.py
+++ b/foodie_utils.py
@@ -620,7 +620,7 @@ def get_wp_tag_id(tag_name, wp_base_url, wp_username, wp_password):
logging.error(f"Failed to get WP tag ID for '{tag_name}': {e}")
return None
-def post_to_wp(post_data, category, link, author, image_url, original_source, image_source="Pixabay", uploader=None, pixabay_url=None, interest_score=4, post_id=None, post_tweet=True):
+def post_to_wp(post_data, category, link, author, image_url, original_source, image_source="Pixabay", uploader=None, pixabay_url=None, interest_score=4, post_id=None, should_post_tweet=True):
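+    # 'post_tweet' (bool) was renamed to 'should_post_tweet' so it no longer shadows the post_tweet() function called below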
wp_base_url = "https://insiderfoodie.com/wp-json/wp/v2"
logging.info(f"Starting post_to_wp for '{post_data['title']}', image_source: {image_source}")
@@ -725,12 +725,12 @@ def post_to_wp(post_data, category, link, author, image_url, original_source, im
timestamp = datetime.now(timezone.utc).isoformat()
save_post_to_recent(post_data["title"], post_url, author["username"], timestamp)
- # Post article tweet to X only if post_tweet is True
- if post_tweet:
+ # Post article tweet to X only if should_post_tweet is True
+ if should_post_tweet:
try:
post = {"title": post_data["title"], "url": post_url}
tweet = generate_article_tweet(author, post, author["persona"])
- if post_tweet(author, tweet):
+ if post_tweet(author, tweet): # Use the actual post_tweet function
logging.info(f"Successfully posted article tweet for {author['username']} on X")
else:
logging.warning(f"Failed to post article tweet for {author['username']} on X")