Fix Flickr 'Large' image size issue: try Large first, fall back to Medium

my-fix-branch
Shane 7 months ago
parent cdc54f3f14
commit c936555741
  1. 119
      foodie_utils.py

@ -275,40 +275,51 @@ def get_image(search_query):
return None return None
def process_photo(photo):
    """Validate one Flickr photo, record it, and return its URL metadata.

    Tries the 'Large' size first and falls back to 'Medium' when Flickr
    reports Large as unavailable (the original cause of the "large image"
    issue). Photos whose tags or title match an excluded keyword, or whose
    URL was already used, are skipped.

    NOTE(review): nested inside get_image() — relies on the closure variable
    `search_query` and on module-level `exclude_keywords`, `used_images`,
    `save_used_images`, `flickr_api`, `logging`, `json`, and `datetime`.

    Returns:
        (img_url, "Flickr", uploader, page_url) on success, else None.
    """
    tags = [tag.text.lower() for tag in photo.getTags()]
    title = photo.title.lower() if photo.title else ""
    matched_keywords = [kw for kw in exclude_keywords if kw in tags or kw in title]
    if matched_keywords:
        logging.info(f"Skipping image with unwanted keywords: {photo.id} (tags: {tags}, title: {title}, matched: {matched_keywords})")
        return None

    # Prefer 'Large', fall back to 'Medium'. A single preference-ordered loop
    # replaces the duplicated nested try/except blocks while keeping the same
    # log messages and the same give-up behavior when Medium also fails.
    img_url = None
    for size_label in ('Large', 'Medium'):
        try:
            img_url = photo.getPhotoFile(size_label=size_label)
            break
        except flickr_api.flickrerrors.FlickrError as err:
            if size_label == 'Large':
                logging.info(f"Large size not available for photo {photo.id}: {err}, trying Medium")
            else:
                logging.warning(f"Medium size not available for photo {photo.id}: {err}")
                return None

    # Reject empty URLs and duplicates of images already posted.
    if not img_url or img_url in used_images:
        return None

    uploader = photo.owner.username
    page_url = f"https://www.flickr.com/photos/{photo.owner.nsid}/{photo.id}"
    used_images.add(img_url)
    save_used_images()

    flickr_data = {
        "title": search_query,
        "image_url": img_url,
        "source": "Flickr",
        "uploader": uploader,
        "page_url": page_url,
        "timestamp": datetime.now(timezone.utc).isoformat()
    }
    flickr_file = "/home/shane/foodie_automator/flickr_images.json"
    # Append one JSON object per line (JSONL) so the file never needs rewriting.
    with open(flickr_file, 'a') as f:
        json.dump(flickr_data, f)
        f.write('\n')
    logging.info(f"Saved Flickr image metadata to {flickr_file}: {img_url}")
    logging.info(f"Selected Flickr image: {img_url} by {uploader} for query '{search_query}' (tags: {tags})")
    return img_url, "Flickr", uploader, page_url
def search_ddg_for_flickr(query): def search_ddg_for_flickr(query):
ddg_query = f"{query} site:flickr.com" ddg_query = f"{query} site:flickr.com"
@ -1010,24 +1021,19 @@ if os.path.exists(used_images_file):
else: else:
data = json.loads(content) data = json.loads(content)
if not isinstance(data, list): if not isinstance(data, list):
logging.warning(f"Invalid format in {used_images_file}: expected a list, got {type(data)}. Resetting.") logging.warning(f"Invalid format in {used_images_file}: expected a list, got {type(data)}. Converting to list.")
data = [] if isinstance(data, dict):
else: # If it's a dict, try to extract URLs from values
# Handle malformed format (list of lists or invalid entries) data = [v for v in data.values() if isinstance(v, str) and v.startswith('https://')]
flat_data = [] else:
for item in data: logging.warning(f"Cannot convert {type(data)} to list. Resetting to empty list.")
if isinstance(item, str) and item.startswith('https://'): data = []
flat_data.append(item) # Filter out non-string or non-URL entries
elif isinstance(item, list): data = [item for item in data if isinstance(item, str) and item.startswith('https://')]
logging.warning(f"Fixing malformed entry in {used_images_file}: {item}")
flat_data.extend([sub_item for sub_item in item if isinstance(sub_item, str) and sub_item.startswith('https://')])
else:
logging.warning(f"Skipping invalid entry in {used_images_file}: {item}")
data = flat_data
used_images.update(data) used_images.update(data)
logging.info(f"Loaded {len(used_images)} used image URLs from {used_images_file}") logging.info(f"Loaded {len(used_images)} used image URLs from {used_images_file}")
except Exception as e: except Exception as e:
logging.warning(f"Failed to load used images from {used_images_file}: {e}. Resetting file.") logging.warning(f"Failed to load used images from {used_images_file}: {e}. Resetting to empty set.")
used_images = set() used_images = set()
with open(used_images_file, 'w') as f: with open(used_images_file, 'w') as f:
json.dump([], f) json.dump([], f)
@ -1035,17 +1041,14 @@ if os.path.exists(used_images_file):
def save_used_images():
    """Persist the module-level `used_images` set to `used_images_file` as JSON.

    Only well-formed 'https://' URL strings are written; any other entries
    (junk picked up from older, malformed save formats) are counted and
    logged. The list is sorted so the output file is deterministic and
    diff-friendly regardless of set iteration order. Failures are logged,
    never raised — persistence is best-effort.
    """
    try:
        # Filter to valid URL strings; sorting makes repeated saves of the
        # same set byte-identical.
        valid_urls = sorted(
            url for url in used_images
            if isinstance(url, str) and url.startswith('https://')
        )
        if len(valid_urls) != len(used_images):
            logging.warning(f"Found {len(used_images) - len(valid_urls)} invalid URLs in used_images set")
        with open(used_images_file, 'w') as f:
            json.dump(valid_urls, f, indent=2)
        logging.info(f"Saved {len(valid_urls)} used image URLs to {used_images_file}")
    except Exception as e:
        # Best-effort: a persistence failure must not crash the pipeline.
        logging.warning(f"Failed to save used images to {used_images_file}: {e}")

Loading…
Cancel
Save