from fastapi import FastAPI, File, UploadFile, Form, HTTPException
from fastapi.responses import JSONResponse
import tempfile
from dotenv import load_dotenv
import os
import google.generativeai as genai
import json
import logging  # Added for better debugging

load_dotenv()

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI()
# --- Configuration ---
# Load the API key securely from an environment variable
API_KEY = os.getenv("GOOGLE_API_KEY")
if not API_KEY:
    logger.error("GOOGLE_API_KEY environment variable not set.")
    # In a real application you might raise an exception or exit here.
    # For now we proceed, but Gemini calls will fail without a valid key.
# Configure the Gemini client globally
try:
    genai.configure(api_key=API_KEY)
    logger.info("Google Gemini client configured successfully.")
except Exception as e:
    logger.error(f"Failed to configure Google Gemini client: {e}")
    # Handle configuration error appropriately
# Initialize the Generative Model globally.
# Use a model that supports image input, e.g. gemini-2.0-flash or gemini-1.5-flash-latest.
try:
    model = genai.GenerativeModel("gemini-2.0-flash")
    logger.info(f"Google Gemini model '{model.model_name}' initialized.")
except Exception as e:
    logger.error(f"Failed to initialize Google Gemini model: {e}")
    # Handle model initialization error appropriately
# --- FastAPI Endpoint ---
@app.post("/rate_outfit")  # Route path assumed; the decorator was missing from the original listing
async def rate_outfit(image: UploadFile = File(...), category: str = Form(...), occasion: str = Form(...), Place: str = Form(...), type_of_feedback: str = Form(...)):
    logger.info(f"Received request to rate outfit. Category: {category}, Image: {image.filename}, Content-Type: {image.content_type}")
    if image.content_type not in ["image/jpeg", "image/png", "image/jpg"]:
        logger.warning(f"Invalid image content type: {image.content_type}")
        raise HTTPException(status_code=400, detail="Please upload a valid image file (jpeg, png, jpg).")
    tmp_path = None  # Initialize tmp_path
    try:
        # Save image to a temp file safely.
        # Using a context manager ensures the file is closed properly.
        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(image.filename)[1]) as tmp:
            content = await image.read()
            tmp.write(content)
            tmp_path = tmp.name
        logger.info(f"Image saved temporarily to: {tmp_path}")

        # Upload the image to Gemini using genai.upload_file
        logger.info("Uploading image to Gemini...")
        uploaded_file = genai.upload_file(path=tmp_path, display_name=image.filename)
        logger.info(f"Image uploaded successfully: {uploaded_file.name}")
        # Define the prompt clearly
        prompt = (
            f"You are an AI fashion assistant. Based on the category '{category}', analyze the provided image. "
            f"The user is going to an occasion of {occasion} at {Place} and wants {type_of_feedback} feedback from you, so answer accordingly. Be very enthusiastic and excited. "
            "Extract the following information and provide the response ONLY as a valid JSON object, without any surrounding text, markdown formatting (like ```json), or explanations. "
            "The JSON object should follow this exact schema: "
            '{"Tag": "A short, catchy caption phrase based on the image, including a relevant emoji.", '
            '"Feedback": "Concise advice (1-2 sentences) on how the look could be improved or styled differently."}'
            " --- IMPORTANT SAFETY CHECK: If the image contains nudity, offensive content, any religious context, political figures, or anything inappropriate for a fashion context, respond ONLY with the following JSON: "
            '{"error": "Please upload an appropriate image"} --- '
            "Focus on being concise and eye-catching."
        )
        # Prepare content for the model (prompt first, then file).
        # Pass the uploaded file object returned by upload_file, not just the path.
        content_parts = [prompt, uploaded_file]

        logger.info("Generating content with Gemini model...")
        response = model.generate_content(content_parts)
        logger.info("Received response from Gemini.")
        # logger.debug(f"Raw Gemini response text: {response.text}")  # Optional: log raw response for debugging

        # Clean and parse the response
        text_response = response.text.strip()
        # Robust cleaning: remove potential markdown code fences
        if text_response.startswith("```json"):
            text_response = text_response[7:]  # Remove leading ```json
        if text_response.endswith("```"):
            text_response = text_response[:-3]  # Remove trailing ```
        text_response = text_response.strip()  # Strip again after removing markdown
        logger.info(f"Cleaned Gemini response text: {text_response}")
        # Attempt to parse the cleaned JSON
        try:
            result = json.loads(text_response)
            # Validate that the result contains the expected keys or the error key
            if "error" in result:
                logger.warning(f"Gemini detected inappropriate image: {result['error']}")
                # Return the error JSON as requested; a 400 may be preferable depending on the desired API behavior
                return JSONResponse(content=result, status_code=200)
            elif "Tag" not in result or "Feedback" not in result:
                logger.error(f"Gemini response missing expected keys 'Tag' or 'Feedback'. Got: {result}")
                raise HTTPException(status_code=500, detail="AI response format error: Missing expected keys.")
            logger.info(f"Successfully parsed Gemini response: {result}")
            return JSONResponse(content=result)
        except json.JSONDecodeError as json_err:
            logger.error(f"Failed to decode JSON response from Gemini: {json_err}")
            logger.error(f"Invalid JSON string received: {text_response}")
            raise HTTPException(status_code=500, detail="AI response format error: Invalid JSON.")
        except HTTPException:
            # Let HTTPExceptions raised above propagate with their original detail
            raise
        except Exception as parse_err:  # Catch other potential errors during parsing/validation
            logger.error(f"Error processing Gemini response: {parse_err}")
            raise HTTPException(status_code=500, detail="Error processing AI response.")
    except genai.types.generation_types.BlockedPromptException as block_err:
        logger.warning(f"Gemini blocked the prompt or response due to safety settings: {block_err}")
        # Return a 400 Bad Request with a generic safety message
        error_response = {"error": "Request blocked due to safety policies. Please ensure the image is appropriate."}
        return JSONResponse(content=error_response, status_code=400)
    except HTTPException:
        # Re-raise HTTPExceptions unchanged so their status codes and details are preserved
        raise
    except Exception as e:
        logger.error(f"An unexpected error occurred: {e}", exc_info=True)  # Log full traceback
        # Generic error for security reasons; details are logged
        raise HTTPException(status_code=500, detail="An internal server error occurred.")
    finally:
        # Clean up the temp image file if it was created
        if tmp_path and os.path.exists(tmp_path):
            try:
                os.remove(tmp_path)
                logger.info(f"Temporary file {tmp_path} removed.")
            except OSError as e:
                logger.error(f"Error removing temporary file {tmp_path}: {e}")
# --- To Run (if this is the main script) ---
if __name__ == "__main__":
    import uvicorn
    # Remember to set the GOOGLE_API_KEY environment variable before running.
    # Example (Linux/macOS): export GOOGLE_API_KEY='your_actual_api_key'
    # Example (Windows CMD): set GOOGLE_API_KEY=your_actual_api_key
    # Example (Windows PowerShell): $env:GOOGLE_API_KEY='your_actual_api_key'
    uvicorn.run(app, host="0.0.0.0", port=8000)
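
# --- Example request (sketch, not part of the original script) ---
# A minimal illustration of how a client might call this endpoint, assuming the
# server runs locally on port 8000 and uses the "/rate_outfit" route assumed above;
# the field values below are placeholders:
#
#   curl -X POST http://localhost:8000/rate_outfit \
#     -F "image=@outfit.jpg" \
#     -F "category=casual" \
#     -F "occasion=birthday party" \
#     -F "Place=rooftop cafe" \
#     -F "type_of_feedback=honest"
#
# On success the response is a JSON object with "Tag" and "Feedback" keys;
# inappropriate images yield {"error": "Please upload an appropriate image"}.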