Internal server error with the model gemini-exp-1206

I’ve been getting an internal server error from the model gemini-exp-1206 since yesterday. I’m sending a prompt and 4 photos, and this was working before. Here is the code that invokes the Gemini API.
Is this purely an issue on the Google side, or can I do anything on the client to address this error?

import io
import logging
import os
import base64
import PIL.Image

import google.generativeai as genai
from sentry_sdk import set_user

# Configure the API key.
# Read the key from the environment so credentials are never hard-coded.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    print("Error: GOOGLE_API_KEY environment variable not set.")
    # NOTE(review): execution continues past this branch, so configure() is
    # still called with api_key=None and every later API call will fail at
    # request time -- consider raising or exiting here instead of printing.
    # exit(1)
genai.configure(api_key=GOOGLE_API_KEY)

# Single module-level model instance, reused by call_gemini_api() below.
model = genai.GenerativeModel('gemini-exp-1206')

def call_gemini_api(images, prompt):
    """
    Calls the Gemini API (module-level model, gemini-exp-1206) with images and a prompt.

    Args:
        images: A list of image file paths.
        prompt: The text prompt to send to the Gemini API.

    Returns:
        The response text (str) on success, or a dict with an 'error' key
        (and optionally an 'exception' key) on failure.
    """
    # Image formats the Gemini API accepts as inline data, mapped to their
    # MIME types. Anything else is rejected up front with a clear error.
    supported_mime_types = {
        "JPEG": "image/jpeg",
        "PNG": "image/png",
        "GIF": "image/gif",
        "WEBP": "image/webp",
    }

    try:
        if not images:
            return {'error': 'No images provided for processing.'}

        image_parts = []
        for image_path in images:
            # Context manager guarantees the underlying file handle is closed.
            with PIL.Image.open(image_path) as pil_image:
                source_format = pil_image.format
                mime_type = supported_mime_types.get(source_format)
                if mime_type is None:
                    return {'error': f'Unsupported image format: {source_format}'}

                # Re-serialize in the image's ORIGINAL format so the bytes
                # always match the declared mime_type. (Previously PNG/GIF
                # inputs were re-saved as JPEG while still labeled
                # image/png / image/gif -- a mismatch the server can choke
                # on; saving RGBA/palette images as JPEG also raises an
                # OSError in Pillow.)
                img_byte_arr = io.BytesIO()
                pil_image.save(img_byte_arr, format=source_format)

            image_parts.append({
                'mime_type': mime_type,
                # Pass RAW bytes: the google-generativeai SDK performs the
                # base64 encoding of inline data itself. Pre-encoding here
                # made the server receive base64 text labeled as image
                # bytes, which can surface as a 500 InternalServerError.
                'data': img_byte_arr.getvalue(),
            })

        # Send the prompt followed by the image parts in a single request.
        response = model.generate_content([prompt] + image_parts)

        # Check for a valid response.
        if response:
            return response.text
        else:
            return {'error': 'Failed to get a valid response from the Gemini API.'}

    except Exception as e:
        # Log the full traceback, then return a structured error so the
        # caller (e.g. an RQ worker) can surface it instead of crashing.
        logging.exception(e)
        return {
            'error': 'An error occurred during the Gemini API call.',
            'exception': str(e)
        }

def advice(images, prompt, email=None):
    """
    Fetches fitness advice from the Gemini API based on images and a prompt.

    This function is intended to be called by an RQ worker.

    Args:
        images: A list of image file paths.
        prompt: The prompt to send to the Gemini API.
        email: User's email (optional, used for logging and Sentry tagging).

    Returns:
        The response text (str) on success, or a dict with an 'error' key
        (and optionally an 'exception' key) on failure.
    """
    if email:
        print(f"Processing Gemini request for {email}")
        # Tag the Sentry scope so any exception below is attributed to this user.
        set_user({"email": email})

    try:
        # Delegate the actual API interaction to call_gemini_api().
        result = call_gemini_api(images, prompt)

        # call_gemini_api() signals failure with an {'error': ...} dict;
        # anything else is the successful response text.
        if isinstance(result, dict) and 'error' in result:
            print(f"Gemini API call failed: {result['error']}")
            return {
                'error': result['error']
            }
        else:
            print("Gemini API call successful")
            return result

    except Exception as e:
        # Catch-all boundary for the worker: log the traceback and return a
        # structured error instead of letting the job crash.
        logging.exception(e)
        return {
            'error': 'An error occurred during Gemini API processing.',
            'exception': str(e)
        }

Yes. The same experience in my case: EXP-1114, EXP-1121 and EXP-1206 do not work properly. The error is

InternalServerError: 500 POST https://generativelanguage.googleapis.com/v1beta/models/gemini-exp-1114:generateContent?%24alt=json%3Benum-encoding%3Dint: An internal error has occurred. Please retry or report in https://developers.generativeai.google/guide/troubleshooting

The models randomly start working one by one, but then go offline again for hours at a time.

  • Tried the legacy API, the new API, and the OpenAI-compatible API
  • Tried creating a new key
  • Tried removing the system prompt / samplers / settings
  • Tried various prompts
    None of it worked.

Flash 2.0 works, though.

I have the same issue; only Flash 2.0 works.