### Description of the bug:
The file does not upload.
### Actual vs expected behavior:
expected behavior: file uploads and chat returns a result
actual behavior: file does not upload and chat returns a 500
### Any other information you'd like to share?
I'm trying to do chat with an uploaded file using this example code provided by AI studio
```
"""
Install the Google AI Python SDK
$ pip install google-generativeai
See the getting started guide for more information:
https://ai.google.dev/gemini-api/docs/get-started/python
"""
import os
import time
import google.generativeai as genai
# Authenticate the SDK with the key from the environment;
# raises KeyError if GEMINI_API_KEY is not set.
genai.configure(api_key=os.environ["GEMINI_API_KEY"])
def upload_to_gemini(path, mime_type=None):
    """Upload a local file to the Gemini Files API.

    See https://ai.google.dev/gemini-api/docs/prompting_with_media

    Args:
        path: Path of the file on the local file system.
        mime_type: Optional MIME type hint forwarded to the API.

    Returns:
        The file object returned by ``genai.upload_file``.
    """
    uploaded = genai.upload_file(path, mime_type=mime_type)
    print(f"Uploaded file '{uploaded.display_name}' as: {uploaded.uri}")
    return uploaded
def wait_for_files_active(files):
    """Block until every uploaded file reaches the ACTIVE state.

    Some files uploaded to the Gemini API need server-side processing
    before they can be used as prompt inputs; progress is exposed via
    the file's "state" field.  This is a simple blocking polling loop;
    production code should employ a more sophisticated approach.

    Args:
        files: Iterable of uploaded file objects (each with a ``name``).

    Raises:
        Exception: If any file ends in a state other than ACTIVE.
    """
    print("Waiting for file processing...")
    for uploaded in files:
        current = genai.get_file(uploaded.name)
        while current.state.name == "PROCESSING":
            print(".", end="", flush=True)
            time.sleep(10)  # poll interval
            current = genai.get_file(uploaded.name)
        if current.state.name != "ACTIVE":
            raise Exception(f"File {current.name} failed to process")
    print("...all files ready")
    print()
# Create the model.
# See https://ai.google.dev/api/python/google/generativeai/GenerativeModel
generation_config = dict(
    temperature=1,
    top_p=0.95,
    top_k=64,
    max_output_tokens=8192,
    response_mime_type="text/plain",
)

model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    generation_config=generation_config,
    # safety_settings = Adjust safety settings
    # See https://ai.google.dev/gemini-api/docs/safety-settings
)

# TODO Make these files available on the local file system
# You may need to update the file paths
files = [
    upload_to_gemini("c:/users/nagol/downloads/myfile.pdf", mime_type="application/pdf"),
]

# Some files have a processing delay; block until they are ready.
wait_for_files_active(files)
# Seed the conversation with the uploaded file and a canned exchange.
initial_history = [
    {
        "role": "user",
        "parts": [
            files[0],
            "describe this file.",
        ],
    },
    {
        "role": "model",
        "parts": [
            "Okay, ...",
        ],
    },
]

chat_session = model.start_chat(history=initial_history)

response = chat_session.send_message("can you tell me about ...")
print(response.text)
```
but I notice that when the script reports the file as uploaded, opening the returned file URI in a browser shows
```
{
"error": {
"code": 403,
"message": "Method doesn't allow unregistered callers (callers without established identity). Please use API Key or other form of API consumer identity to call this API.",
"status": "PERMISSION_DENIED"
}
}
```
which doesn't make sense, because I just configured the API key in the preceding line
`genai.configure(api_key=os.environ["GEMINI_API_KEY"])`
Full stack trace:
```
Uploaded file 'Dawn_of_Worlds_game_1_0Final.pdf' as: https://generativelanguage.googleapis.com/v1beta/files/jakzgrhxm5om
Waiting for file processing...
...all files ready
---------------------------------------------------------------------------
InternalServerError Traceback (most recent call last)
Cell In[8], line 94
74 wait_for_files_active(files)
76 chat_session = model.start_chat(
77 history=[
78 {
(...)
91 ]
92 )
---> 94 response = chat_session.send_message("player 1 turn")
96 print(response.text)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\google\generativeai\generative_models.py:586, in ChatSession.send_message(self, content, generation_config, safety_settings, stream, tools, tool_config, request_options)
579 #print("noo!",self._generation_config.get("candidate_count", 1))
580 #self._generation_config.set("candidate_count", 1)
582 raise ValueError(
583 "Invalid configuration: The chat functionality does not support `candidate_count` greater than 1."
584 )
--> 586 response = self.model.generate_content(
587 contents=history,
588 generation_config=generation_config,
589 safety_settings=safety_settings,
590 stream=stream,
591 tools=tools_lib,
592 tool_config=tool_config,
593 request_options=request_options,
594 )
596 self._check_response(response=response, stream=stream)
598 if self.enable_automatic_function_calling and tools_lib is not None:
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\google\generativeai\generative_models.py:331, in GenerativeModel.generate_content(self, contents, generation_config, safety_settings, stream, tools, tool_config, request_options)
329 return generation_types.GenerateContentResponse.from_iterator(iterator)
330 else:
--> 331 response = self._client.generate_content(
332 request,
333 **request_options,
334 )
335 return generation_types.GenerateContentResponse.from_response(response)
336 except google.api_core.exceptions.InvalidArgument as e:
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\google\ai\generativelanguage_v1beta\services\generative_service\client.py:827, in GenerativeServiceClient.generate_content(self, request, model, contents, retry, timeout, metadata)
824 self._validate_universe_domain()
826 # Send the request.
--> 827 response = rpc(
828 request,
829 retry=retry,
830 timeout=timeout,
831 metadata=metadata,
832 )
834 # Done; return the response.
835 return response
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\google\api_core\gapic_v1\method.py:131, in _GapicCallable.__call__(self, timeout, retry, compression, *args, **kwargs)
128 if self._compression is not None:
129 kwargs["compression"] = compression
--> 131 return wrapped_func(*args, **kwargs)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\google\api_core\retry\retry_unary.py:293, in Retry.__call__.<locals>.retry_wrapped_func(*args, **kwargs)
289 target = functools.partial(func, *args, **kwargs)
290 sleep_generator = exponential_sleep_generator(
291 self._initial, self._maximum, multiplier=self._multiplier
292 )
--> 293 return retry_target(
294 target,
295 self._predicate,
296 sleep_generator,
297 timeout=self._timeout,
298 on_error=on_error,
299 )
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\google\api_core\retry\retry_unary.py:153, in retry_target(target, predicate, sleep_generator, timeout, on_error, exception_factory, **kwargs)
149 # pylint: disable=broad-except
150 # This function explicitly must deal with broad exceptions.
151 except Exception as exc:
152 # defer to shared logic for handling errors
--> 153 _retry_error_helper(
154 exc,
155 deadline,
156 sleep,
157 error_list,
158 predicate,
159 on_error,
160 exception_factory,
161 timeout,
162 )
163 # if exception not raised, sleep before next attempt
164 time.sleep(sleep)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\google\api_core\retry\retry_base.py:212, in _retry_error_helper(exc, deadline, next_sleep, error_list, predicate_fn, on_error_fn, exc_factory_fn, original_timeout)
206 if not predicate_fn(exc):
207 final_exc, source_exc = exc_factory_fn(
208 error_list,
209 RetryFailureReason.NON_RETRYABLE_ERROR,
210 original_timeout,
211 )
--> 212 raise final_exc from source_exc
213 if on_error_fn is not None:
214 on_error_fn(exc)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\google\api_core\retry\retry_unary.py:144, in retry_target(target, predicate, sleep_generator, timeout, on_error, exception_factory, **kwargs)
142 for sleep in sleep_generator:
143 try:
--> 144 result = target()
145 if inspect.isawaitable(result):
146 warnings.warn(_ASYNC_RETRY_WARNING)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\google\api_core\timeout.py:120, in TimeToDeadlineTimeout.__call__.<locals>.func_with_timeout(*args, **kwargs)
117 # Avoid setting negative timeout
118 kwargs["timeout"] = max(0, self._timeout - time_since_first_attempt)
--> 120 return func(*args, **kwargs)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\google\api_core\grpc_helpers.py:78, in _wrap_unary_errors.<locals>.error_remapped_callable(*args, **kwargs)
76 return callable_(*args, **kwargs)
77 except grpc.RpcError as exc:
---> 78 raise exceptions.from_grpc_error(exc) from exc
InternalServerError: 500 An internal error has occurred. Please retry or report in https://developers.generativeai.google/guide/troubleshooting
```